diff --git a/.ci/init.gradle b/.ci/init.gradle new file mode 100644 index 0000000000000..aa10c3e268269 --- /dev/null +++ b/.ci/init.gradle @@ -0,0 +1,97 @@ +import com.bettercloud.vault.VaultConfig; +import com.bettercloud.vault.Vault; + +initscript { + repositories { + mavenCentral() + } + dependencies { + classpath 'com.bettercloud:vault-java-driver:4.1.0' + } +} + +['VAULT_ADDR', 'VAULT_ROLE_ID', 'VAULT_SECRET_ID'].each { + if (System.env."$it" == null) { + throw new GradleException("$it must be set!") + + } +} + +final String vaultToken = new Vault( + new VaultConfig() + .address(System.env.VAULT_ADDR) + .engineVersion(1) + .build() + ) + .withRetries(5, 1000) + .auth() + .loginByAppRole("approle", System.env.VAULT_ROLE_ID, System.env.VAULT_SECRET_ID) + .getAuthClientToken(); + +final Vault vault = new Vault( + new VaultConfig() + .address(System.env.VAULT_ADDR) + .engineVersion(1) + .token(vaultToken) + .build() +) + .withRetries(5, 1000) + +final Map artifactoryCredentials = vault.logical() + .read("secret/elasticsearch-ci/artifactory.elstc.co") + .getData(); + +logger.info("Using elastic artifactory repos") +Closure configCache = { + return { + name "artifactory-gradle-release" + url "https://artifactory.elstc.co/artifactory/gradle-release" + credentials { + username artifactoryCredentials.get("username") + password artifactoryCredentials.get("token") + } + } +} +settingsEvaluated { settings -> + settings.pluginManagement { + repositories { + maven configCache() + } + } +} +projectsLoaded { + allprojects { + buildscript { + repositories { + maven configCache() + } + } + repositories { + maven configCache() + } + } + rootProject { + project.pluginManager.withPlugin('com.gradle.build-scan') { + buildScan.server = 'https://gradle-enterprise.elastic.co' + } + } +} + +if (System.env.GRADLE_BUILD_CACHE_URL != null) { + final Map buildCacheCredentials = vault.logical() + .read("secret/elasticsearch-ci/gradle-build-cache") + .getData(); + gradle.settingsEvaluated { settings -> + settings.buildCache { + remote(HttpBuildCache) { + url = System.getenv('GRADLE_BUILD_CACHE_URL') + push = Boolean.valueOf(System.getenv('GRADLE_BUILD_CACHE_PUSH') ?: 'false') + credentials { + username = buildCacheCredentials.get("username") + password = buildCacheCredentials.get("password") + } + } + } + } +} + diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties index c902a69207108..ac8682d976972 100644 --- a/.ci/java-versions.properties +++ b/.ci/java-versions.properties @@ -4,7 +4,7 @@ # build and test Elasticsearch for this branch. Valid Java versions # are 'java' or 'openjdk' followed by the major release number. -ES_BUILD_JAVA=java11 +ES_BUILD_JAVA=openjdk12 ES_RUNTIME_JAVA=java8 GRADLE_TASK=build - +GRADLE_EXTRA_ARGS=--no-parallel diff --git a/.ci/matrix-build-javas.yml b/.ci/matrix-build-javas.yml index 795e2e81f5f92..202fd60edea4c 100644 --- a/.ci/matrix-build-javas.yml +++ b/.ci/matrix-build-javas.yml @@ -6,5 +6,4 @@ # or 'openjdk' followed by the major release number. 
ES_BUILD_JAVA: - - java11 - openjdk12 diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml index 0cea9c939e149..0f945376ee4b0 100644 --- a/.ci/matrix-runtime-javas.yml +++ b/.ci/matrix-runtime-javas.yml @@ -9,6 +9,10 @@ ES_RUNTIME_JAVA: - java8 - java8fips - java11 + - java12 - openjdk12 - zulu8 - zulu11 + - zulu12 + - corretto11 + - corretto8 diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh index ce78ebcc1cae7..9f48efc82d6a5 100755 --- a/.ci/packer_cache.sh +++ b/.ci/packer_cache.sh @@ -21,4 +21,5 @@ export JAVA_HOME="${HOME}"/.java/${ES_BUILD_JAVA} export JAVA8_HOME="${HOME}"/.java/java8 export JAVA11_HOME="${HOME}"/.java/java11 export JAVA12_HOME="${HOME}"/.java/java12 -./gradlew --parallel clean pullFixture --scan -Porg.elasticsearch.acceptScanTOS=true -s resolveAllDependencies +./gradlew --parallel clean --scan -Porg.elasticsearch.acceptScanTOS=true -s resolveAllDependencies + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 23ca7a299e588..8a72b530bc527 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -92,11 +92,11 @@ Contributing to the Elasticsearch codebase **Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch) -JDK 11 is required to build Elasticsearch. You must have a JDK 11 installation +JDK 12 is required to build Elasticsearch. You must have a JDK 12 installation with the environment variable `JAVA_HOME` referencing the path to Java home for -your JDK 11 installation. By default, tests use the same runtime as `JAVA_HOME`. +your JDK 12 installation. By default, tests use the same runtime as `JAVA_HOME`. However, since Elasticsearch supports JDK 8, the build supports compiling with -JDK 11 and testing on a JDK 8 runtime; to do this, set `RUNTIME_JAVA_HOME` +JDK 12 and testing on a JDK 8 runtime; to do this, set `RUNTIME_JAVA_HOME` pointing to the Java home of a JDK 8 installation. Note that this mechanism can be used to test against other JDKs as well, this is not only limited to JDK 8. diff --git a/NOTICE.txt b/NOTICE.txt index f1e3198ab4a9a..9dd784d5a09ec 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -3,3 +3,6 @@ Copyright 2009-2018 Elasticsearch This product includes software developed by The Apache Software Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). diff --git a/README.textile b/README.textile index 5912c0371fa2e..699b29e6a905b 100644 --- a/README.textile +++ b/README.textile @@ -150,14 +150,14 @@ curl -XPUT 'http://localhost:9200/kimchy/_doc/2?pretty' -H 'Content-Type: applic The above will index information into the @kimchy@ index. Each user will get their own special index. -Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well): +Complete control on the index level is allowed. As an example, in the above case, we might want to change from the default 1 shards with 1 replica per index, to 2 shards with 1 replica per index (because this user tweets a lot). Here is how this can be done (the configuration can be in yaml as well):
 curl -XPUT http://localhost:9200/another_user?pretty -H 'Content-Type: application/json' -d '
 {
-    "index" : {
-        "number_of_shards" : 1,
-        "number_of_replicas" : 1
+    "settings" : {
+        "index.number_of_shards" : 2,
+        "index.number_of_replicas" : 1
     }
 }'
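As an illustration only (this is not part of the change above, it simply reuses the example index name), the effect of the request can be checked by reading the settings back with the index settings API:

curl -XGET 'http://localhost:9200/another_user/_settings?pretty'

The response should show number_of_shards 2 and number_of_replicas 1 for the @another_user@ index.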
 
diff --git a/TESTING.asciidoc b/TESTING.asciidoc index cf4a40713114c..17315b8828a7c 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -16,6 +16,15 @@ following: ./gradlew assemble ----------------------------- +To create a platform-specific build including the x-pack modules, use the +following depending on your operating system: + +----------------------------- +./gradlew :distribution:archives:linux-tar:assemble --parallel +./gradlew :distribution:archives:darwin-tar:assemble --parallel +./gradlew :distribution:archives:windows-zip:assemble --parallel +----------------------------- + === Running Elasticsearch from a checkout In order to run Elasticsearch from source without building a package, you can @@ -366,8 +375,9 @@ These are the linux flavors supported, all of which we provide images for * debian-9 aka stretch, the current debian stable distribution * centos-6 * centos-7 -* fedora-27 +* rhel-8 * fedora-28 +* fedora-29 * oel-6 aka Oracle Enterprise Linux 6 * oel-7 aka Oracle Enterprise Linux 7 * sles-12 @@ -495,8 +505,8 @@ sudo bash $PACKAGING_TESTS/run-tests.sh # run specific test cases sudo bash $PACKAGING_TESTS/run-tests.sh \ - org.elasticsearch.packaging.test.DefaultZipTests \ - org.elasticsearch.packaging.test.OssZipTests + org.elasticsearch.packaging.test.DefaultWindowsZipTests \ + org.elasticsearch.packaging.test.OssWindowsZipTests -------------------------------------------- or on Windows, from a terminal running as Administrator @@ -507,8 +517,8 @@ powershell -File $Env:PACKAGING_TESTS/run-tests.ps1 # run specific test cases powershell -File $Env:PACKAGING_TESTS/run-tests.ps1 ` - org.elasticsearch.packaging.test.DefaultZipTests ` - org.elasticsearch.packaging.test.OssZipTests + org.elasticsearch.packaging.test.DefaultWindowsZipTests ` + org.elasticsearch.packaging.test.OssWindowsZipTests -------------------------------------------- Note that on Windows boxes when running from inside the GUI, you may have to log out and diff --git a/Vagrantfile b/Vagrantfile index e47dc81164901..a4d314a579b1d 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -109,13 +109,13 @@ Vagrant.configure(2) do |config| rpm_common config, box end end - 'fedora-27'.tap do |box| + 'fedora-28'.tap do |box| config.vm.define box, define_opts do |config| - config.vm.box = 'elastic/fedora-27-x86_64' + config.vm.box = 'elastic/fedora-28-x86_64' dnf_common config, box end end - 'fedora-28'.tap do |box| + 'fedora-29'.tap do |box| config.vm.define box, define_opts do |config| config.vm.box = 'elastic/fedora-28-x86_64' dnf_common config, box @@ -133,6 +133,12 @@ Vagrant.configure(2) do |config| sles_common config, box end end + 'rhel-8'.tap do |box| + config.vm.define box, define_opts do |config| + config.vm.box = 'elastic/rhel-8-x86_64' + rpm_common config, box + end + end windows_2012r2_box = ENV['VAGRANT_WINDOWS_2012R2_BOX'] if windows_2012r2_box && windows_2012r2_box.empty? 
== false @@ -346,6 +352,13 @@ def sh_install_deps(config, echo "==> Java is not installed" return 1 } + cat \<\ /etc/profile.d/java_home.sh +if [ -z "\\\$JAVA_HOME" ]; then + export JAVA_HOME=$(dirname $(dirname $(readlink -f $(which java)))) +fi +export SYSTEM_JAVA_HOME=\\\$JAVA_HOME +unset JAVA_HOME +JAVA ensure tar ensure curl ensure unzip @@ -382,6 +395,8 @@ Defaults env_keep += "BATS_UTILS" Defaults env_keep += "BATS_TESTS" Defaults env_keep += "PACKAGING_ARCHIVES" Defaults env_keep += "PACKAGING_TESTS" +Defaults env_keep += "JAVA_HOME" +Defaults env_keep += "SYSTEM_JAVA_HOME" SUDOERS_VARS chmod 0440 /etc/sudoers.d/elasticsearch_vars SHELL @@ -402,6 +417,9 @@ def windows_common(config, name) config.vm.provision 'set env variables', type: 'shell', inline: <<-SHELL $ErrorActionPreference = "Stop" [Environment]::SetEnvironmentVariable("PACKAGING_ARCHIVES", "C:/project/build/packaging/archives", "Machine") + $javaHome = [Environment]::GetEnvironmentVariable("JAVA_HOME", "Machine") + [Environment]::SetEnvironmentVariable("SYSTEM_JAVA_HOME", $javaHome, "Machine") [Environment]::SetEnvironmentVariable("PACKAGING_TESTS", "C:/project/build/packaging/tests", "Machine") + [Environment]::SetEnvironmentVariable("JAVA_HOME", $null, "Machine") SHELL end diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 351b0b9e97118..13eca8f06c26f 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -24,7 +24,7 @@ mainClassName = 'org.openjdk.jmh.Main' assemble.enabled = false archivesBaseName = 'elasticsearch-benchmarks' -unitTest.enabled = false +test.enabled = false dependencies { compile("org.elasticsearch:elasticsearch:${version}") { diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/time/RoundingBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/time/RoundingBenchmark.java index 6da6d5290bfee..0928a7565607b 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/time/RoundingBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/time/RoundingBenchmark.java @@ -34,8 +34,14 @@ import org.openjdk.jmh.annotations.Warmup; import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.Rounding.DateTimeUnit.DAY_OF_MONTH; +import static org.elasticsearch.common.Rounding.DateTimeUnit.MONTH_OF_YEAR; +import static org.elasticsearch.common.Rounding.DateTimeUnit.QUARTER_OF_YEAR; +import static org.elasticsearch.common.Rounding.DateTimeUnit.YEAR_OF_CENTURY; + @Fork(3) @Warmup(iterations = 10) @Measurement(iterations = 10) @@ -48,23 +54,13 @@ public class RoundingBenchmark { private final ZoneId zoneId = ZoneId.of("Europe/Amsterdam"); private final DateTimeZone timeZone = DateUtils.zoneIdToDateTimeZone(zoneId); + private long timestamp = 1548879021354L; + private final org.elasticsearch.common.rounding.Rounding jodaRounding = - org.elasticsearch.common.rounding.Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(timeZone).build(); + org.elasticsearch.common.rounding.Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(timeZone).build(); private final Rounding javaRounding = Rounding.builder(Rounding.DateTimeUnit.HOUR_OF_DAY) .timeZone(zoneId).build(); - private final org.elasticsearch.common.rounding.Rounding jodaDayOfMonthRounding = - org.elasticsearch.common.rounding.Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(timeZone).build(); - private final Rounding javaDayOfMonthRounding = Rounding.builder(TimeValue.timeValueMinutes(60)) - 
.timeZone(zoneId).build(); - - private final org.elasticsearch.common.rounding.Rounding timeIntervalRoundingJoda = - org.elasticsearch.common.rounding.Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(timeZone).build(); - private final Rounding timeIntervalRoundingJava = Rounding.builder(TimeValue.timeValueMinutes(60)) - .timeZone(zoneId).build(); - - private final long timestamp = 1548879021354L; - @Benchmark public long timeRoundingDateTimeUnitJoda() { return jodaRounding.round(timestamp); @@ -75,6 +71,11 @@ public long timeRoundingDateTimeUnitJava() { return javaRounding.round(timestamp); } + private final org.elasticsearch.common.rounding.Rounding jodaDayOfMonthRounding = + org.elasticsearch.common.rounding.Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(timeZone).build(); + private final Rounding javaDayOfMonthRounding = Rounding.builder(DAY_OF_MONTH) + .timeZone(zoneId).build(); + @Benchmark public long timeRoundingDateTimeUnitDayOfMonthJoda() { return jodaDayOfMonthRounding.round(timestamp); @@ -85,6 +86,11 @@ public long timeRoundingDateTimeUnitDayOfMonthJava() { return javaDayOfMonthRounding.round(timestamp); } + private final org.elasticsearch.common.rounding.Rounding timeIntervalRoundingJoda = + org.elasticsearch.common.rounding.Rounding.builder(TimeValue.timeValueMinutes(60)).timeZone(timeZone).build(); + private final Rounding timeIntervalRoundingJava = Rounding.builder(TimeValue.timeValueMinutes(60)) + .timeZone(zoneId).build(); + @Benchmark public long timeIntervalRoundingJava() { return timeIntervalRoundingJava.round(timestamp); @@ -94,4 +100,65 @@ public long timeIntervalRoundingJava() { public long timeIntervalRoundingJoda() { return timeIntervalRoundingJoda.round(timestamp); } + + private final org.elasticsearch.common.rounding.Rounding timeUnitRoundingUtcDayOfMonthJoda = + org.elasticsearch.common.rounding.Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.UTC).build(); + private final Rounding timeUnitRoundingUtcDayOfMonthJava = Rounding.builder(DAY_OF_MONTH) + .timeZone(ZoneOffset.UTC).build(); + + @Benchmark + public long timeUnitRoundingUtcDayOfMonthJava() { + return timeUnitRoundingUtcDayOfMonthJava.round(timestamp); + } + + @Benchmark + public long timeUnitRoundingUtcDayOfMonthJoda() { + return timeUnitRoundingUtcDayOfMonthJoda.round(timestamp); + } + + private final org.elasticsearch.common.rounding.Rounding timeUnitRoundingUtcQuarterOfYearJoda = + org.elasticsearch.common.rounding.Rounding.builder(DateTimeUnit.QUARTER).timeZone(DateTimeZone.UTC).build(); + private final Rounding timeUnitRoundingUtcQuarterOfYearJava = Rounding.builder(QUARTER_OF_YEAR) + .timeZone(ZoneOffset.UTC).build(); + + @Benchmark + public long timeUnitRoundingUtcQuarterOfYearJava() { + return timeUnitRoundingUtcQuarterOfYearJava.round(timestamp); + } + + @Benchmark + public long timeUnitRoundingUtcQuarterOfYearJoda() { + return timeUnitRoundingUtcQuarterOfYearJoda.round(timestamp); + } + + private final org.elasticsearch.common.rounding.Rounding timeUnitRoundingUtcMonthOfYearJoda = + org.elasticsearch.common.rounding.Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(DateTimeZone.UTC).build(); + private final Rounding timeUnitRoundingUtcMonthOfYearJava = Rounding.builder(MONTH_OF_YEAR) + .timeZone(ZoneOffset.UTC).build(); + + @Benchmark + public long timeUnitRoundingUtcMonthOfYearJava() { + return timeUnitRoundingUtcMonthOfYearJava.round(timestamp); + } + + @Benchmark + public long timeUnitRoundingUtcMonthOfYearJoda() { + return 
timeUnitRoundingUtcMonthOfYearJoda.round(timestamp); + } + + + private final org.elasticsearch.common.rounding.Rounding timeUnitRoundingUtcYearOfCenturyJoda = + org.elasticsearch.common.rounding.Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(DateTimeZone.UTC).build(); + private final Rounding timeUnitRoundingUtcYearOfCenturyJava = Rounding.builder(YEAR_OF_CENTURY) + .timeZone(ZoneOffset.UTC).build(); + + @Benchmark + public long timeUnitRoundingUtcYearOfCenturyJava() { + return timeUnitRoundingUtcYearOfCenturyJava.round(timestamp); + } + + @Benchmark + public long timeUnitRoundingUtcYearOfCenturyJoda() { + return timeUnitRoundingUtcYearOfCenturyJoda.round(timestamp); + } } diff --git a/build.gradle b/build.gradle index d50801bd207f4..af95b1e4ca9a2 100644 --- a/build.gradle +++ b/build.gradle @@ -29,7 +29,7 @@ import org.gradle.util.DistributionLocator import org.gradle.plugins.ide.eclipse.model.SourceFolder plugins { - id 'com.gradle.build-scan' version '2.0.2' + id 'com.gradle.build-scan' version '2.2.1' id 'base' } if (properties.get("org.elasticsearch.acceptScanTOS", "false") == "true") { @@ -86,7 +86,7 @@ subprojects { } repositories { maven { - name = 'localTest' + name = 'test' url = "${rootProject.buildDir}/local-test-repo" } } @@ -159,7 +159,8 @@ task verifyVersions { * the enabled state of every bwc task. It should be set back to true * after the backport of the backcompat code is complete. */ -final boolean bwc_tests_enabled = true + +boolean bwc_tests_enabled = true final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { @@ -170,6 +171,11 @@ if (bwc_tests_enabled == false) { println "See ${bwc_tests_disabled_issue}" println "===========================================================" } +if (project.gradle.startParameter.taskNames.find { it.startsWith("checkPart") } != null) { + // Disable BWC tests for checkPart* tasks as it's expected that this will run un it's own check + bwc_tests_enabled = false +} + subprojects { ext.bwc_tests_enabled = bwc_tests_enabled } @@ -235,7 +241,7 @@ allprojects { "org.elasticsearch.plugin:rank-eval-client:${version}": ':modules:rank-eval', // for security example plugins "org.elasticsearch.plugin:x-pack-core:${version}": ':x-pack:plugin:core', - "org.elasticsearch.client.x-pack-transport:${version}": ':x-pack:transport-client' + "org.elasticsearch.client:x-pack-transport:${version}": ':x-pack:transport-client' ] /* @@ -331,14 +337,6 @@ gradle.projectsEvaluated { integTest.mustRunAfter test } configurations.all { Configuration configuration -> - /* - * The featureAwarePlugin configuration has a dependency on x-pack:plugin:core and x-pack:plugin:core has a dependency on the - * featureAwarePlugin configuration. The below task ordering logic would force :x-pack:plugin:core:test - * :x-pack:test:feature-aware:test to depend on each other circularly. We break that cycle here. 
- */ - if (configuration.name == "featureAwarePlugin") { - return - } dependencies.all { Dependency dep -> Project upstreamProject = dependencyToProject(dep) if (upstreamProject != null) { @@ -350,7 +348,7 @@ gradle.projectsEvaluated { Task task = project.tasks.findByName(taskName) Task upstreamTask = upstreamProject.tasks.findByName(taskName) if (task != null && upstreamTask != null) { - task.mustRunAfter(upstreamTask) + task.shouldRunAfter(upstreamTask) } } } @@ -375,21 +373,6 @@ allprojects { // also ignore other possible build dirs excludeDirs += file('build') excludeDirs += file('build-eclipse') - - iml { - // fix so that Gradle idea plugin properly generates support for resource folders - // see also https://issues.gradle.org/browse/GRADLE-2975 - withXml { - it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/main/resources' }.each { - it.attributes().remove('isTestSource') - it.attributes().put('type', 'java-resource') - } - it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/test/resources' }.each { - it.attributes().remove('isTestSource') - it.attributes().put('type', 'java-test-resource') - } - } - } } } @@ -407,14 +390,6 @@ idea { vcs = 'Git' } } -// Make sure gradle idea was run before running anything in intellij (including import). -File ideaMarker = new File(projectDir, '.local-idea-is-configured') -tasks.idea.doLast { - ideaMarker.setText('', 'UTF-8') -} -if (System.getProperty('idea.active') != null && ideaMarker.exists() == false) { - throw new GradleException('You must run `./gradlew idea` from the root of elasticsearch before importing into IntelliJ') -} // eclipse configuration allprojects { @@ -603,9 +578,10 @@ if (System.properties.get("build.compare") != null) { allprojects { task resolveAllDependencies { - doLast { - configurations.findAll { it.isCanBeResolved() }.each { it.resolve() } - } + dependsOn tasks.matching { it.name == "pullFixture"} + doLast { + configurations.findAll { it.isCanBeResolved() }.each { it.resolve() } + } } } diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 2b5e4f2d24d1f..411f4b7aaa4a9 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -18,6 +18,8 @@ */ import org.gradle.util.GradleVersion +import java.util.regex.Matcher + plugins { id 'java-gradle-plugin' id 'groovy' @@ -197,11 +199,11 @@ if (project != rootProject) { into localDownloads } - unitTest { + test { // The test task is configured to runtimeJava version, but build-tools doesn't support all of them, so test // with compiler instead on the ones that are too old. 
if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_10) { - jvm = "${project.compilerJavaHome}/bin/java" + executable = "${project.compilerJavaHome}/bin/java" } } @@ -209,12 +211,10 @@ if (project != rootProject) { task integTest(type: Test) { // integration test requires the local testing repo for example plugin builds dependsOn project.rootProject.allprojects.collect { - it.tasks.matching { it.name == 'publishNebulaPublicationToLocalTestRepository'} + it.tasks.matching { it.name == 'publishNebulaPublicationToTestRepository'} } dependsOn setupLocalDownloads exclude "**/*Tests.class" - testClassesDirs = sourceSets.test.output.classesDirs - classpath = sourceSets.test.runtimeClasspath inputs.dir(file("src/testKit")) // tell BuildExamplePluginsIT where to find the example plugins systemProperty ( @@ -226,7 +226,13 @@ if (project != rootProject) { systemProperty 'test.local-test-repo-path', "${rootProject.buildDir}/local-test-repo" systemProperty 'test.local-test-downloads-path', localDownloads systemProperty 'test.version_under_test', version - systemProperty 'test.lucene-snapshot-revision', (versions.lucene =~ /\w+-snapshot-([a-z0-9]+)/)[0][1] + Matcher isLuceneSnapshot = (/\w+-snapshot-([a-z0-9]+)/ =~ versions.lucene) + if (isLuceneSnapshot) { + systemProperty 'test.lucene-snapshot-revision', isLuceneSnapshot[0][1] + } + maxParallelForks System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel.toString()) as Integer + // These tests run Gradle which doesn't have FIPS support + onlyIf { project.inFipsJvm == false } } check.dependsOn(integTest) @@ -235,6 +241,7 @@ if (project != rootProject) { forbiddenPatterns { exclude '**/*.wav' + exclude '**/*.p12' // the file that actually defines nocommit exclude '**/ForbiddenPatternsTask.java' } diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/BalancersConfiguration.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/BalancersConfiguration.groovy deleted file mode 100644 index 91355bf2494cd..0000000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/BalancersConfiguration.groovy +++ /dev/null @@ -1,53 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -import com.carrotsearch.ant.tasks.junit4.SuiteBalancer -import com.carrotsearch.ant.tasks.junit4.balancers.ExecutionTimeBalancer -import com.carrotsearch.ant.tasks.junit4.listeners.ExecutionTimesReport -import org.apache.tools.ant.types.FileSet - -class BalancersConfiguration { - // parent task, so executionTime can register an additional listener - RandomizedTestingTask task - List balancers = new ArrayList<>() - - void executionTime(Map properties) { - ExecutionTimeBalancer balancer = new ExecutionTimeBalancer() - - FileSet fileSet = new FileSet() - Object filename = properties.remove('cacheFilename') - if (filename == null) { - throw new IllegalArgumentException('cacheFilename is required for executionTime balancer') - } - fileSet.setIncludes(filename.toString()) - - File cacheDir = task.project.projectDir - Object dir = properties.remove('cacheDir') - if (dir != null) { - cacheDir = new File(dir.toString()) - } - fileSet.setDir(cacheDir) - balancer.add(fileSet) - - int historySize = 10 - Object size = properties.remove('historySize') - if (size instanceof Integer) { - historySize = (Integer)size - } else if (size != null) { - throw new IllegalArgumentException('historySize must be an integer') - } - ExecutionTimesReport listener = new ExecutionTimesReport() - listener.setFile(new File(cacheDir, filename.toString())) - 
listener.setHistoryLength(historySize) - - if (properties.isEmpty() == false) { - throw new IllegalArgumentException('Unknown properties for executionTime balancer: ' + properties.keySet()) - } - - task.listenersConfig.listeners.add(listener) - balancers.add(balancer) - } - - void custom(SuiteBalancer balancer) { - balancers.add(balancer) - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/ListenersConfiguration.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/ListenersConfiguration.groovy deleted file mode 100644 index 5fa5baa8ffb0c..0000000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/ListenersConfiguration.groovy +++ /dev/null @@ -1,25 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener -import com.carrotsearch.ant.tasks.junit4.listeners.antxml.AntXmlReport - - -class ListenersConfiguration { - RandomizedTestingTask task - List listeners = new ArrayList<>() - - void junitReport(Map props) { - AntXmlReport reportListener = new AntXmlReport() - Object dir = props == null ? null : props.get('dir') - if (dir != null) { - reportListener.setDir(task.project.file(dir)) - } else { - reportListener.setDir(new File(task.project.buildDir, 'reports' + File.separator + "${task.name}Junit")) - } - listeners.add(reportListener) - } - - void custom(AggregatedEventListener listener) { - listeners.add(listener) - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy deleted file mode 100644 index 3b0348b48990d..0000000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingPlugin.groovy +++ /dev/null @@ -1,58 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -import com.carrotsearch.ant.tasks.junit4.JUnit4 -import org.gradle.api.Plugin -import org.gradle.api.Project -import org.gradle.api.Task -import org.gradle.api.tasks.TaskContainer - -class RandomizedTestingPlugin implements Plugin { - - void apply(Project project) { - setupSeed(project) - createUnitTestTask(project.tasks) - configureAnt(project.ant) - } - - /** - * Pins the test seed at configuration time so it isn't different on every - * {@link RandomizedTestingTask} execution. This is useful if random - * decisions in one run of {@linkplain RandomizedTestingTask} influence the - * outcome of subsequent runs. Pinning the seed up front like this makes - * the reproduction line from one run be useful on another run. - */ - static void setupSeed(Project project) { - if (project.rootProject.ext.has('testSeed')) { - /* Skip this if we've already pinned the testSeed. It is important - * that this checks the rootProject so that we know we've only ever - * initialized one time. */ - return - } - String testSeed = System.getProperty('tests.seed') - if (testSeed == null) { - long seed = new Random(System.currentTimeMillis()).nextLong() - testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT) - } - /* Set the testSeed on the root project first so other projects can use - * it during initialization. */ - project.rootProject.ext.testSeed = testSeed - project.rootProject.subprojects { - project.ext.testSeed = testSeed - } - } - - static void createUnitTestTask(TaskContainer tasks) { - // only create a unitTest task if the `test` task exists as some project don't make use of it. 
- tasks.matching { it.name == "test" }.all { - // We don't want to run any tests with the Gradle test runner since we add our own randomized runner - it.enabled = false - RandomizedTestingTask unitTest = tasks.create('unitTest', RandomizedTestingTask) - unitTest.description = 'Runs unit tests with the randomized testing framework' - it.dependsOn unitTest - } - } - - static void configureAnt(AntBuilder ant) { - ant.project.addTaskDefinition('junit4:junit4', JUnit4.class) - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy deleted file mode 100644 index e5500d60093ed..0000000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy +++ /dev/null @@ -1,330 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -import com.carrotsearch.ant.tasks.junit4.ListenersList -import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener -import groovy.xml.NamespaceBuilder -import groovy.xml.NamespaceBuilderSupport -import org.apache.tools.ant.BuildException -import org.apache.tools.ant.DefaultLogger -import org.apache.tools.ant.Project -import org.apache.tools.ant.RuntimeConfigurable -import org.apache.tools.ant.UnknownElement -import org.elasticsearch.gradle.BuildPlugin -import org.gradle.api.DefaultTask -import org.gradle.api.InvalidUserDataException -import org.gradle.api.file.FileCollection -import org.gradle.api.file.FileTreeElement -import org.gradle.api.specs.Spec -import org.gradle.api.tasks.Input -import org.gradle.api.tasks.InputDirectory -import org.gradle.api.tasks.Optional -import org.gradle.api.tasks.TaskAction -import org.gradle.api.tasks.options.Option -import org.gradle.api.tasks.util.PatternFilterable -import org.gradle.api.tasks.util.PatternSet -import org.gradle.internal.logging.progress.ProgressLoggerFactory -import org.gradle.util.ConfigureUtil - -import javax.inject.Inject - -class RandomizedTestingTask extends DefaultTask { - - // TODO: change to "executable" to match gradle test params? 
- @Optional - @Input - String jvm = 'java' - - @Optional - @Input - File workingDir = new File(project.buildDir, 'testrun' + File.separator + name) - - @Optional - @Input - FileCollection classpath - - @Input - String parallelism = '1' - - @Input - FileCollection testClassesDirs - - @Optional - @Input - boolean haltOnFailure = true - - @Optional - @Input - boolean shuffleOnSlave = true - - @Optional - @Input - boolean enableAssertions = true - - @Optional - @Input - boolean enableSystemAssertions = true - - @Optional - @Input - boolean leaveTemporary = false - - @Optional - @Input - String ifNoTests = 'ignore' - - @Optional - @Input - String onNonEmptyWorkDirectory = 'fail' - - TestLoggingConfiguration testLoggingConfig = new TestLoggingConfiguration() - - BalancersConfiguration balancersConfig = new BalancersConfiguration(task: this) - ListenersConfiguration listenersConfig = new ListenersConfiguration(task: this) - - List jvmArgs = new ArrayList<>() - - @Optional - @Input - String argLine = null - - Map systemProperties = new HashMap<>() - Map environmentVariables = new HashMap<>() - PatternFilterable patternSet = new PatternSet() - - RandomizedTestingTask() { - outputs.upToDateWhen {false} // randomized tests are never up to date - listenersConfig.listeners.add(new TestProgressLogger(factory: getProgressLoggerFactory())) - listenersConfig.listeners.add(new TestReportLogger(logger: logger, config: testLoggingConfig)) - } - - @Inject - ProgressLoggerFactory getProgressLoggerFactory() { - throw new UnsupportedOperationException() - } - - void jvmArgs(Iterable arguments) { - jvmArgs.addAll(arguments) - } - - void jvmArg(String argument) { - jvmArgs.add(argument) - } - - void systemProperty(String property, Object value) { - systemProperties.put(property, value) - } - - void environment(String key, Object value) { - environmentVariables.put(key, value) - } - - void include(String... includes) { - this.patternSet.include(includes); - } - - void include(Iterable includes) { - this.patternSet.include(includes); - } - - void include(Spec includeSpec) { - this.patternSet.include(includeSpec); - } - - void include(Closure includeSpec) { - this.patternSet.include(includeSpec); - } - - void exclude(String... excludes) { - this.patternSet.exclude(excludes); - } - - void exclude(Iterable excludes) { - this.patternSet.exclude(excludes); - } - - void exclude(Spec excludeSpec) { - this.patternSet.exclude(excludeSpec); - } - - void exclude(Closure excludeSpec) { - this.patternSet.exclude(excludeSpec); - } - - @Input - void testLogging(Closure closure) { - ConfigureUtil.configure(closure, testLoggingConfig) - } - - @Input - void balancers(Closure closure) { - ConfigureUtil.configure(closure, balancersConfig) - } - - @Input - void listeners(Closure closure) { - ConfigureUtil.configure(closure, listenersConfig) - } - - @Option( - option = "tests", - description = "Sets test class or method name to be included. This is for IDEs. Use -Dtests.class and -Dtests.method" - ) - void setTestNameIncludePattern(String testNamePattern) { - // This is only implemented to give support for IDEs running tests. There are 3 patterns expected: - // * An exact test class and method - // * An exact test class - // * A package name prefix, ending with .* - // There is no way to distinguish the first two without looking at classes, so we use the rule - // that class names start with an uppercase letter... - // TODO: this doesn't work yet, but not sure why...intellij says it is using --tests, and this work from the command line... 
- String[] parts = testNamePattern.split('\\.') - String lastPart = parts[parts.length - 1] - String classname - String methodname = null - if (lastPart.equals('*') || lastPart.charAt(0).isUpperCase()) { - // package name or class name, just pass through - classname = testNamePattern - } else { - // method name, need to separate - methodname = lastPart - classname = testNamePattern.substring(0, testNamePattern.length() - lastPart.length() - 1) - } - ant.setProperty('tests.class', classname) - if (methodname != null) { - ant.setProperty('tests.method', methodname) - } - } - - @TaskAction - void executeTests() { - Map attributes = [ - jvm: jvm, - parallelism: parallelism, - heartbeat: testLoggingConfig.slowTests.heartbeat, - dir: workingDir, - tempdir: new File(workingDir, 'temp'), - haltOnFailure: true, // we want to capture when a build failed, but will decide whether to rethrow later - shuffleOnSlave: shuffleOnSlave, - leaveTemporary: leaveTemporary, - ifNoTests: ifNoTests, - onNonEmptyWorkDirectory: onNonEmptyWorkDirectory, - newenvironment: true - ] - - DefaultLogger listener = null - ByteArrayOutputStream antLoggingBuffer = null - if (logger.isInfoEnabled() == false) { - // in info logging, ant already outputs info level, so we see everything - // but on errors or when debugging, we want to see info level messages - // because junit4 emits jvm output with ant logging - if (testLoggingConfig.outputMode == TestLoggingConfiguration.OutputMode.ALWAYS) { - // we want all output, so just stream directly - listener = new DefaultLogger( - errorPrintStream: System.err, - outputPrintStream: System.out, - messageOutputLevel: Project.MSG_INFO) - } else { - // we want to buffer the info, and emit it if the test fails - antLoggingBuffer = new ByteArrayOutputStream() - PrintStream stream = new PrintStream(antLoggingBuffer, true, "UTF-8") - listener = new DefaultLogger( - errorPrintStream: stream, - outputPrintStream: stream, - messageOutputLevel: Project.MSG_INFO) - } - project.ant.project.addBuildListener(listener) - } - - NamespaceBuilderSupport junit4 = NamespaceBuilder.newInstance(ant, 'junit4') - try { - junit4.junit4(attributes) { - classpath { - pathElement(path: classpath.asPath) - } - if (enableAssertions) { - jvmarg(value: '-ea') - } - if (enableSystemAssertions) { - jvmarg(value: '-esa') - } - for (String arg : jvmArgs) { - jvmarg(value: arg) - } - if (argLine != null) { - jvmarg(line: argLine) - } - testClassesDirs.each { testClassDir -> - fileset(dir: testClassDir) { - patternSet.getIncludes().each { include(name: it) } - patternSet.getExcludes().each { exclude(name: it) } - } - } - for (Map.Entry prop : systemProperties) { - if (prop.getKey().equals('tests.seed')) { - throw new InvalidUserDataException('Seed should be ' + - 'set on the project instead of a system property') - } - if (prop.getValue() instanceof Closure) { - sysproperty key: prop.getKey(), value: (prop.getValue() as Closure).call().toString() - } else { - sysproperty key: prop.getKey(), value: prop.getValue().toString() - } - } - systemProperty 'tests.seed', project.testSeed - for (Map.Entry envvar : environmentVariables) { - env key: envvar.getKey(), value: envvar.getValue().toString() - } - makeListeners() - } - } catch (BuildException e) { - if (antLoggingBuffer != null) { - logger.error('JUnit4 test failed, ant output was:') - logger.error(antLoggingBuffer.toString('UTF-8')) - } - if (haltOnFailure) { - throw e; - } - } - - if (listener != null) { - // remove the listener we added so other ant tasks dont have verbose 
logging! - project.ant.project.removeBuildListener(listener) - } - } - - static class ListenersElement extends UnknownElement { - AggregatedEventListener[] listeners - - ListenersElement() { - super('listeners') - setNamespace('junit4') - setQName('listeners') - } - - public void handleChildren(Object realThing, RuntimeConfigurable wrapper) { - assert realThing instanceof ListenersList - ListenersList list = (ListenersList)realThing - - for (AggregatedEventListener listener : listeners) { - list.addConfigured(listener) - } - } - } - - /** - * Makes an ant xml element for 'listeners' just as AntBuilder would, except configuring - * the element adds the already created children. - */ - def makeListeners() { - def context = ant.getAntXmlContext() - def parentWrapper = context.currentWrapper() - def parent = parentWrapper.getProxy() - UnknownElement element = new ListenersElement(listeners: listenersConfig.listeners) - element.setProject(context.getProject()) - element.setRealThing(logger) - ((UnknownElement)parent).addChild(element) - RuntimeConfigurable wrapper = new RuntimeConfigurable(element, element.getQName()) - parentWrapper.addChild(wrapper) - return wrapper.getProxy() - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/SlowTestsConfiguration.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/SlowTestsConfiguration.groovy deleted file mode 100644 index 2705fdeaacb35..0000000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/SlowTestsConfiguration.groovy +++ /dev/null @@ -1,14 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -class SlowTestsConfiguration { - int heartbeat = 0 - int summarySize = 0 - - void heartbeat(int heartbeat) { - this.heartbeat = heartbeat - } - - void summarySize(int summarySize) { - this.summarySize = summarySize - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/StackTraceFiltersConfiguration.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/StackTraceFiltersConfiguration.groovy deleted file mode 100644 index 5e5610ab68e52..0000000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/StackTraceFiltersConfiguration.groovy +++ /dev/null @@ -1,14 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -class StackTraceFiltersConfiguration { - List patterns = new ArrayList<>() - List contains = new ArrayList<>() - - void regex(String pattern) { - patterns.add(pattern) - } - - void contains(String contain) { - contains.add(contain) - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestLoggingConfiguration.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestLoggingConfiguration.groovy deleted file mode 100644 index 97251252f54e2..0000000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestLoggingConfiguration.groovy +++ /dev/null @@ -1,43 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -import org.gradle.api.tasks.Input -import org.gradle.util.ConfigureUtil - -class TestLoggingConfiguration { - /** Display mode for output streams. */ - static enum OutputMode { - /** Always display the output emitted from tests. */ - ALWAYS, - /** - * Display the output only if a test/ suite failed. This requires internal buffering - * so the output will be shown only after a test completes. - */ - ONERROR, - /** Don't display the output, even on test failures. 
*/ - NEVER - } - - OutputMode outputMode = OutputMode.ONERROR - SlowTestsConfiguration slowTests = new SlowTestsConfiguration() - StackTraceFiltersConfiguration stackTraceFilters = new StackTraceFiltersConfiguration() - - /** Summarize the first N failures at the end of the test. */ - @Input - int showNumFailuresAtEnd = 3 // match TextReport default - - void slowTests(Closure closure) { - ConfigureUtil.configure(closure, slowTests) - } - - void stackTraceFilters(Closure closure) { - ConfigureUtil.configure(closure, stackTraceFilters) - } - - void outputMode(String mode) { - outputMode = mode.toUpperCase() as OutputMode - } - - void showNumFailuresAtEnd(int n) { - showNumFailuresAtEnd = n - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy deleted file mode 100644 index 005e43b9db434..0000000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package com.carrotsearch.gradle.junit4 - -import com.carrotsearch.ant.tasks.junit4.JUnit4 -import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.eventbus.Subscribe -import com.carrotsearch.ant.tasks.junit4.events.TestStartedEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedQuitEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteStartedEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.ChildBootstrap -import com.carrotsearch.ant.tasks.junit4.events.aggregated.HeartBeatEvent -import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener -import org.gradle.internal.logging.progress.ProgressLogger -import org.gradle.internal.logging.progress.ProgressLoggerFactory - -import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds -import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.ERROR -import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.FAILURE -import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.IGNORED -import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.IGNORED_ASSUMPTION -import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.OK - -/** - * Adapts junit4's event listeners into gradle's ProgressLogger. 
Note that - * junit4 guarantees (via guava) that methods on this class won't be called by - * multiple threads simultaneously which is helpful in making it simpler. - * - * Every time a test finishes this class will update the logger. It will log - * the last finished test method on the logger line until the first suite - * finishes. Once the first suite finishes it always logs the last finished - * suite. This means that in test runs with a single suite the logger will be - * updated with the test name the whole time which is useful because these runs - * usually have longer individual tests. For test runs with lots of suites the - * majority of the time is spent showing the last suite that finished which is - * more useful for those test runs because test methods there tend to be very - * quick. - */ -class TestProgressLogger implements AggregatedEventListener { - /** Factory to build a progress logger when testing starts */ - ProgressLoggerFactory factory - ProgressLogger parentProgressLogger - ProgressLogger suiteLogger - ProgressLogger testLogger - ProgressLogger[] slaveLoggers - int totalSuites - int totalSlaves - - // Counters incremented test completion. - volatile int suitesCompleted = 0 - volatile int testsCompleted = 0 - volatile int testsFailed = 0 - volatile int testsIgnored = 0 - - @Subscribe - void onStart(AggregatedStartEvent e) throws IOException { - totalSuites = e.suiteCount - totalSlaves = e.slaveCount - parentProgressLogger = factory.newOperation(TestProgressLogger) - parentProgressLogger.setDescription('Randomized test runner') - parentProgressLogger.started() - - suiteLogger = factory.newOperation(TestProgressLogger, parentProgressLogger) - suiteLogger.setDescription('Suite logger') - suiteLogger.started("Suites: 0/" + totalSuites) - testLogger = factory.newOperation(TestProgressLogger, parentProgressLogger) - testLogger.setDescription('Test logger') - testLogger.started('Tests: completed: 0, failed: 0, ignored: 0') - slaveLoggers = new ProgressLogger[e.slaveCount] - for (int i = 0; i < e.slaveCount; ++i) { - slaveLoggers[i] = factory.newOperation(TestProgressLogger, parentProgressLogger) - slaveLoggers[i].setDescription("J${i} test logger") - slaveLoggers[i].started("J${i}: initializing...") - } - } - - @Subscribe - void onChildBootstrap(ChildBootstrap e) throws IOException { - slaveLoggers[e.getSlave().id].progress("J${e.slave.id}: starting (pid ${e.slave.pidString})") - } - - @Subscribe - void onQuit(AggregatedQuitEvent e) throws IOException { - // if onStart was never called (eg no matching tests), suiteLogger and all the other loggers will be null - if (suiteLogger != null) { - suiteLogger.completed() - testLogger.completed() - for (ProgressLogger slaveLogger : slaveLoggers) { - slaveLogger.completed() - } - parentProgressLogger.completed() - } - } - - @Subscribe - void onSuiteStart(AggregatedSuiteStartedEvent e) throws IOException { - String suiteName = simpleName(e.suiteStartedEvent.description.className) - slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${suiteName} - initializing") - } - - @Subscribe - void onSuiteResult(AggregatedSuiteResultEvent e) throws IOException { - suitesCompleted++ - suiteLogger.progress("Suites: " + suitesCompleted + "/" + totalSuites) - } - - @Subscribe - void onTestResult(AggregatedTestResultEvent e) throws IOException { - String statusMessage - testsCompleted++ - switch (e.status) { - case ERROR: - case FAILURE: - testsFailed++ - statusMessage = "failed" - break - case IGNORED: - case IGNORED_ASSUMPTION: - testsIgnored++ - 
statusMessage = "ignored" - break - case OK: - String time = formatDurationInSeconds(e.executionTime) - statusMessage = "completed [${time}]" - break - default: - throw new IllegalArgumentException("Unknown test status: [${e.status}]") - } - testLogger.progress("Tests: completed: ${testsCompleted}, failed: ${testsFailed}, ignored: ${testsIgnored}") - String testName = simpleName(e.description.className) + '.' + e.description.methodName - slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} ${statusMessage}") - } - - @Subscribe - void onTestStarted(TestStartedEvent e) throws IOException { - String testName = simpleName(e.description.className) + '.' + e.description.methodName - slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} ...") - } - - @Subscribe - void onHeartbeat(HeartBeatEvent e) throws IOException { - String testName = simpleName(e.description.className) + '.' + e.description.methodName - String time = formatDurationInSeconds(e.getNoEventDuration()) - slaveLoggers[e.slave.id].progress("J${e.slave.id}: ${testName} stalled for ${time}") - } - - /** - * Extract a Class#getSimpleName style name from Class#getName style - * string. We can't just use Class#getSimpleName because junit descriptions - * don't always set the class field but they always set the className - * field. - */ - private static String simpleName(String className) { - return className.substring(className.lastIndexOf('.') + 1) - } - - @Override - void setOuter(JUnit4 junit) {} -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy deleted file mode 100644 index 6ed6ecf86196d..0000000000000 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy +++ /dev/null @@ -1,369 +0,0 @@ -package com.carrotsearch.gradle.junit4 - -import com.carrotsearch.ant.tasks.junit4.JUnit4 -import com.carrotsearch.ant.tasks.junit4.Pluralize -import com.carrotsearch.ant.tasks.junit4.TestsSummaryEventListener -import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.base.Strings -import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.eventbus.Subscribe -import com.carrotsearch.ant.tasks.junit4.events.EventType -import com.carrotsearch.ant.tasks.junit4.events.IEvent -import com.carrotsearch.ant.tasks.junit4.events.IStreamEvent -import com.carrotsearch.ant.tasks.junit4.events.SuiteStartedEvent -import com.carrotsearch.ant.tasks.junit4.events.TestFinishedEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedQuitEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedResultEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteStartedEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.ChildBootstrap -import com.carrotsearch.ant.tasks.junit4.events.aggregated.HeartBeatEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.PartialOutputEvent -import com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus -import com.carrotsearch.ant.tasks.junit4.events.mirrors.FailureMirror -import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener -import 
com.carrotsearch.ant.tasks.junit4.listeners.StackTraceFilter -import org.apache.tools.ant.filters.TokenFilter -import org.gradle.api.logging.LogLevel -import org.gradle.api.logging.Logger -import org.junit.runner.Description - -import java.util.concurrent.atomic.AtomicInteger - -import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDescription -import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds -import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatTime -import static com.carrotsearch.gradle.junit4.TestLoggingConfiguration.OutputMode - -class TestReportLogger extends TestsSummaryEventListener implements AggregatedEventListener { - - static final String FAILURE_MARKER = " <<< FAILURES!" - - /** Status names column. */ - static EnumMap statusNames; - static { - statusNames = new EnumMap<>(TestStatus.class); - for (TestStatus s : TestStatus.values()) { - statusNames.put(s, - s == TestStatus.IGNORED_ASSUMPTION - ? "IGNOR/A" : s.toString()); - } - } - - JUnit4 owner - - /** Logger to write the report to */ - Logger logger - - TestLoggingConfiguration config - - /** Forked concurrent JVM count. */ - int forkedJvmCount - - /** Format line for JVM ID string. */ - String jvmIdFormat - - /** Output stream that logs messages to the given logger */ - LoggingOutputStream outStream - LoggingOutputStream errStream - - /** A list of failed tests, if to be displayed at the end. */ - List failedTests = new ArrayList<>() - - /** Stack trace filters. */ - StackTraceFilter stackFilter = new StackTraceFilter() - - Map suiteTimes = new HashMap<>() - boolean slowTestsFound = false - - int totalSuites - AtomicInteger suitesCompleted = new AtomicInteger() - - @Subscribe - void onStart(AggregatedStartEvent e) throws IOException { - this.totalSuites = e.getSuiteCount(); - StringBuilder info = new StringBuilder('==> Test Info: ') - info.append('seed=' + owner.getSeed() + '; ') - info.append(Pluralize.pluralize(e.getSlaveCount(), 'jvm') + '=' + e.getSlaveCount() + '; ') - info.append(Pluralize.pluralize(e.getSuiteCount(), 'suite') + '=' + e.getSuiteCount()) - logger.lifecycle(info.toString()) - - forkedJvmCount = e.getSlaveCount(); - jvmIdFormat = " J%-" + (1 + (int) Math.floor(Math.log10(forkedJvmCount))) + "d"; - - outStream = new LoggingOutputStream(logger: logger, level: LogLevel.LIFECYCLE, prefix: " 1> ") - errStream = new LoggingOutputStream(logger: logger, level: LogLevel.ERROR, prefix: " 2> ") - - for (String contains : config.stackTraceFilters.contains) { - TokenFilter.ContainsString containsFilter = new TokenFilter.ContainsString() - containsFilter.setContains(contains) - stackFilter.addContainsString(containsFilter) - } - for (String pattern : config.stackTraceFilters.patterns) { - TokenFilter.ContainsRegex regexFilter = new TokenFilter.ContainsRegex() - regexFilter.setPattern(pattern) - stackFilter.addContainsRegex(regexFilter) - } - } - - @Subscribe - void onChildBootstrap(ChildBootstrap e) throws IOException { - logger.info("Started J" + e.getSlave().id + " PID(" + e.getSlave().getPidString() + ")."); - } - - @Subscribe - void onHeartbeat(HeartBeatEvent e) throws IOException { - logger.warn("HEARTBEAT J" + e.getSlave().id + " PID(" + e.getSlave().getPidString() + "): " + - formatTime(e.getCurrentTime()) + ", stalled for " + - formatDurationInSeconds(e.getNoEventDuration()) + " at: " + - (e.getDescription() == null ? 
"" : formatDescription(e.getDescription()))) - slowTestsFound = true - } - - @Subscribe - void onQuit(AggregatedQuitEvent e) throws IOException { - if (config.showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) { - List sublist = this.failedTests - StringBuilder b = new StringBuilder() - b.append('Tests with failures') - if (sublist.size() > config.showNumFailuresAtEnd) { - sublist = sublist.subList(0, config.showNumFailuresAtEnd) - b.append(" (first " + config.showNumFailuresAtEnd + " out of " + failedTests.size() + ")") - } - b.append(':\n') - for (Description description : sublist) { - b.append(" - ").append(formatDescription(description, true)).append('\n') - } - logger.warn(b.toString()) - } - if (config.slowTests.summarySize > 0) { - List> sortedSuiteTimes = new ArrayList<>(suiteTimes.entrySet()) - Collections.sort(sortedSuiteTimes, new Comparator>() { - @Override - int compare(Map.Entry o1, Map.Entry o2) { - return o2.value - o1.value // sort descending - } - }) - LogLevel level = slowTestsFound ? LogLevel.WARN : LogLevel.INFO - int numToLog = Math.min(config.slowTests.summarySize, sortedSuiteTimes.size()) - logger.log(level, 'Slow Tests Summary:') - for (int i = 0; i < numToLog; ++i) { - logger.log(level, String.format(Locale.ENGLISH, '%6.2fs | %s', - sortedSuiteTimes.get(i).value / 1000.0, - sortedSuiteTimes.get(i).key)); - } - logger.log(level, '') // extra vertical separation - } - if (failedTests.isEmpty()) { - // summary is already printed for failures - logger.lifecycle('==> Test Summary: ' + getResult().toString()) - } - } - - @Subscribe - void onSuiteStart(AggregatedSuiteStartedEvent e) throws IOException { - if (isPassthrough()) { - SuiteStartedEvent evt = e.getSuiteStartedEvent(); - emitSuiteStart(LogLevel.LIFECYCLE, evt.getDescription()); - } - } - - @Subscribe - void onOutput(PartialOutputEvent e) throws IOException { - if (isPassthrough()) { - // We only allow passthrough output if there is one JVM. - switch (e.getEvent().getType()) { - case EventType.APPEND_STDERR: - ((IStreamEvent) e.getEvent()).copyTo(errStream); - break; - case EventType.APPEND_STDOUT: - ((IStreamEvent) e.getEvent()).copyTo(outStream); - break; - default: - break; - } - } - } - - @Subscribe - void onTestResult(AggregatedTestResultEvent e) throws IOException { - if (isPassthrough() && e.getStatus() != TestStatus.OK) { - flushOutput(); - emitStatusLine(LogLevel.ERROR, e, e.getStatus(), e.getExecutionTime()); - } - - if (!e.isSuccessful()) { - failedTests.add(e.getDescription()); - } - } - - @Subscribe - void onSuiteResult(AggregatedSuiteResultEvent e) throws IOException { - final int completed = suitesCompleted.incrementAndGet(); - - if (e.isSuccessful() && e.getTests().isEmpty()) { - return; - } - if (config.slowTests.summarySize > 0) { - suiteTimes.put(e.getDescription().getDisplayName(), e.getExecutionTime()) - } - - LogLevel level = e.isSuccessful() && config.outputMode != OutputMode.ALWAYS ? LogLevel.INFO : LogLevel.LIFECYCLE - - // We must emit buffered test and stream events (in case of failures). - if (!isPassthrough()) { - emitSuiteStart(level, e.getDescription()) - emitBufferedEvents(level, e) - } - - // Emit a synthetic failure for suite-level errors, if any. - if (!e.getFailures().isEmpty()) { - emitStatusLine(level, e, TestStatus.ERROR, 0) - } - - if (!e.getFailures().isEmpty()) { - failedTests.add(e.getDescription()) - } - - emitSuiteEnd(level, e, completed) - } - - /** Suite prologue. 
*/ - void emitSuiteStart(LogLevel level, Description description) throws IOException { - logger.log(level, 'Suite: ' + description.getDisplayName()); - } - - void emitBufferedEvents(LogLevel level, AggregatedSuiteResultEvent e) throws IOException { - if (config.outputMode == OutputMode.NEVER) { - return - } - - final IdentityHashMap eventMap = new IdentityHashMap<>(); - for (AggregatedTestResultEvent tre : e.getTests()) { - eventMap.put(tre.getTestFinishedEvent(), tre) - } - - final boolean emitOutput = config.outputMode == OutputMode.ALWAYS && isPassthrough() == false || - config.outputMode == OutputMode.ONERROR && e.isSuccessful() == false - - for (IEvent event : e.getEventStream()) { - switch (event.getType()) { - case EventType.APPEND_STDOUT: - if (emitOutput) ((IStreamEvent) event).copyTo(outStream); - break; - - case EventType.APPEND_STDERR: - if (emitOutput) ((IStreamEvent) event).copyTo(errStream); - break; - - case EventType.TEST_FINISHED: - assert eventMap.containsKey(event) - final AggregatedTestResultEvent aggregated = eventMap.get(event); - if (aggregated.getStatus() != TestStatus.OK) { - flushOutput(); - emitStatusLine(level, aggregated, aggregated.getStatus(), aggregated.getExecutionTime()); - } - - default: - break; - } - } - - if (emitOutput) { - flushOutput() - } - } - - void emitSuiteEnd(LogLevel level, AggregatedSuiteResultEvent e, int suitesCompleted) throws IOException { - - final StringBuilder b = new StringBuilder(); - b.append(String.format(Locale.ENGLISH, 'Completed [%d/%d]%s in %.2fs, ', - suitesCompleted, - totalSuites, - e.getSlave().slaves > 1 ? ' on J' + e.getSlave().id : '', - e.getExecutionTime() / 1000.0d)); - b.append(e.getTests().size()).append(Pluralize.pluralize(e.getTests().size(), ' test')); - - int failures = e.getFailureCount(); - if (failures > 0) { - b.append(', ').append(failures).append(Pluralize.pluralize(failures, ' failure')); - } - - int errors = e.getErrorCount(); - if (errors > 0) { - b.append(', ').append(errors).append(Pluralize.pluralize(errors, ' error')); - } - - int ignored = e.getIgnoredCount(); - if (ignored > 0) { - b.append(', ').append(ignored).append(' skipped'); - } - - if (!e.isSuccessful()) { - b.append(' <<< FAILURES!'); - } - - b.append('\n') - logger.log(level, b.toString()); - } - - /** Emit status line for an aggregated event. */ - void emitStatusLine(LogLevel level, AggregatedResultEvent result, TestStatus status, long timeMillis) throws IOException { - final StringBuilder line = new StringBuilder(); - - line.append(Strings.padEnd(statusNames.get(status), 8, ' ' as char)) - line.append(formatDurationInSeconds(timeMillis)) - if (forkedJvmCount > 1) { - line.append(String.format(Locale.ENGLISH, jvmIdFormat, result.getSlave().id)) - } - line.append(' | ') - - line.append(formatDescription(result.getDescription())) - if (!result.isSuccessful()) { - line.append(FAILURE_MARKER) - } - logger.log(level, line.toString()) - - PrintWriter writer = new PrintWriter(new LoggingOutputStream(logger: logger, level: level, prefix: ' > ')) - - if (status == TestStatus.IGNORED && result instanceof AggregatedTestResultEvent) { - writer.write('Cause: ') - writer.write(((AggregatedTestResultEvent) result).getCauseForIgnored()) - writer.flush() - } - - final List failures = result.getFailures(); - if (!failures.isEmpty()) { - int count = 0; - for (FailureMirror fm : failures) { - count++; - if (fm.isAssumptionViolation()) { - writer.write(String.format(Locale.ENGLISH, - 'Assumption #%d: %s', - count, fm.getMessage() == null ? 
'(no message)' : fm.getMessage())); - } else { - writer.write(String.format(Locale.ENGLISH, - 'Throwable #%d: %s', - count, - stackFilter.apply(fm.getTrace()))); - } - } - writer.flush() - } - } - - void flushOutput() throws IOException { - outStream.flush() - errStream.flush() - } - - /** Returns true if output should be logged immediately. */ - boolean isPassthrough() { - return forkedJvmCount == 1 && config.outputMode == OutputMode.ALWAYS - } - - @Override - void setOuter(JUnit4 task) { - owner = task - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index f240ebb52c8ba..28b0c2977f9e2 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -18,13 +18,13 @@ */ package org.elasticsearch.gradle -import com.carrotsearch.gradle.junit4.RandomizedTestingTask import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import org.apache.commons.io.IOUtils import org.apache.tools.ant.taskdefs.condition.Os import org.eclipse.jgit.lib.Constants import org.eclipse.jgit.lib.RepositoryBuilder import org.elasticsearch.gradle.precommit.PrecommitTasks +import org.elasticsearch.gradle.test.ErrorReportingTestListener import org.gradle.api.GradleException import org.gradle.api.InvalidUserDataException import org.gradle.api.JavaVersion @@ -39,8 +39,12 @@ import org.gradle.api.artifacts.ModuleVersionIdentifier import org.gradle.api.artifacts.ProjectDependency import org.gradle.api.artifacts.ResolvedArtifact import org.gradle.api.artifacts.dsl.RepositoryHandler +import org.gradle.api.artifacts.repositories.ArtifactRepository +import org.gradle.api.artifacts.repositories.IvyArtifactRepository +import org.gradle.api.artifacts.repositories.MavenArtifactRepository +import org.gradle.api.credentials.HttpHeaderCredentials +import org.gradle.api.execution.TaskActionListener import org.gradle.api.execution.TaskExecutionGraph -import org.gradle.api.plugins.JavaBasePlugin import org.gradle.api.plugins.JavaPlugin import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin @@ -50,6 +54,8 @@ import org.gradle.api.tasks.bundling.Jar import org.gradle.api.tasks.compile.GroovyCompile import org.gradle.api.tasks.compile.JavaCompile import org.gradle.api.tasks.javadoc.Javadoc +import org.gradle.api.tasks.testing.Test +import org.gradle.authentication.http.HttpHeaderAuthentication import org.gradle.internal.jvm.Jvm import org.gradle.process.ExecResult import org.gradle.process.ExecSpec @@ -81,7 +87,6 @@ class BuildPlugin implements Plugin { ) } project.pluginManager.apply('java') - project.pluginManager.apply('carrotsearch.randomized-testing') configureConfigurations(project) configureJars(project) // jar config must be added before info broker // these plugins add lots of info to our jars @@ -91,8 +96,12 @@ class BuildPlugin implements Plugin { project.pluginManager.apply('nebula.info-scm') project.pluginManager.apply('nebula.info-jar') + // apply global test task failure listener + project.rootProject.pluginManager.apply(TestFailureReportingPlugin) + project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask) + setupSeed(project) globalBuildInfo(project) configureRepositories(project) project.ext.versions = VersionProperties.versions @@ -101,11 +110,25 @@ class BuildPlugin implements Plugin { configureJavadoc(project) 
configureSourcesJar(project) configurePomGeneration(project) - - applyCommonTestConfig(project) - configureTest(project) + configureTestTasks(project) configurePrecommit(project) configureDependenciesInfo(project) + + // Common config when running with a FIPS-140 runtime JVM + // Need to do it here to support external plugins + if (project.ext.inFipsJvm) { + project.tasks.withType(Test) { + systemProperty 'javax.net.ssl.trustStorePassword', 'password' + systemProperty 'javax.net.ssl.keyStorePassword', 'password' + } + project.pluginManager.withPlugin("elasticsearch.testclusters") { + project.testClusters.all { + systemProperty 'javax.net.ssl.trustStorePassword', 'password' + systemProperty 'javax.net.ssl.keyStorePassword', 'password' + } + } + } + } @@ -211,6 +234,7 @@ class BuildPlugin implements Plugin { project.rootProject.ext.runtimeJavaHome = runtimeJavaHome project.rootProject.ext.compilerJavaVersion = compilerJavaVersionEnum project.rootProject.ext.runtimeJavaVersion = runtimeJavaVersionEnum + project.rootProject.ext.isRuntimeJavaHomeSet = compilerJavaHome.equals(runtimeJavaHome) == false project.rootProject.ext.javaVersions = javaVersions project.rootProject.ext.buildChecksDone = true project.rootProject.ext.minimumCompilerVersion = minimumCompilerVersion @@ -229,6 +253,7 @@ class BuildPlugin implements Plugin { project.ext.runtimeJavaHome = project.rootProject.ext.runtimeJavaHome project.ext.compilerJavaVersion = project.rootProject.ext.compilerJavaVersion project.ext.runtimeJavaVersion = project.rootProject.ext.runtimeJavaVersion + project.ext.isRuntimeJavaHomeSet = project.rootProject.ext.isRuntimeJavaHomeSet project.ext.javaVersions = project.rootProject.ext.javaVersions project.ext.inFipsJvm = project.rootProject.ext.inFipsJvm project.ext.gradleJavaVersion = project.rootProject.ext.gradleJavaVersion @@ -557,6 +582,16 @@ class BuildPlugin implements Plugin { /** Adds repositories used by ES dependencies */ static void configureRepositories(Project project) { + project.getRepositories().all { repository -> + if (repository instanceof MavenArtifactRepository) { + final MavenArtifactRepository maven = (MavenArtifactRepository) repository + assertRepositoryURIUsesHttps(maven, project, maven.getUrl()) + repository.getArtifactUrls().each { uri -> assertRepositoryURIUsesHttps(project, uri) } + } else if (repository instanceof IvyArtifactRepository) { + final IvyArtifactRepository ivy = (IvyArtifactRepository) repository + assertRepositoryURIUsesHttps(ivy, project, ivy.getUrl()) + } + } RepositoryHandler repos = project.repositories if (System.getProperty("repos.mavenLocal") != null) { // with -Drepos.mavenLocal=true we can force checking the local .m2 repo which is @@ -566,10 +601,19 @@ class BuildPlugin implements Plugin { } repos.jcenter() repos.ivy { + name "elasticsearch" url "https://artifacts.elastic.co/downloads" patternLayout { artifact "elasticsearch/[module]-[revision](-[classifier]).[ext]" } + // this header is not a credential but we hack the capability to send this header to avoid polluting our download stats + credentials(HttpHeaderCredentials) { + name = "X-Elastic-No-KPI" + value = "1" + } + authentication { + header(HttpHeaderAuthentication) + } } repos.maven { name "elastic" @@ -581,11 +625,17 @@ class BuildPlugin implements Plugin { String revision = (luceneVersion =~ /\w+-snapshot-([a-z0-9]+)/)[0][1] repos.maven { name 'lucene-snapshots' - url "http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}" + url 
"https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}" } } } + private static void assertRepositoryURIUsesHttps(final ArtifactRepository repository, final Project project, final URI uri) { + if (uri != null && uri.toURL().getProtocol().equals("http")) { + throw new GradleException("repository [${repository.name}] on project with path [${project.path}] is using http for artifacts on [${uri.toURL()}]") + } + } + /** * Returns a closure which can be used with a MavenPom for fixing problems with gradle generated poms. * @@ -900,128 +950,107 @@ class BuildPlugin implements Plugin { } } - static void applyCommonTestConfig(Project project) { - project.tasks.withType(RandomizedTestingTask) {task -> - jvm "${project.runtimeJavaHome}/bin/java" - parallelism System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel) - ifNoTests 'fail' - onNonEmptyWorkDirectory 'wipe' - leaveTemporary true - project.sourceSets.matching { it.name == "test" }.all { test -> - task.testClassesDirs = test.output.classesDirs - task.classpath = test.runtimeClasspath - } - group = JavaBasePlugin.VERIFICATION_GROUP - dependsOn 'testClasses' - - // Make sure all test tasks are configured properly - if (name != "test") { - project.tasks.matching { it.name == "test"}.all { testTask -> - task.shouldRunAfter testTask - } - } - if (name == "unitTest") { - include("**/*Tests.class") - } + static void configureTestTasks(Project project) { + // Default test task should run only unit tests + project.tasks.withType(Test).matching { it.name == 'test' }.all { + include '**/*Tests.class' + } - // TODO: why are we not passing maxmemory to junit4? - jvmArg '-Xmx' + System.getProperty('tests.heap.size', '512m') - jvmArg '-Xms' + System.getProperty('tests.heap.size', '512m') - jvmArg '-XX:+HeapDumpOnOutOfMemoryError' + // none of this stuff is applicable to the `:buildSrc` project tests + if (project.path != ':build-tools') { File heapdumpDir = new File(project.buildDir, 'heapdump') - heapdumpDir.mkdirs() - jvmArg '-XX:HeapDumpPath=' + heapdumpDir - if (project.runtimeJavaVersion >= JavaVersion.VERSION_1_9) { - jvmArg '--illegal-access=warn' - } - argLine System.getProperty('tests.jvm.argline') - - // we use './temp' since this is per JVM and tests are forbidden from writing to CWD - systemProperty 'java.io.tmpdir', './temp' - systemProperty 'java.awt.headless', 'true' - systemProperty 'tests.gradle', 'true' - systemProperty 'tests.artifact', project.name - systemProperty 'tests.task', path - systemProperty 'tests.security.manager', 'true' - systemProperty 'jna.nosys', 'true' - systemProperty 'compiler.java', project.ext.compilerJavaVersion.getMajorVersion() - if (project.ext.inFipsJvm) { - systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + "FIPS" - } else { - systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() - } - // TODO: remove setting logging level via system property - systemProperty 'tests.logger.level', 'WARN' - for (Map.Entry property : System.properties.entrySet()) { - if (property.getKey().startsWith('tests.') || - property.getKey().startsWith('es.')) { - if (property.getKey().equals('tests.seed')) { - /* The seed is already set on the project so we - * shouldn't attempt to override it. 
*/ - continue; - } - systemProperty property.getKey(), property.getValue() + + project.tasks.withType(Test) { Test test -> + File testOutputDir = new File(test.reports.junitXml.getDestination(), "output") + + doFirst { + project.mkdir(testOutputDir) + project.mkdir(heapdumpDir) + project.mkdir(test.workingDir) } - } - // TODO: remove this once ctx isn't added to update script params in 7.0 - systemProperty 'es.scripting.update.ctx_in_params', 'false' + def listener = new ErrorReportingTestListener(test.testLogging, testOutputDir) + test.extensions.add(ErrorReportingTestListener, 'errorReportingTestListener', listener) + addTestOutputListener(listener) + addTestListener(listener) - // Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM - if (project.inFipsJvm) { - systemProperty 'javax.net.ssl.trustStorePassword', 'password' - systemProperty 'javax.net.ssl.keyStorePassword', 'password' - } + executable = "${project.runtimeJavaHome}/bin/java" + workingDir = project.file("${project.buildDir}/testrun/${test.name}") + maxParallelForks = project.rootProject.ext.defaultParallel + + exclude '**/*$*.class' - boolean assertionsEnabled = Boolean.parseBoolean(System.getProperty('tests.asserts', 'true')) - enableSystemAssertions assertionsEnabled - enableAssertions assertionsEnabled + jvmArgs "-Xmx${System.getProperty('tests.heap.size', '512m')}", + "-Xms${System.getProperty('tests.heap.size', '512m')}", + '-XX:+HeapDumpOnOutOfMemoryError', + "-XX:HeapDumpPath=$heapdumpDir" - testLogging { - showNumFailuresAtEnd 25 - slowTests { - heartbeat 10 - summarySize 5 + if (project.runtimeJavaVersion >= JavaVersion.VERSION_1_9) { + jvmArgs '--illegal-access=warn' } - stackTraceFilters { - // custom filters: we carefully only omit test infra noise here - contains '.SlaveMain.' - regex(/^(\s+at )(org\.junit\.)/) - // also includes anonymous classes inside these two: - regex(/^(\s+at )(com\.carrotsearch\.randomizedtesting\.RandomizedRunner)/) - regex(/^(\s+at )(com\.carrotsearch\.randomizedtesting\.ThreadLeakControl)/) - regex(/^(\s+at )(com\.carrotsearch\.randomizedtesting\.rules\.)/) - regex(/^(\s+at )(org\.apache\.lucene\.util\.TestRule)/) - regex(/^(\s+at )(org\.apache\.lucene\.util\.AbstractBeforeAfterRule)/) + + if (System.getProperty('tests.jvm.argline')) { + jvmArgs System.getProperty('tests.jvm.argline').split(" ") } - if (System.getProperty('tests.class') != null && System.getProperty('tests.output') == null) { - // if you are debugging, you want to see the output! 
- outputMode 'always' + + if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) { + jvmArgs '-ea', '-esa' + } + + // we use './temp' since this is per JVM and tests are forbidden from writing to CWD + systemProperties 'gradle.dist.lib': new File(project.class.location.toURI()).parent, + 'gradle.worker.jar': "${project.gradle.getGradleUserHomeDir()}/caches/${project.gradle.gradleVersion}/workerMain/gradle-worker.jar", + 'gradle.user.home': project.gradle.getGradleUserHomeDir(), + 'java.io.tmpdir': './temp', + 'java.awt.headless': 'true', + 'tests.gradle': 'true', + 'tests.artifact': project.name, + 'tests.task': path, + 'tests.security.manager': 'true', + 'tests.seed': project.testSeed, + 'jna.nosys': 'true', + 'compiler.java': project.ext.compilerJavaVersion.getMajorVersion() + + if (project.ext.inFipsJvm) { + systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + "FIPS" } else { - outputMode System.getProperty('tests.output', 'onerror') + systemProperty 'runtime.java', project.ext.runtimeJavaVersion.getMajorVersion() + } + // TODO: remove setting logging level via system property + systemProperty 'tests.logger.level', 'WARN' + System.getProperties().each { key, value -> + if ((key.startsWith('tests.') || key.startsWith('es.'))) { + systemProperty key, value + } } - } - balancers { - executionTime cacheFilename: ".local-${project.version}-${name}-execution-times.log" - } + // TODO: remove this once ctx isn't added to update script params in 7.0 + systemProperty 'es.scripting.update.ctx_in_params', 'false' - listeners { - junitReport() - } + // Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM + if (project.inFipsJvm) { + systemProperty 'javax.net.ssl.trustStorePassword', 'password' + systemProperty 'javax.net.ssl.keyStorePassword', 'password' + } + + testLogging { + showExceptions = true + showCauses = true + exceptionFormat = 'full' + } - exclude '**/*$*.class' + project.plugins.withType(ShadowPlugin).whenPluginAdded { + // Test against a shadow jar if we made one + classpath -= project.tasks.compileJava.outputs.files + classpath += project.tasks.shadowJar.outputs.files - project.plugins.withType(ShadowPlugin).whenPluginAdded { - // Test against a shadow jar if we made one - classpath -= project.tasks.compileJava.outputs.files - classpath += project.tasks.shadowJar.outputs.files - dependsOn project.tasks.shadowJar + dependsOn project.tasks.shadowJar + } } } } - private static String findDefaultParallel(Project project) { + private static int findDefaultParallel(Project project) { if (project.file("/proc/cpuinfo").exists()) { // Count physical cores on any Linux distro ( don't count hyper-threading ) Map socketToCore = [:] @@ -1042,7 +1071,7 @@ class BuildPlugin implements Plugin { } } }) - return socketToCore.values().sum().toString(); + return socketToCore.values().sum() } else if ('Mac OS X'.equals(System.getProperty('os.name'))) { // Ask macOS to count physical CPUs for us ByteArrayOutputStream stdout = new ByteArrayOutputStream() @@ -1051,16 +1080,9 @@ class BuildPlugin implements Plugin { args '-n', 'hw.physicalcpu' standardOutput = stdout } - return stdout.toString('UTF-8').trim(); - } - return 'auto'; - } - - /** Configures the test task */ - static Task configureTest(Project project) { - project.tasks.getByName('test') { - include '**/*Tests.class' + return Integer.parseInt(stdout.toString('UTF-8').trim()) } + return Runtime.getRuntime().availableProcessors() / 2 } private static 
configurePrecommit(Project project) { @@ -1090,4 +1112,58 @@ class BuildPlugin implements Plugin { deps.mappings = project.dependencyLicenses.mappings } } + + /** + * Pins the test seed at configuration time so it isn't different on every + * {@link Test} execution. This is useful if random + * decisions in one run of {@linkplain Test} influence the + * outcome of subsequent runs. Pinning the seed up front like this makes + * the reproduction line from one run be useful on another run. + */ + static String setupSeed(Project project) { + if (project.rootProject.ext.has('testSeed')) { + /* Skip this if we've already pinned the testSeed. It is important + * that this checks the rootProject so that we know we've only ever + * initialized one time. */ + return project.rootProject.ext.testSeed + } + + String testSeed = System.getProperty('tests.seed') + if (testSeed == null) { + long seed = new Random(System.currentTimeMillis()).nextLong() + testSeed = Long.toUnsignedString(seed, 16).toUpperCase(Locale.ROOT) + } + + project.rootProject.ext.testSeed = testSeed + return testSeed + } + + private static class TestFailureReportingPlugin implements Plugin { + @Override + void apply(Project project) { + if (project != project.rootProject) { + throw new IllegalStateException("${this.class.getName()} can only be applied to the root project.") + } + + project.gradle.addListener(new TaskActionListener() { + @Override + void beforeActions(Task task) { + + } + + @Override + void afterActions(Task task) { + if (task instanceof Test) { + ErrorReportingTestListener listener = task.extensions.findByType(ErrorReportingTestListener) + if (listener != null && listener.getFailedTests().size() > 0) { + task.logger.lifecycle("\nTests with failures:") + listener.getFailedTests().each { + task.logger.lifecycle(" - ${it.getFullName()}") + } + } + } + } + }) + } + } } diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/LoggingOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggingOutputStream.groovy similarity index 97% rename from buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/LoggingOutputStream.groovy rename to buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggingOutputStream.groovy index ce0995a5a8c70..e2e2b7c954482 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/LoggingOutputStream.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggingOutputStream.groovy @@ -1,4 +1,4 @@ -package com.carrotsearch.gradle.junit4 +package org.elasticsearch.gradle import org.gradle.api.logging.LogLevel import org.gradle.api.logging.Logger diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index ce76ad5d28fc6..a0ce24e45c729 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -20,6 +20,7 @@ package org.elasticsearch.gradle.doc import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.test.ClusterFormationTasks import org.elasticsearch.gradle.test.RestTestPlugin import org.gradle.api.Project import org.gradle.api.Task @@ -46,6 +47,7 @@ public class DocsTestPlugin extends RestTestPlugin { '\\{version_qualified\\}': VersionProperties.elasticsearch, '\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''), '\\{build_flavor\\}' : 
project.integTestCluster.distribution, + '\\{build_type\\}' : ClusterFormationTasks.getOs().equals("windows") ? "zip" : "tar", ] Task listSnippets = project.tasks.create('listSnippets', SnippetsTask) listSnippets.group 'Docs' diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index d56cb1926f3e2..439a60e6c3aaf 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -104,7 +104,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { * format of the response is incompatible i.e. it is not a JSON object. */ static shouldAddShardFailureCheck(String path) { - return path.startsWith('_cat') == false && path.startsWith('_ml/datafeeds/') == false + return path.startsWith('_cat') == false && path.startsWith('_ml/datafeeds/') == false } /** @@ -294,7 +294,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { } void emitDo(String method, String pathAndQuery, String body, - String catchPart, List warnings, boolean inSetup) { + String catchPart, List warnings, boolean inSetup, boolean skipShardFailures) { def (String path, String query) = pathAndQuery.tokenize('?') if (path == null) { path = '' // Catch requests to the root... @@ -346,7 +346,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { * section so we have to skip it there. We also omit the assertion * from APIs that don't return a JSON object */ - if (false == inSetup && shouldAddShardFailureCheck(path)) { + if (false == inSetup && skipShardFailures == false && shouldAddShardFailureCheck(path)) { current.println(" - is_false: _shards.failures") } } @@ -394,7 +394,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { pathAndQuery = pathAndQuery.substring(1) } emitDo(method, pathAndQuery, body, catchPart, snippet.warnings, - inSetup) + inSetup, snippet.skipShardsFailures) } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy index fbc231aa764dc..c1dbddd9e9d52 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy @@ -43,9 +43,9 @@ public class SnippetsTask extends DefaultTask { private static final String SKIP = /skip:([^\]]+)/ private static final String SETUP = /setup:([^ \]]+)/ private static final String WARNING = /warning:(.+)/ - private static final String CAT = /(_cat)/ + private static final String NON_JSON = /(non_json)/ private static final String TEST_SYNTAX = - /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP|$WARNING) ?/ + /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP|$WARNING|(skip_shard_failures)) ?/ /** * Action to take on each snippet. 
Called with a single parameter, an @@ -233,6 +233,10 @@ public class SnippetsTask extends DefaultTask { snippet.warnings.add(it.group(7)) return } + if (it.group(8) != null) { + snippet.skipShardsFailures = true + return + } throw new InvalidUserDataException( "Invalid test marker: $line") } @@ -251,12 +255,12 @@ public class SnippetsTask extends DefaultTask { substitutions = [] } String loc = "$file:$lineNumber" - parse(loc, matcher.group(2), /(?:$SUBSTITUTION|$CAT|$SKIP) ?/) { + parse(loc, matcher.group(2), /(?:$SUBSTITUTION|$NON_JSON|$SKIP) ?/) { if (it.group(1) != null) { // TESTRESPONSE[s/adsf/jkl/] substitutions.add([it.group(1), it.group(2)]) } else if (it.group(3) != null) { - // TESTRESPONSE[_cat] + // TESTRESPONSE[non_json] substitutions.add(['^', '/']) substitutions.add(['\n$', '\\\\s*/']) substitutions.add(['( +)', '$1\\\\s+']) @@ -329,6 +333,7 @@ public class SnippetsTask extends DefaultTask { String setup = null boolean curl List warnings = new ArrayList() + boolean skipShardsFailures = false @Override public String toString() { @@ -359,6 +364,9 @@ public class SnippetsTask extends DefaultTask { for (String warning in warnings) { result += "[warning:$warning]" } + if (skipShardsFailures) { + result += '[skip_shard_failures]' + } } if (testResponse) { result += '// TESTRESPONSE' diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index 0a53787c10597..0053c0a40b4d8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -118,11 +118,16 @@ class ClusterConfiguration { if (seedNode == node) { return null } - ant.waitfor(maxwait: '40', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') { + ant.waitfor(maxwait: '40', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', + timeoutproperty: "failed.${seedNode.transportPortsFile.path}") { resourceexists { file(file: seedNode.transportPortsFile.toString()) } } + if (ant.properties.containsKey("failed.${seedNode.transportPortsFile.path}".toString())) { + throw new GradleException("Failed to locate seed node transport file [${seedNode.transportPortsFile}]: " + + "timed out waiting for it to be created after 40 seconds") + } return seedNode.transportUri() } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 7a0a7d9436ee2..7c7ff964312f0 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -194,7 +194,7 @@ class ClusterFormationTasks { } Version version = Version.fromString(elasticsearchVersion) String os = getOs() - String classifier = "${os}-x86_64" + String classifier = "-${os}-x86_64" String packaging = os.equals('windows') ? 'zip' : 'tar.gz' String artifactName = 'elasticsearch' if (distro.equals('oss') && Version.fromString(elasticsearchVersion).onOrAfter('6.3.0')) { @@ -204,6 +204,7 @@ class ClusterFormationTasks { String snapshotProject = "${os}-${os.equals('windows') ? 
'zip' : 'tar'}" if (version.before("7.0.0")) { snapshotProject = "zip" + packaging = "zip" } if (distro.equals("oss")) { snapshotProject = "oss-" + snapshotProject @@ -274,7 +275,7 @@ class ClusterFormationTasks { } setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node) setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node) - setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, distribution) + setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, distribution, config.distribution) setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, writeConfig) setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node) setup = configureAddKeystoreSettingTasks(prefix, project, setup, node) @@ -304,6 +305,12 @@ class ClusterFormationTasks { // its run after plugins have been installed, as the extra config files may belong to plugins setup = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, setup, node) + // If the node runs in a FIPS 140-2 JVM, the BCFKS default keystore will be password protected + if (project.inFipsJvm){ + node.config.systemProperties.put('javax.net.ssl.trustStorePassword', 'password') + node.config.systemProperties.put('javax.net.ssl.keyStorePassword', 'password') + } + // extra setup commands for (Map.Entry command : node.config.setupCommands.entrySet()) { // the first argument is the actual script name, relative to home @@ -343,14 +350,15 @@ class ClusterFormationTasks { } /** Adds a task to extract the elasticsearch distribution */ - static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node, Configuration configuration) { + static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node, + Configuration configuration, String distribution) { List extractDependsOn = [configuration, setup] /* configuration.singleFile will be an external artifact if this is being run by a plugin not living in the elasticsearch source tree. If this is a plugin built in the elasticsearch source tree or this is a distro in the elasticsearch source tree then this should be the version of elasticsearch built by the source tree. If it isn't then Bad Things(TM) will happen. 
*/ Task extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { - if (getOs().equals("windows")) { + if (getOs().equals("windows") || distribution.equals("integ-test-zip") || node.nodeVersion.before("7.0.0")) { from { project.zipTree(configuration.singleFile) } @@ -667,7 +675,13 @@ class ClusterFormationTasks { static Task configureExecTask(String name, Project project, Task setup, NodeInfo node, Object[] execArgs) { return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) { Exec exec -> exec.workingDir node.cwd - exec.environment 'JAVA_HOME', node.getJavaHome() + if (project.isRuntimeJavaHomeSet || node.nodeVersion.before(Version.fromString("7.0.0")) || + node.config.distribution == 'integ-test-zip') { + exec.environment.put('JAVA_HOME', project.runtimeJavaHome) + } else { + // force JAVA_HOME to *not* be set + exec.environment.remove('JAVA_HOME') + } if (Os.isFamily(Os.FAMILY_WINDOWS)) { exec.executable 'cmd' exec.args '/C', 'call' @@ -684,9 +698,21 @@ class ClusterFormationTasks { static Task configureStartTask(String name, Project project, Task setup, NodeInfo node) { // this closure is converted into ant nodes by groovy's AntBuilder Closure antRunner = { AntBuilder ant -> - ant.exec(executable: node.executable, spawn: node.config.daemonize, dir: node.cwd, taskname: 'elasticsearch') { + ant.exec(executable: node.executable, spawn: node.config.daemonize, newenvironment: true, + dir: node.cwd, taskname: 'elasticsearch') { node.env.each { key, value -> env(key: key, value: value) } + if (project.isRuntimeJavaHomeSet || node.nodeVersion.before(Version.fromString("7.0.0")) || + node.config.distribution == 'integ-test-zip') { + env(key: 'JAVA_HOME', value: project.runtimeJavaHome) + } node.args.each { arg(value: it) } + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + // Having no TMP on Windows defaults to C:\Windows and permission errors + // Since we configure ant to run with a new environment above, we need to explicitly pass this + String tmp = System.getenv("TMP") + assert tmp != null + env(key: "TMP", value: tmp) + } } } @@ -922,6 +948,8 @@ class ClusterFormationTasks { } doLast { project.delete(node.pidFile) + // Large tests can exhaust disk space, clean up jdk from the distribution to save some space + project.delete(new File(node.homeDir, "jdk")) } } } @@ -971,9 +999,9 @@ class ClusterFormationTasks { /** Find the current OS */ static String getOs() { String os = "linux" - if (Os.FAMILY_WINDOWS) { + if (Os.isFamily(Os.FAMILY_WINDOWS)) { os = "windows" - } else if (Os.FAMILY_MAC) { + } else if (Os.isFamily(Os.FAMILY_MAC)) { os = "darwin" } return os diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 63af1dda03c3d..ae365038ccf3f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -23,7 +23,6 @@ import com.sun.jna.Native import com.sun.jna.WString import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.Version -import org.gradle.api.InvalidUserDataException import org.gradle.api.Project import java.nio.file.Files @@ -240,11 +239,6 @@ class NodeInfo { return Native.toString(shortPath).substring(4) } - /** Return the java home used by this node. */ - String getJavaHome() { - return javaVersion == null ? 
project.runtimeJavaHome : project.javaVersions.get(javaVersion) - } - /** Returns debug string for the command that started this node. */ String getCommandString() { String esCommandString = "\nNode ${nodeNum} configuration:\n" @@ -252,7 +246,6 @@ class NodeInfo { esCommandString += "| cwd: ${cwd}\n" esCommandString += "| command: ${executable} ${args.join(' ')}\n" esCommandString += '| environment:\n' - esCommandString += "| JAVA_HOME: ${javaHome}\n" env.each { k, v -> esCommandString += "| ${k}: ${v}\n" } if (config.daemonize) { esCommandString += "|\n| [${wrapperScript.name}]\n" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 218a6c87dbbf0..e19f248515073 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -18,28 +18,34 @@ */ package org.elasticsearch.gradle.test -import com.carrotsearch.gradle.junit4.RandomizedTestingTask import org.elasticsearch.gradle.VersionProperties import org.gradle.api.DefaultTask import org.gradle.api.Task import org.gradle.api.execution.TaskExecutionAdapter +import org.gradle.api.logging.Logger +import org.gradle.api.logging.Logging import org.gradle.api.tasks.Copy import org.gradle.api.tasks.Input import org.gradle.api.tasks.TaskState import org.gradle.api.tasks.options.Option +import org.gradle.api.tasks.testing.Test import org.gradle.plugins.ide.idea.IdeaPlugin +import org.gradle.process.CommandLineArgumentProvider import java.nio.charset.StandardCharsets import java.nio.file.Files import java.util.stream.Stream + /** * A wrapper task around setting up a cluster and running rest tests. */ -public class RestIntegTestTask extends DefaultTask { +class RestIntegTestTask extends DefaultTask { + + private static final Logger LOGGER = Logging.getLogger(RestIntegTestTask) protected ClusterConfiguration clusterConfig - protected RandomizedTestingTask runner + protected Test runner protected Task clusterInit @@ -50,36 +56,67 @@ public class RestIntegTestTask extends DefaultTask { @Input Boolean includePackaged = false - public RestIntegTestTask() { - runner = project.tasks.create("${name}Runner", RandomizedTestingTask.class) + RestIntegTestTask() { + runner = project.tasks.create("${name}Runner", Test.class) super.dependsOn(runner) clusterInit = project.tasks.create(name: "${name}Cluster#init", dependsOn: project.testClasses) runner.dependsOn(clusterInit) clusterConfig = project.extensions.create("${name}Cluster", ClusterConfiguration.class, project) + // disable the build cache for rest test tasks + // there are a number of inputs we aren't properly tracking here so we'll just not cache these for now + runner.outputs.doNotCacheIf('Caching is disabled for REST integration tests') { true } + // override/add more for rest tests - runner.parallelism = '1' + runner.maxParallelForks = 1 runner.include('**/*IT.class') runner.systemProperty('tests.rest.load_packaged', 'false') + /* + * We use lazy-evaluated strings in order to configure system properties whose value will not be known until + * execution time (e.g. cluster port numbers). Adding these via the normal DSL doesn't work as these get treated + * as task inputs and therefore Gradle attempts to snapshot them before/after task execution. This fails due + * to the GStrings containing references to non-serializable objects. 
+ * + * We bypass this by instead passing this system properties vi a CommandLineArgumentProvider. This has the added + * side-effect that these properties are NOT treated as inputs, therefore they don't influence things like the + * build cache key or up to date checking. + */ + def nonInputProperties = new CommandLineArgumentProvider() { + private final Map systemProperties = [:] + + void systemProperty(String key, Object value) { + systemProperties.put(key, value) + } + + @Override + Iterable asArguments() { + return systemProperties.collect { key, value -> + "-D${key}=${value.toString()}".toString() + } + } + } + runner.jvmArgumentProviders.add(nonInputProperties) + runner.ext.nonInputProperties = nonInputProperties + if (System.getProperty("tests.rest.cluster") == null) { if (System.getProperty("tests.cluster") != null) { throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null") } // we pass all nodes to the rest cluster to allow the clients to round-robin between them // this is more realistic than just talking to a single node - runner.systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}") - runner.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}") + nonInputProperties.systemProperty('tests.rest.cluster', "${-> nodes.collect { it.httpUri() }.join(",")}") + nonInputProperties.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}") // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass // both as separate sysprops - runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") + nonInputProperties.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") // dump errors and warnings from cluster log on failure TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() { @Override void afterExecute(Task task, TaskState state) { - if (state.failure != null) { + if (task == runner && state.failure != null) { for (NodeInfo nodeInfo : nodes) { printLogExcerpt(nodeInfo) } @@ -167,9 +204,9 @@ public class RestIntegTestTask extends DefaultTask { /** Print out an excerpt of the log from the given node. */ protected static void printLogExcerpt(NodeInfo nodeInfo) { File logFile = new File(nodeInfo.homeDir, "logs/${nodeInfo.clusterName}.log") - println("\nCluster ${nodeInfo.clusterName} - node ${nodeInfo.nodeNum} log excerpt:") - println("(full log at ${logFile})") - println('-----------------------------------------') + LOGGER.lifecycle("\nCluster ${nodeInfo.clusterName} - node ${nodeInfo.nodeNum} log excerpt:") + LOGGER.lifecycle("(full log at ${logFile})") + LOGGER.lifecycle('-----------------------------------------') Stream stream = Files.lines(logFile.toPath(), StandardCharsets.UTF_8) try { boolean inStartup = true @@ -184,9 +221,9 @@ public class RestIntegTestTask extends DefaultTask { } if (inStartup || inExcerpt) { if (linesSkipped != 0) { - println("... SKIPPED ${linesSkipped} LINES ...") + LOGGER.lifecycle("... 
SKIPPED ${linesSkipped} LINES ...") } - println(line) + LOGGER.lifecycle(line) linesSkipped = 0 } else { ++linesSkipped @@ -198,7 +235,7 @@ public class RestIntegTestTask extends DefaultTask { } finally { stream.close() } - println('=========================================') + LOGGER.lifecycle('=========================================') } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index 9e41466ebdd73..2a858206ebd72 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -20,7 +20,8 @@ package org.elasticsearch.gradle.test -import com.carrotsearch.gradle.junit4.RandomizedTestingPlugin + +import groovy.transform.CompileStatic import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask import org.elasticsearch.gradle.VersionProperties @@ -28,48 +29,66 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.InvalidUserDataException import org.gradle.api.Plugin import org.gradle.api.Project +import org.gradle.api.artifacts.Configuration import org.gradle.api.plugins.JavaBasePlugin +import org.gradle.api.plugins.JavaPlugin +import org.gradle.api.tasks.SourceSet +import org.gradle.api.tasks.SourceSetContainer import org.gradle.api.tasks.compile.JavaCompile +import org.gradle.api.tasks.testing.Test +import org.gradle.plugins.ide.eclipse.model.EclipseModel +import org.gradle.plugins.ide.idea.model.IdeaModel + /** * Configures the build to compile tests against Elasticsearch's test framework * and run REST tests. Use BuildPlugin if you want to build main code as well * as tests. 
*/ -public class StandaloneRestTestPlugin implements Plugin { +@CompileStatic +class StandaloneRestTestPlugin implements Plugin { @Override - public void apply(Project project) { + void apply(Project project) { if (project.pluginManager.hasPlugin('elasticsearch.build')) { throw new InvalidUserDataException('elasticsearch.standalone-test ' + 'elasticsearch.standalone-rest-test, and elasticsearch.build ' + 'are mutually exclusive') } project.pluginManager.apply(JavaBasePlugin) - project.pluginManager.apply(RandomizedTestingPlugin) project.getTasks().create("buildResources", ExportElasticsearchBuildResourcesTask) BuildPlugin.globalBuildInfo(project) BuildPlugin.configureRepositories(project) - BuildPlugin.applyCommonTestConfig(project) + BuildPlugin.configureTestTasks(project) // only setup tests to build - project.sourceSets.create('test') + SourceSetContainer sourceSets = project.extensions.getByType(SourceSetContainer) + SourceSet testSourceSet = sourceSets.create('test') + + project.tasks.withType(Test) { Test test -> + test.testClassesDirs = testSourceSet.output.classesDirs + test.classpath = testSourceSet.runtimeClasspath + } + // create a compileOnly configuration as others might expect it project.configurations.create("compileOnly") project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}") - project.eclipse.classpath.sourceSets = [project.sourceSets.test] - project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime] - project.idea.module.testSourceDirs += project.sourceSets.test.java.srcDirs - project.idea.module.scopes['TEST'] = [plus: [project.configurations.testRuntime]] + EclipseModel eclipse = project.extensions.getByType(EclipseModel) + eclipse.classpath.sourceSets = [testSourceSet] + eclipse.classpath.plusConfigurations = [project.configurations.getByName(JavaPlugin.TEST_RUNTIME_CLASSPATH_CONFIGURATION_NAME)] + + IdeaModel idea = project.extensions.getByType(IdeaModel) + idea.module.testSourceDirs += testSourceSet.java.srcDirs + idea.module.scopes.put('TEST', [plus: [project.configurations.getByName(JavaPlugin.TEST_RUNTIME_CLASSPATH_CONFIGURATION_NAME)]] as Map>) PrecommitTasks.create(project, false) - project.check.dependsOn(project.precommit) + project.tasks.getByName('check').dependsOn(project.tasks.getByName('precommit')) - project.tasks.withType(JavaCompile) { + project.tasks.withType(JavaCompile) { JavaCompile task -> // This will be the default in Gradle 5.0 - if (options.compilerArgs.contains("-processor") == false) { - options.compilerArgs << '-proc:none' + if (task.options.compilerArgs.contains("-processor") == false) { + task.options.compilerArgs << '-proc:none' } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy index 95818240cdaaa..ccdffd6458aa4 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy @@ -19,34 +19,30 @@ package org.elasticsearch.gradle.test -import com.carrotsearch.gradle.junit4.RandomizedTestingTask +import groovy.transform.CompileStatic import org.elasticsearch.gradle.BuildPlugin import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.plugins.JavaBasePlugin +import org.gradle.api.tasks.testing.Test /** * Configures the build to compile against Elasticsearch's test framework and * run 
integration and unit tests. Use BuildPlugin if you want to build main * code as well as tests. */ -public class StandaloneTestPlugin implements Plugin { +@CompileStatic +class StandaloneTestPlugin implements Plugin { @Override - public void apply(Project project) { + void apply(Project project) { project.pluginManager.apply(StandaloneRestTestPlugin) - Map testOptions = [ - name: 'test', - type: RandomizedTestingTask, - dependsOn: 'testClasses', - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Runs unit tests that are separate' - ] - RandomizedTestingTask test = project.tasks.create(testOptions) + Test test = project.tasks.create('test', Test) + test.group = JavaBasePlugin.VERIFICATION_GROUP + test.description = 'Runs unit tests that are separate' + BuildPlugin.configureCompile(project) - test.classpath = project.sourceSets.test.runtimeClasspath - test.testClassesDirs = project.sourceSets.test.output.classesDirs - test.mustRunAfter(project.precommit) - project.check.dependsOn(test) + test.mustRunAfter(project.tasks.getByName('precommit')) + project.tasks.getByName('check').dependsOn(test) } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy index e15759a1fe588..0be294fb00523 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy @@ -18,7 +18,7 @@ */ package org.elasticsearch.gradle.vagrant -import com.carrotsearch.gradle.junit4.LoggingOutputStream +import org.elasticsearch.gradle.LoggingOutputStream import org.gradle.api.GradleScriptException import org.gradle.api.logging.Logger import org.gradle.internal.logging.progress.ProgressLogger diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy index e899c0171298b..f3031f73c236d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy @@ -18,7 +18,7 @@ */ package org.elasticsearch.gradle.vagrant -import com.carrotsearch.gradle.junit4.LoggingOutputStream +import org.elasticsearch.gradle.LoggingOutputStream import org.gradle.internal.logging.progress.ProgressLogger /** diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 8f54d63f4ca14..bde3d7b5c0347 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -25,11 +25,12 @@ class VagrantTestPlugin implements Plugin { 'centos-7', 'debian-8', 'debian-9', - 'fedora-27', 'fedora-28', + 'fedora-29', 'oel-6', 'oel-7', 'opensuse-42', + /* TODO: need a real RHEL license now that it is out of beta 'rhel-8',*/ 'sles-12', 'ubuntu-1404', 'ubuntu-1604', @@ -60,7 +61,15 @@ class VagrantTestPlugin implements Plugin { 'packages:rpm', 'packages:oss-rpm', 'packages:deb', - 'packages:oss-deb' + 'packages:oss-deb', + 'archives:no-jdk-linux-tar', + 'archives:oss-no-jdk-linux-tar', + 'archives:no-jdk-windows-zip', + 'archives:oss-no-jdk-windows-zip', + 'packages:no-jdk-rpm', + 
'packages:oss-no-jdk-rpm', + 'packages:no-jdk-deb', + 'packages:oss-no-jdk-deb' ]) /** Packages onboarded for upgrade tests **/ @@ -167,6 +176,7 @@ class VagrantTestPlugin implements Plugin { which should work for 5.0.0+. This isn't a real ivy repository but gradle is fine with that */ repos.ivy { + name "elasticsearch" artifactPattern "https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext]" } } @@ -191,26 +201,38 @@ class VagrantTestPlugin implements Plugin { dependencies.add(project.dependencies.project(path: ":distribution:${it}", configuration: 'default')) } - // The version of elasticsearch that we upgrade *from* - VersionCollection.UnreleasedVersionInfo unreleasedInfo = project.bwcVersions.unreleasedInfo(upgradeFromVersion) - if (unreleasedInfo != null) { - // handle snapshots pointing to bwc build - UPGRADE_FROM_ARCHIVES.each { - dependencies.add(project.dependencies.project( - path: ":distribution:bwc:${unreleasedInfo.gradleProjectName}", configuration: it)) - if (upgradeFromVersion.onOrAfter('6.3.0')) { + if (project.ext.bwc_tests_enabled) { + // The version of elasticsearch that we upgrade *from* + // we only add them as dependencies if the bwc tests are enabled, so we don't trigger builds otherwise + VersionCollection.UnreleasedVersionInfo unreleasedInfo = project.bwcVersions.unreleasedInfo(upgradeFromVersion) + if (unreleasedInfo != null) { + // handle snapshots pointing to bwc build + UPGRADE_FROM_ARCHIVES.each { dependencies.add(project.dependencies.project( - path: ":distribution:bwc:${unreleasedInfo.gradleProjectName}", configuration: "oss-${it}")) + path: ":distribution:bwc:${unreleasedInfo.gradleProjectName}", configuration: it)) + if (upgradeFromVersion.onOrAfter('6.3.0')) { + dependencies.add(project.dependencies.project( + path: ":distribution:bwc:${unreleasedInfo.gradleProjectName}", configuration: "oss-${it}")) + } } - } - } else { - UPGRADE_FROM_ARCHIVES.each { - // The version of elasticsearch that we upgrade *from* - dependencies.add("downloads.${it}:elasticsearch:${upgradeFromVersion}@${it}") - if (upgradeFromVersion.onOrAfter('6.3.0')) { - dependencies.add("downloads.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}") + } else { + UPGRADE_FROM_ARCHIVES.each { + // The version of elasticsearch that we upgrade *from* + if (upgradeFromVersion.onOrAfter('7.0.0')) { + String arch = it == "rpm" ? "x86_64" : "amd64" + dependencies.add("downloads.${it}:elasticsearch:${upgradeFromVersion}-${arch}@${it}") + dependencies.add("downloads.${it}:elasticsearch-oss:${upgradeFromVersion}-${arch}@${it}") + } else { + dependencies.add("downloads.${it}:elasticsearch:${upgradeFromVersion}@${it}") + if (upgradeFromVersion.onOrAfter('6.3.0')) { + dependencies.add("downloads.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}") + } + } } } + } else { + // Upgrade tests will go from current to current when the BWC tests are disabled to skip real BWC tests. 
+ upgradeFromVersion = Version.fromString(project.version) } for (Object dependency : dependencies) { diff --git a/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java index 0174f576e2bcc..b5327ed6322b0 100644 --- a/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java +++ b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java @@ -18,6 +18,7 @@ */ package org.elasticsearch; +import org.elasticsearch.gradle.LoggedExec; import org.gradle.api.Action; import org.gradle.api.Project; import org.gradle.api.file.CopySpec; @@ -25,6 +26,7 @@ import org.gradle.api.file.FileTree; import org.gradle.api.tasks.WorkResult; import org.gradle.process.ExecResult; +import org.gradle.process.ExecSpec; import org.gradle.process.JavaExecSpec; import java.io.File; @@ -70,4 +72,8 @@ public FileTree zipTree(File zipPath) { public FileCollection fileTree(File dir) { return project.fileTree(dir); } + + public void loggedExec(Action action) { + LoggedExec.exec(project, action); + } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java index 721eddb52915b..f0e406e00ed68 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java @@ -20,23 +20,36 @@ public enum Distribution { - INTEG_TEST("integ-test", "zip"), - ZIP("elasticsearch", "zip"), - ZIP_OSS("elasticsearch-oss", "zip"); + INTEG_TEST("integ-test"), + DEFAULT("elasticsearch"), + OSS("elasticsearch-oss"); private final String fileName; - private final String fileExtension; - Distribution(String name, String fileExtension) { + Distribution(String name) { this.fileName = name; - this.fileExtension = fileExtension; } - public String getFileName() { + public String getArtifactName() { return fileName; } public String getFileExtension() { - return fileExtension; + if (this.equals(INTEG_TEST)) { + return "zip"; + } else { + return OS.conditionalString() + .onUnix(() -> "tar.gz") + .onWindows(() -> "zip") + .supply(); + } + } + + public String getClassifier() { + return OS.conditional() + .onLinux(() -> "linux-x86_64") + .onWindows(() -> "windows-x86_64") + .onMac(() -> "darwin-x86_64") + .supply(); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/OS.java b/buildSrc/src/main/java/org/elasticsearch/gradle/OS.java new file mode 100644 index 0000000000000..a8f158270cb9c --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/OS.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.gradle; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.function.Supplier; + +public enum OS { + WINDOWS, + MAC, + LINUX; + + public static OS current() { + String os = System.getProperty("os.name", ""); + if (os.startsWith("Windows")) { + return OS.WINDOWS; + } + if (os.startsWith("Linux") || os.startsWith("LINUX")) { + return OS.LINUX; + } + if (os.startsWith("Mac")) { + return OS.MAC; + } + throw new IllegalStateException("Can't determine OS from: " + os); + } + + public static class Conditional { + + private final Map> conditions = new HashMap<>(); + + public Conditional onWindows(Supplier supplier) { + conditions.put(WINDOWS, supplier); + return this; + } + + public Conditional onLinux(Supplier supplier) { + conditions.put(LINUX, supplier); + return this; + } + + public Conditional onMac(Supplier supplier) { + conditions.put(MAC, supplier); + return this; + } + + public Conditional onUnix(Supplier supplier) { + conditions.put(MAC, supplier); + conditions.put(LINUX, supplier); + return this; + } + + public T supply() { + HashSet missingOS = new HashSet<>(Arrays.asList(OS.values())); + missingOS.removeAll(conditions.keySet()); + if (missingOS.isEmpty() == false) { + throw new IllegalArgumentException("No condition specified for " + missingOS); + } + return conditions.get(OS.current()).get(); + } + + } + + public static Conditional conditional() { + return new Conditional<>(); + } + + public static Conditional conditionalString() { + return conditional(); + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java index e2af34dbabdc0..3d1b5fb4ebf56 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java @@ -171,30 +171,38 @@ public UnreleasedVersionInfo unreleasedInfo(Version version) { } public void forPreviousUnreleased(Consumer consumer) { - getUnreleased().stream() + List collect = getUnreleased().stream() .filter(version -> version.equals(currentVersion) == false) - .forEach(version -> consumer.accept( - new UnreleasedVersionInfo( + .map(version -> new UnreleasedVersionInfo( version, getBranchFor(version), getGradleProjectNameFor(version) ) - )); + ) + .collect(Collectors.toList()); + + collect.forEach(uvi -> consumer.accept(uvi)); } private String getGradleProjectNameFor(Version version) { if (version.equals(currentVersion)) { throw new IllegalArgumentException("The Gradle project to build " + version + " is the current build."); } + Map> releasedMajorGroupedByMinor = getReleasedMajorGroupedByMinor(); if (version.getRevision() == 0) { - if (releasedMajorGroupedByMinor - .get(releasedMajorGroupedByMinor.keySet().stream().max(Integer::compareTo).orElse(0)) - .contains(version)) { - return "minor"; + List unreleasedStagedOrMinor = getUnreleased().stream() + .filter(v -> v.getRevision() == 0) + .collect(Collectors.toList()); + if (unreleasedStagedOrMinor.size() > 2) { + if (unreleasedStagedOrMinor.get(unreleasedStagedOrMinor.size() - 2).equals(version)) { + return "minor"; + } else{ + return "staged"; + } } else { - return "staged"; + return "minor"; } } else { if (releasedMajorGroupedByMinor @@ -210,7 +218,14 @@ private String getGradleProjectNameFor(Version version) { private String getBranchFor(Version version) { switch (getGradleProjectNameFor(version)) { case "minor": 
- return version.getMajor() + ".x"; + // The .x branch will always point to the latest minor (for that major), so a "minor" project will be on the .x branch + // unless there is a more recent (higher) minor. + final Version latestInMajor = getLatestVersionByKey(groupByMajor, version.getMajor()); + if (latestInMajor.getMinor() == version.getMinor() && isFinalMinor(version) == false) { + return version.getMajor() + ".x"; + } else { + return version.getMajor() + "." + version.getMinor(); + } case "staged": case "maintenance": case "bugfix": @@ -220,13 +235,30 @@ private String getBranchFor(Version version) { } } + /** + * There is no way to infer that 6.8 is the final minor release in the 6.x series until we add a 7.0.1 or 7.1.0 version. + * Based on the available versions (7.0.0, 6.7.0, 6.6.1, 6.6.0) the logical conclusion is that 7.0.0 is "master" and 6.8.0 is "6.x". + * This method forces 6.8.0 to be recognised as being on the "6.8" branch. + */ + private boolean isFinalMinor(Version version) { + return (version.getMajor() == 6 && version.getMinor() == 8); + } + public List<Version> getUnreleased() { List<Version> unreleased = new ArrayList<>(); // The current version is being worked, is always unreleased unreleased.add(currentVersion); // the tip of the previous major is unreleased for sure, be it a minor or a bugfix - unreleased.add(getLatestVersionByKey(this.groupByMajor, currentVersion.getMajor() - 1)); + final Version latestOfPreviousMajor = getLatestVersionByKey(this.groupByMajor, currentVersion.getMajor() - 1); + unreleased.add(latestOfPreviousMajor); + if (latestOfPreviousMajor.getRevision() == 0) { + // if the previous major is an x.y.0 release, then the tip of the minor before that (y-1) is also unreleased + final Version previousMinor = getLatestInMinor(latestOfPreviousMajor.getMajor(), latestOfPreviousMajor.getMinor() - 1); + if (previousMinor != null) { + unreleased.add(previousMinor); + } + } final Map<Integer, List<Version>> groupByMinor = getReleasedMajorGroupedByMinor(); int greatestMinor = groupByMinor.keySet().stream().max(Integer::compareTo).orElse(0); @@ -239,8 +271,10 @@ public List<Version> getUnreleased() { unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 1)); if (groupByMinor.getOrDefault(greatestMinor - 1, emptyList()).size() == 1) { // we found that the previous minor is staged but not yet released - // in this case, the minor before that has a bugfix - unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2)); + // in this case, the minor before that has a bugfix, should there be such a minor + if (greatestMinor >= 2) { + unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2)); + } } } @@ -252,6 +286,13 @@ public List<Version> getUnreleased() { ); } + private Version getLatestInMinor(int major, int minor) { + return groupByMajor.get(major).stream() + .filter(v -> v.getMinor() == minor) + .max(Version::compareTo) + .orElse(null); + } + private Version getLatestVersionByKey(Map<Integer, List<Version>> groupByMajor, int key) { return groupByMajor.getOrDefault(key, emptyList()).stream() .max(Version::compareTo) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java b/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java new file mode 100644 index 0000000000000..a8680ef13dda0 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java @@ -0,0 +1,233 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.http; + +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; + +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.KeyManager; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.HttpURLConnection; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.SecureRandom; +import java.security.cert.Certificate; +import java.security.cert.CertificateFactory; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; +import java.util.Enumeration; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +/** + * A utility to wait for a specific HTTP resource to be available, optionally with customized TLS trusted CAs. + * This is logically similar to using the Ant Get task to retrieve a resource, but with the difference that it can + * access resources that do not use the JRE's default trusted CAs. + */ +public class WaitForHttpResource { + + private static final Logger logger = Logging.getLogger(WaitForHttpResource.class); + + private Set validResponseCodes = Collections.singleton(200); + private URL url; + private Set certificateAuthorities; + private File trustStoreFile; + private String trustStorePassword; + private String username; + private String password; + + public WaitForHttpResource(String protocol, String host, int numberOfNodes) throws MalformedURLException { + this(new URL(protocol + "://" + host + "/_cluster/health?wait_for_nodes=>=" + numberOfNodes + "&wait_for_status=yellow")); + } + + public WaitForHttpResource(URL url) { + this.url = url; + } + + public void setValidResponseCodes(int... validResponseCodes) { + this.validResponseCodes = new HashSet<>(validResponseCodes.length); + for (int rc : validResponseCodes) { + this.validResponseCodes.add(rc); + } + } + + public void setCertificateAuthorities(File... 
certificateAuthorities) { + this.certificateAuthorities = new HashSet<>(Arrays.asList(certificateAuthorities)); + } + + public void setTrustStoreFile(File trustStoreFile) { + this.trustStoreFile = trustStoreFile; + } + + public void setTrustStorePassword(String trustStorePassword) { + this.trustStorePassword = trustStorePassword; + } + + public void setUsername(String username) { + this.username = username; + } + + public void setPassword(String password) { + this.password = password; + } + + public boolean wait(int durationInMs) throws GeneralSecurityException, InterruptedException, IOException { + final long waitUntil = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(durationInMs); + final long sleep = Long.max(durationInMs / 10, 100); + + final SSLContext ssl; + final KeyStore trustStore = buildTrustStore(); + if (trustStore != null) { + ssl = createSslContext(trustStore); + } else { + ssl = null; + } + IOException failure = null; + for (; ; ) { + try { + checkResource(ssl); + return true; + } catch (IOException e) { + logger.debug("Failed to access resource [{}]", url, e); + failure = e; + } + if (System.nanoTime() < waitUntil) { + Thread.sleep(sleep); + } else { + logger.error("Failed to access url [{}]", url, failure); + return false; + } + } + } + + protected void checkResource(SSLContext ssl) throws IOException { + try { + final HttpURLConnection connection = buildConnection(ssl); + connection.connect(); + final Integer response = connection.getResponseCode(); + if (validResponseCodes.contains(response)) { + logger.info("Got successful response [{}] from URL [{}]", response, url); + return; + } else { + throw new IOException(response + " " + connection.getResponseMessage()); + } + } catch (IOException e) { + throw e; + } + } + + HttpURLConnection buildConnection(SSLContext ssl) throws IOException { + final HttpURLConnection connection = (HttpURLConnection) this.url.openConnection(); + configureSslContext(connection, ssl); + configureBasicAuth(connection); + connection.setRequestMethod("GET"); + return connection; + } + + private void configureSslContext(HttpURLConnection connection, SSLContext ssl) { + if (ssl != null) { + if (connection instanceof HttpsURLConnection) { + ((HttpsURLConnection) connection).setSSLSocketFactory(ssl.getSocketFactory()); + } else { + throw new IllegalStateException("SSL trust has been configured, but [" + url + "] is not a 'https' URL"); + } + } + } + + private void configureBasicAuth(HttpURLConnection connection) { + if (username != null) { + if (password == null) { + throw new IllegalStateException("Basic Auth user [" + username + + "] has been set, but no password has been configured"); + } + connection.setRequestProperty("Authorization", + "Basic " + Base64.getEncoder().encodeToString((username + ":" + password).getBytes(StandardCharsets.UTF_8))); + } + } + + KeyStore buildTrustStore() throws GeneralSecurityException, IOException { + if (this.certificateAuthorities != null) { + if (trustStoreFile != null) { + throw new IllegalStateException("Cannot specify both truststore and CAs"); + } + return buildTrustStoreFromCA(); + } else if (trustStoreFile != null) { + return buildTrustStoreFromFile(); + } else { + return null; + } + } + + private KeyStore buildTrustStoreFromFile() throws GeneralSecurityException, IOException { + KeyStore keyStore = KeyStore.getInstance(trustStoreFile.getName().endsWith(".jks") ? "JKS" : "PKCS12"); + try (InputStream input = new FileInputStream(trustStoreFile)) { + keyStore.load(input, trustStorePassword == null ? 
null : trustStorePassword.toCharArray()); + } + return keyStore; + } + + private KeyStore buildTrustStoreFromCA() throws GeneralSecurityException, IOException { + final KeyStore store = KeyStore.getInstance(KeyStore.getDefaultType()); + store.load(null, null); + final CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); + int counter = 0; + for (File ca : certificateAuthorities) { + try (InputStream input = new FileInputStream(ca)) { + for (Certificate certificate : certFactory.generateCertificates(input)) { + store.setCertificateEntry("cert-" + counter, certificate); + counter++; + } + } + } + return store; + } + + private SSLContext createSslContext(KeyStore trustStore) throws GeneralSecurityException { + checkForTrustEntry(trustStore); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(trustStore); + SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); + sslContext.init(new KeyManager[0], tmf.getTrustManagers(), new SecureRandom()); + return sslContext; + } + + private void checkForTrustEntry(KeyStore trustStore) throws KeyStoreException { + Enumeration enumeration = trustStore.aliases(); + while (enumeration.hasMoreElements()) { + if (trustStore.isCertificateEntry(enumeration.nextElement())) { + // found trusted cert entry + return; + } + } + throw new IllegalStateException("Trust-store does not contain any trusted certificate entries"); + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.java index d68985ff17ab6..f858ec26fc158 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.java @@ -66,6 +66,7 @@ public class ForbiddenPatternsTask extends DefaultTask { .exclude("**/*.zip") .exclude("**/*.jks") .exclude("**/*.crt") + .exclude("**/*.keystore") .exclude("**/*.png"); /* @@ -111,13 +112,13 @@ public void checkInvalidPatterns() throws IOException { .collect(Collectors.toList()); String path = getProject().getRootProject().getProjectDir().toURI().relativize(f.toURI()).toString(); - failures = invalidLines.stream() + failures.addAll(invalidLines.stream() .map(l -> new AbstractMap.SimpleEntry<>(l+1, lines.get(l))) .flatMap(kv -> patterns.entrySet().stream() .filter(p -> Pattern.compile(p.getValue()).matcher(kv.getValue()).find()) .map(p -> "- " + p.getKey() + " on line " + kv.getKey() + " of " + path) ) - .collect(Collectors.toList()); + .collect(Collectors.toList())); } if (failures.isEmpty() == false) { throw new GradleException("Found invalid patterns:\n" + String.join("\n", failures)); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java index 04e1343f4ac92..95ad323ceda83 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/TestingConventionsTasks.java @@ -31,12 +31,10 @@ import org.gradle.api.tasks.SourceSetContainer; import org.gradle.api.tasks.TaskAction; import org.gradle.api.tasks.testing.Test; -import org.gradle.api.tasks.util.PatternFilterable; import java.io.File; import java.io.IOException; import java.lang.annotation.Annotation; -import java.lang.reflect.InvocationTargetException; 
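Regarding the ForbiddenPatternsTask change above (failures.addAll(...) instead of reassigning failures): the check runs once per input file, so reassigning the list kept only the last file's findings. A stripped-down sketch of the difference; filesToCheck and findViolations(...) are hypothetical stand-ins for the task's real inputs.

    List<String> failures = new ArrayList<>();
    for (File file : filesToCheck) {
        List<String> violations = findViolations(file);
        failures.addAll(violations);   // new behaviour: accumulate findings across every file
        // failures = violations;      // old behaviour: silently dropped all but the last file
    }
    if (failures.isEmpty() == false) {
        throw new GradleException("Found invalid patterns:\n" + String.join("\n", failures));
    }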
import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.net.MalformedURLException; @@ -67,7 +65,7 @@ public class TestingConventionsTasks extends DefaultTask { public TestingConventionsTasks() { setDescription("Tests various testing conventions"); // Run only after everything is compiled - Boilerplate.getJavaSourceSets(getProject()).all(sourceSet -> dependsOn(sourceSet.getClassesTaskName())); + Boilerplate.getJavaSourceSets(getProject()).all(sourceSet -> dependsOn(sourceSet.getOutput().getClassesDirs())); naming = getProject().container(TestingConventionRule.class); } @@ -75,17 +73,6 @@ public TestingConventionsTasks() { public Map> classFilesPerEnabledTask(FileTree testClassFiles) { Map> collector = new HashMap<>(); - // RandomizedTestingTask - collector.putAll( - getProject().getTasks().withType(getRandomizedTestingTask()).stream() - .filter(Task::getEnabled) - .collect(Collectors.toMap( - Task::getPath, - task -> testClassFiles.matching(getRandomizedTestingPatternSet(task)).getFiles() - ) - ) - ); - // Gradle Test collector.putAll( getProject().getTasks().withType(Test.class).stream() @@ -279,32 +266,6 @@ private String collectProblems(String... problems) { .collect(Collectors.joining("\n")); } - @SuppressWarnings("unchecked") - private PatternFilterable getRandomizedTestingPatternSet(Task task) { - try { - if ( - getRandomizedTestingTask().isAssignableFrom(task.getClass()) == false - ) { - throw new IllegalStateException("Expected " + task + " to be RandomizedTestingTask or Test but it was " + task.getClass()); - } - Method getPatternSet = task.getClass().getMethod("getPatternSet"); - return (PatternFilterable) getPatternSet.invoke(task); - } catch (NoSuchMethodException e) { - throw new IllegalStateException("Expecte task to have a `patternSet` " + task, e); - } catch (IllegalAccessException | InvocationTargetException e) { - throw new IllegalStateException("Failed to get pattern set from task" + task, e); - } - } - - @SuppressWarnings("unchecked") - private Class getRandomizedTestingTask() { - try { - return (Class) Class.forName("com.carrotsearch.gradle.junit4.RandomizedTestingTask"); - } catch (ClassNotFoundException | ClassCastException e) { - throw new IllegalStateException("Failed to load randomized testing class", e); - } - } - private String checkNoneExists(String message, Stream> stream) { String problem = stream .map(each -> " * " + each.getName()) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java b/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java new file mode 100644 index 0000000000000..ce806b48e56a7 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/test/ErrorReportingTestListener.java @@ -0,0 +1,266 @@ +package org.elasticsearch.gradle.test; + +import org.gradle.api.internal.tasks.testing.logging.FullExceptionFormatter; +import org.gradle.api.internal.tasks.testing.logging.TestExceptionFormatter; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; +import org.gradle.api.tasks.testing.TestDescriptor; +import org.gradle.api.tasks.testing.TestListener; +import org.gradle.api.tasks.testing.TestOutputEvent; +import org.gradle.api.tasks.testing.TestOutputListener; +import org.gradle.api.tasks.testing.TestResult; +import org.gradle.api.tasks.testing.logging.TestLogging; + +import java.io.BufferedOutputStream; +import java.io.BufferedReader; +import java.io.Closeable; +import java.io.File; +import java.io.FileOutputStream; 
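The WaitForHttpResource utility introduced earlier in this change is self-contained, so it can be exercised outside of Gradle as well. A minimal usage sketch; the host, credentials and CA path are placeholders rather than values taken from the build.

    import org.elasticsearch.gradle.http.WaitForHttpResource;
    import java.io.File;

    public class WaitForClusterExample {
        public static void main(String[] args) throws Exception {
            // wait for a three-node cluster to report at least yellow health over https
            WaitForHttpResource health = new WaitForHttpResource("https", "localhost:9200", 3);
            health.setUsername("elastic");                                    // placeholder credentials
            health.setPassword("changeme");
            health.setCertificateAuthorities(new File("build/certs/ca.crt")); // placeholder CA file
            boolean ready = health.wait(30_000);                              // up to 30s, polling roughly every 3s
            if (ready == false) {
                throw new IllegalStateException("Cluster did not become available in time");
            }
        }
    }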
+import java.io.FileReader; +import java.io.IOException; +import java.io.PrintStream; +import java.io.PrintWriter; +import java.io.UncheckedIOException; +import java.io.Writer; +import java.util.Deque; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +public class ErrorReportingTestListener implements TestOutputListener, TestListener { + private static final Logger LOGGER = Logging.getLogger(ErrorReportingTestListener.class); + private static final String REPRODUCE_WITH_PREFIX = "REPRODUCE WITH"; + + private final TestExceptionFormatter formatter; + private final File outputDirectory; + private Map eventWriters = new ConcurrentHashMap<>(); + private Map> reproductionLines = new ConcurrentHashMap<>(); + private Set failedTests = new LinkedHashSet<>(); + + public ErrorReportingTestListener(TestLogging testLogging, File outputDirectory) { + this.formatter = new FullExceptionFormatter(testLogging); + this.outputDirectory = outputDirectory; + } + + @Override + public void onOutput(TestDescriptor testDescriptor, TestOutputEvent outputEvent) { + TestDescriptor suite = testDescriptor.getParent(); + + // Check if this is output from the test suite itself (e.g. afterTest or beforeTest) + if (testDescriptor.isComposite()) { + suite = testDescriptor; + } + + // Hold on to any repro messages so we can report them immediately on test case failure + if (outputEvent.getMessage().startsWith(REPRODUCE_WITH_PREFIX)) { + Deque lines = reproductionLines.computeIfAbsent(Descriptor.of(suite), d -> new LinkedList<>()); + lines.add(outputEvent.getMessage()); + } + + EventWriter eventWriter = eventWriters.computeIfAbsent(Descriptor.of(suite), EventWriter::new); + eventWriter.write(outputEvent); + } + + @Override + public void beforeSuite(TestDescriptor suite) { + + } + + @Override + public void afterSuite(final TestDescriptor suite, TestResult result) { + Descriptor descriptor = Descriptor.of(suite); + + try { + // if the test suite failed, report all captured output + if (result.getResultType().equals(TestResult.ResultType.FAILURE)) { + EventWriter eventWriter = eventWriters.get(descriptor); + + if (eventWriter != null) { + // It's not explicit what the threading guarantees are for TestListener method execution so we'll + // be explicitly safe here to avoid interleaving output from multiple test suites + synchronized (this) { + // make sure we've flushed everything to disk before reading + eventWriter.flush(); + + System.err.println("\n\nSuite: " + suite); + + try (BufferedReader reader = eventWriter.reader()) { + PrintStream out = System.out; + for (String message = reader.readLine(); message != null; message = reader.readLine()) { + if (message.startsWith(" 1> ")) { + out = System.out; + } else if (message.startsWith(" 2> ")) { + out = System.err; + } + + out.println(message); + } + } + } + } + } + } catch (IOException e) { + throw new UncheckedIOException("Error reading test suite output", e); + } finally { + reproductionLines.remove(descriptor); + EventWriter writer = eventWriters.remove(descriptor); + if (writer != null) { + try { + writer.close(); + } catch (IOException e) { + LOGGER.error("Failed to close test suite output stream", e); + } + } + } + } + + @Override + public void beforeTest(TestDescriptor testDescriptor) { + + } + + @Override + public void afterTest(TestDescriptor testDescriptor, TestResult result) { + if (result.getResultType() == 
TestResult.ResultType.FAILURE) { + failedTests.add(Descriptor.of(testDescriptor)); + + if (testDescriptor.getParent() != null) { + // go back and fetch the reproduction line for this test failure + Deque lines = reproductionLines.get(Descriptor.of(testDescriptor.getParent())); + if (lines != null) { + String line = lines.getLast(); + if (line != null) { + System.err.print('\n' + line); + } + } + + // include test failure exception stacktraces in test suite output log + if (result.getExceptions().size() > 0) { + String message = formatter.format(testDescriptor, result.getExceptions()).substring(4); + EventWriter eventWriter = eventWriters.computeIfAbsent(Descriptor.of(testDescriptor.getParent()), EventWriter::new); + + eventWriter.write(new TestOutputEvent() { + @Override + public Destination getDestination() { + return Destination.StdErr; + } + + @Override + public String getMessage() { + return message; + } + }); + } + } + } + } + + public Set getFailedTests() { + return failedTests; + } + + /** + * Class for identifying test output sources. We use this rather than Gradle's {@link TestDescriptor} as we want + * to avoid any nasty memory leak issues that come from keeping Gradle implementation types in memory. Since we + * use this a the key for our HashMap, it's best to control the implementation as there's no guarantee that Gradle's + * various {@link TestDescriptor} implementations reliably implement equals and hashCode. + */ + public static class Descriptor { + private final String name; + private final String className; + private final String parent; + + private Descriptor(String name, String className, String parent) { + this.name = name; + this.className = className; + this.parent = parent; + } + + public static Descriptor of(TestDescriptor d) { + return new Descriptor(d.getName(), d.getClassName(), d.getParent() == null ? null : d.getParent().toString()); + } + + public String getClassName() { + return className; + } + + public String getFullName() { + return className + "." 
+ name; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Descriptor that = (Descriptor) o; + return Objects.equals(name, that.name) && + Objects.equals(className, that.className) && + Objects.equals(parent, that.parent); + } + + @Override + public int hashCode() { + return Objects.hash(name, className, parent); + } + } + + private class EventWriter implements Closeable { + private final File outputFile; + private final Writer writer; + + EventWriter(Descriptor descriptor) { + this.outputFile = new File(outputDirectory, descriptor.getClassName() + ".out"); + + FileOutputStream fos; + try { + fos = new FileOutputStream(this.outputFile); + } catch (IOException e) { + throw new UncheckedIOException("Unable to create test suite output file", e); + } + + this.writer = new PrintWriter(new BufferedOutputStream(fos)); + } + + public void write(TestOutputEvent event) { + String prefix; + if (event.getDestination() == TestOutputEvent.Destination.StdOut) { + prefix = " 1> "; + } else { + prefix = " 2> "; + } + + try { + if (event.getMessage().equals("\n")) { + writer.write(event.getMessage()); + } else { + writer.write(prefix + event.getMessage()); + } + } catch (IOException e) { + throw new UncheckedIOException("Unable to write test suite output", e); + } + } + + public void flush() throws IOException { + writer.flush(); + } + + public BufferedReader reader() { + try { + return new BufferedReader(new FileReader(outputFile)); + } catch (IOException e) { + throw new UncheckedIOException("Unable to read test suite output file", e); + } + } + + @Override + public void close() throws IOException { + writer.close(); + + // there's no need to keep this stuff on disk after suite execution + outputFile.delete(); + } + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index cb7986b9a3051..1010bc54f8636 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -20,10 +20,10 @@ import org.elasticsearch.GradleServicesAdapter; import org.elasticsearch.gradle.Distribution; +import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.Version; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; -import org.gradle.internal.os.OperatingSystem; import java.io.BufferedReader; import java.io.File; @@ -31,11 +31,14 @@ import java.io.InputStreamReader; import java.io.UncheckedIOException; import java.net.HttpURLConnection; +import java.net.URI; import java.net.URL; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -65,7 +68,9 @@ public class ElasticsearchNode { private static final TimeUnit ES_DESTROY_TIMEOUT_UNIT = TimeUnit.SECONDS; private static final int NODE_UP_TIMEOUT = 30; private static final TimeUnit NODE_UP_TIMEOUT_UNIT = TimeUnit.SECONDS; + private final LinkedHashMap> waitConditions; + private final List plugins = new ArrayList<>(); private final Path confPathRepo; private final Path configFile; @@ -75,6 +80,7 @@ public class ElasticsearchNode { private final Path httpPortsFile; private final Path esStdoutFile; 
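Since ErrorReportingTestListener (above) implements both TestOutputListener and TestListener, a single instance is normally registered for both roles on each Test task. A rough registration sketch, assuming Gradle's standard listener hooks; the "testOutput" directory name is an assumption, not taken from the change.

    import org.elasticsearch.gradle.test.ErrorReportingTestListener;
    import org.gradle.api.Project;
    import org.gradle.api.tasks.testing.Test;
    import java.io.File;

    public class ErrorReportingConfigurer {
        public static void configure(Project project) {
            project.getTasks().withType(Test.class).configureEach(test -> {
                File outputDir = new File(project.getBuildDir(), "testOutput/" + test.getName());
                outputDir.mkdirs();
                ErrorReportingTestListener listener =
                    new ErrorReportingTestListener(test.getTestLogging(), outputDir);
                test.addTestListener(listener);       // afterSuite/afterTest failure reporting
                test.addTestOutputListener(listener); // captures per-suite stdout/stderr to disk
            });
        }
    }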
private final Path esStderrFile; + private final Path tmpDir; private Distribution distribution; private String version; @@ -96,6 +102,7 @@ public class ElasticsearchNode { httpPortsFile = confPathLogs.resolve("http.ports"); esStdoutFile = confPathLogs.resolve("es.stdout.log"); esStderrFile = confPathLogs.resolve("es.stderr.log"); + tmpDir = workingDir.resolve("tmp"); this.waitConditions = new LinkedHashMap<>(); waitConditions.put("http ports file", node -> Files.exists(node.httpPortsFile)); waitConditions.put("transport ports file", node -> Files.exists(node.transportPortFile)); @@ -126,9 +133,20 @@ public void setDistribution(Distribution distribution) { this.distribution = distribution; } + public void plugin(URI plugin) { + requireNonNull(plugin, "Plugin name can't be null"); + checkFrozen(); + this.plugins.add(plugin); + } + + public void plugin(File plugin) { + plugin(plugin.toURI()); + } + public void freeze() { requireNonNull(distribution, "null distribution passed when configuring test cluster `" + this + "`"); requireNonNull(version, "null version passed when configuring test cluster `" + this + "`"); + requireNonNull(javaHome, "null javaHome passed when configuring test cluster `" + this + "`"); logger.info("Locking configuration of `{}`", this); configurationFrozen.set(true); } @@ -165,12 +183,20 @@ private void waitForUri(String description, String uri) { }); } + /** + * Returns a stream of lines in the generated logs similar to Files.lines + * + * @return stream of log lines + */ + public Stream logLines() throws IOException { + return Files.lines(esStdoutFile, StandardCharsets.UTF_8); + } + synchronized void start() { logger.info("Starting `{}`", this); Path distroArtifact = artifactsExtractDir - .resolve(distribution.getFileExtension()) - .resolve(distribution.getFileName() + "-" + getVersion()); + .resolve(distribution.getArtifactName() + "-" + getVersion()); if (Files.exists(distroArtifact) == false) { throw new TestClustersException("Can not start " + this + ", missing: " + distroArtifact); @@ -182,43 +208,79 @@ synchronized void start() { spec.from(distroArtifact.resolve("config").toFile()); spec.into(configFile.getParent()); }); - configure(); - startElasticsearchProcess(distroArtifact); + + try { + createWorkingDir(distroArtifact); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + createConfiguration(); + + plugins.forEach(plugin -> runElaticsearchBinScript( + "elasticsearch-plugin", + "install", "--batch", plugin.toString()) + ); + + startElasticsearchProcess(); } - private void startElasticsearchProcess(Path distroArtifact) { - logger.info("Running `bin/elasticsearch` in `{}` for {}", workingDir, this); - final ProcessBuilder processBuilder = new ProcessBuilder(); - if (OperatingSystem.current().isWindows()) { - processBuilder.command( - "cmd", "/c", - distroArtifact.resolve("\\bin\\elasticsearch.bat").toAbsolutePath().toString() + private void runElaticsearchBinScript(String tool, String... 
args) { + services.loggedExec(spec -> { + spec.setEnvironment(getESEnvironment()); + spec.workingDir(workingDir); + spec.executable( + OS.conditionalString() + .onUnix(() -> "./bin/" + tool) + .onWindows(() -> "cmd") + .supply() ); - } else { - processBuilder.command( - distroArtifact.resolve("bin/elasticsearch").toAbsolutePath().toString() + spec.args( + OS.>conditional() + .onWindows(() -> { + ArrayList result = new ArrayList<>(); + result.add("/c"); + result.add("bin\\" + tool + ".bat"); + for (String arg : args) { + result.add(arg); + } + return result; + }) + .onUnix(() -> Arrays.asList(args)) + .supply() ); - } + }); + } + + private Map getESEnvironment() { + Map environment= new HashMap<>(); + environment.put("JAVA_HOME", getJavaHome().getAbsolutePath()); + environment.put("ES_PATH_CONF", configFile.getParent().toString()); + environment.put("ES_JAVA_OPTS", "-Xms512m -Xmx512m"); + environment.put("ES_TMPDIR", tmpDir.toString()); + // Windows requires this as it defaults to `c:\windows` despite ES_TMPDIR + + environment.put("TMP", tmpDir.toString()); + return environment; + } + + private void startElasticsearchProcess() { + final ProcessBuilder processBuilder = new ProcessBuilder(); + + List command = OS.>conditional() + .onUnix(() -> Arrays.asList("./bin/elasticsearch")) + .onWindows(() -> Arrays.asList("cmd", "/c", "bin\\elasticsearch.bat")) + .supply(); + processBuilder.command(command); + processBuilder.directory(workingDir.toFile()); + Map environment = processBuilder.environment(); + // Don't inherit anything from the environment for as that would lack reproducibility + environment.clear(); + environment.putAll(getESEnvironment()); + // don't buffer all in memory, make sure we don't block on the default pipes + processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(esStderrFile.toFile())); + processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(esStdoutFile.toFile())); + logger.info("Running `{}` in `{}` for {} env: {}", command, workingDir, this, environment); try { - processBuilder.directory(workingDir.toFile()); - Map environment = processBuilder.environment(); - // Don't inherit anything from the environment for as that would lack reproductability - environment.clear(); - if (javaHome != null) { - environment.put("JAVA_HOME", getJavaHome().getAbsolutePath()); - } else if (System.getenv().get("JAVA_HOME") != null) { - logger.warn("{}: No java home configured will use it from environment: {}", - this, System.getenv().get("JAVA_HOME") - ); - environment.put("JAVA_HOME", System.getenv().get("JAVA_HOME")); - } else { - logger.warn("{}: No javaHome configured, will rely on default java detection", this); - } - environment.put("ES_PATH_CONF", configFile.getParent().toAbsolutePath().toString()); - environment.put("ES_JAVA_OPTIONS", "-Xms512m -Xmx512m"); - // don't buffer all in memory, make sure we don't block on the default pipes - processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(esStderrFile.toFile())); - processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(esStdoutFile.toFile())); esProcess = processBuilder.start(); } catch (IOException e) { throw new TestClustersException("Failed to start ES process for " + this, e); @@ -243,7 +305,8 @@ synchronized void stop(boolean tailLogs) { } logger.info("Stopping `{}`, tailLogs: {}", this, tailLogs); requireNonNull(esProcess, "Can't stop `" + this + "` as it was not started or already stopped."); - stopHandle(esProcess.toHandle()); + // Test clusters are not reused, don't spend time on a graceful 
shutdown + stopHandle(esProcess.toHandle(), true); if (tailLogs) { logFileContents("Standard output of node", esStdoutFile); logFileContents("Standard error of node", esStderrFile); @@ -251,27 +314,37 @@ synchronized void stop(boolean tailLogs) { esProcess = null; } - private void stopHandle(ProcessHandle processHandle) { + private void stopHandle(ProcessHandle processHandle, boolean forcibly) { // Stop all children first, ES could actually be a child when there's some wrapper process like on Windows. - if (processHandle.isAlive()) { - processHandle.children().forEach(this::stopHandle); - } - logProcessInfo("Terminating elasticsearch process:", processHandle.info()); - if (processHandle.isAlive()) { - processHandle.destroy(); - } else { + if (processHandle.isAlive() == false) { logger.info("Process was not running when we tried to terminate it."); + return; } - waitForProcessToExit(processHandle); - if (processHandle.isAlive()) { + + // Stop all children first, ES could actually be a child when there's some wrapper process like on Windows. + processHandle.children().forEach(each -> stopHandle(each, forcibly)); + + logProcessInfo( + "Terminating elasticsearch process" + (forcibly ? " forcibly " : "gracefully") + ":", + processHandle.info() + ); + + if (forcibly) { + processHandle.destroyForcibly(); + } else { + processHandle.destroy(); + waitForProcessToExit(processHandle); + if (processHandle.isAlive() == false) { + return; + } logger.info("process did not terminate after {} {}, stopping it forcefully", - ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT - ); + ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT); processHandle.destroyForcibly(); } + waitForProcessToExit(processHandle); if (processHandle.isAlive()) { - throw new TestClustersException("Was not able to terminate es process"); + throw new TestClustersException("Was not able to terminate elasticsearch process"); } } @@ -308,16 +381,19 @@ private void waitForProcessToExit(ProcessHandle processHandle) { } } - private void configure() { - try { - Files.createDirectories(configFile.getParent()); - Files.createDirectories(confPathRepo); - Files.createDirectories(confPathData); - Files.createDirectories(confPathLogs); - } catch (IOException e) { - throw new UncheckedIOException(e); - } + private void createWorkingDir(Path distroExtractDir) throws IOException { + services.sync(spec -> { + spec.from(distroExtractDir.toFile()); + spec.into(workingDir.toFile()); + }); + Files.createDirectories(configFile.getParent()); + Files.createDirectories(confPathRepo); + Files.createDirectories(confPathData); + Files.createDirectories(confPathLogs); + Files.createDirectories(tmpDir); + } + private void createConfiguration() { LinkedHashMap config = new LinkedHashMap<>(); String nodeName = safeName(name); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/SyncTestClustersConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/SyncTestClustersConfiguration.java deleted file mode 100644 index d1a86a38c66ff..0000000000000 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/SyncTestClustersConfiguration.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
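The startElasticsearchProcess changes above scrub the inherited environment and append stdout/stderr to files rather than reading the default pipes. The same pattern in isolation looks roughly like this; all paths and the JAVA_HOME value are placeholders mirroring getESEnvironment().

    import java.io.File;
    import java.io.IOException;
    import java.util.Arrays;
    import java.util.Map;

    public class CleanEnvironmentLaunch {
        public static void main(String[] args) throws IOException {
            ProcessBuilder builder = new ProcessBuilder();
            builder.command(Arrays.asList("./bin/elasticsearch"));     // "cmd /c bin\elasticsearch.bat" on Windows
            builder.directory(new File("build/testclusters/node-1"));  // placeholder working directory
            Map<String, String> environment = builder.environment();
            environment.clear();                                       // do not inherit the caller's environment (reproducibility)
            environment.put("JAVA_HOME", "/path/to/jdk");              // placeholder values
            environment.put("ES_JAVA_OPTS", "-Xms512m -Xmx512m");
            // append output to files so the build never blocks on full pipe buffers
            builder.redirectError(ProcessBuilder.Redirect.appendTo(new File("build/testclusters/node-1/es.stderr.log")));
            builder.redirectOutput(ProcessBuilder.Redirect.appendTo(new File("build/testclusters/node-1/es.stdout.log")));
            Process process = builder.start();
        }
    }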
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.gradle.testclusters; - -import org.gradle.api.DefaultTask; -import org.gradle.api.Project; -import org.gradle.api.file.FileCollection; -import org.gradle.api.tasks.InputFiles; -import org.gradle.api.tasks.OutputDirectory; -import org.gradle.api.tasks.TaskAction; - -import java.io.File; -import java.util.Set; -import java.util.stream.Collectors; - -public class SyncTestClustersConfiguration extends DefaultTask { - - @InputFiles - public FileCollection getDependencies() { - Set nonZip = getProject().getConfigurations() - .getByName(TestClustersPlugin.HELPER_CONFIGURATION_NAME) - .getFiles() - .stream() - .filter(file -> file.getName().endsWith(".zip") == false) - .collect(Collectors.toSet()); - if(nonZip.isEmpty() == false) { - throw new IllegalStateException("Expected only zip files in configuration : " + - TestClustersPlugin.HELPER_CONFIGURATION_NAME + " but found " + - nonZip - ); - } - return getProject().files( - getProject().getConfigurations() - .getByName(TestClustersPlugin.HELPER_CONFIGURATION_NAME) - .getFiles() - ); - } - - @OutputDirectory - public File getOutputDir() { - return getTestClustersConfigurationExtractDir(getProject()); - } - - @TaskAction - public void doExtract() { - File outputDir = getOutputDir(); - getProject().delete(outputDir); - outputDir.mkdirs(); - getDependencies().forEach(dep -> - getProject().copy(spec -> { - spec.from(getProject().zipTree(dep)); - spec.into(new File(outputDir, "zip")); - }) - ); - } - - static File getTestClustersConfigurationExtractDir(Project project) { - return new File(TestClustersPlugin.getTestClustersBuildDir(project), "extract"); - } - -} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 3abc9a6a6177e..ee9e1dc133a87 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -27,9 +27,11 @@ import org.gradle.api.artifacts.Configuration; import org.gradle.api.execution.TaskActionListener; import org.gradle.api.execution.TaskExecutionListener; +import org.gradle.api.file.FileTree; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; import org.gradle.api.plugins.ExtraPropertiesExtension; +import org.gradle.api.tasks.Sync; import org.gradle.api.tasks.TaskState; import java.io.File; @@ -39,6 +41,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -98,7 +101,23 @@ public void apply(Project project) { // the clusters will look for artifacts there based on the naming conventions. // Tasks that use a cluster will add this as a dependency automatically so it's guaranteed to run early in // the build. 
- rootProject.getTasks().create(SYNC_ARTIFACTS_TASK_NAME, SyncTestClustersConfiguration.class); + rootProject.getTasks().create(SYNC_ARTIFACTS_TASK_NAME, Sync.class, sync -> { + sync.from((Callable>) () -> + helperConfiguration.getFiles() + .stream() + .map(file -> { + if (file.getName().endsWith(".zip")) { + return project.zipTree(file); + } else if (file.getName().endsWith("tar.gz")) { + return project.tarTree(file); + } else { + throw new IllegalArgumentException("Can't extract " + file + " unknown file extension"); + } + }) + .collect(Collectors.toList()) + ); + sync.into(getTestClustersConfigurationExtractDir(project)); + }); // When we know what tasks will run, we claim the clusters of those task to differentiate between clusters // that are defined in the build script and the ones that will actually be used in this invocation of gradle @@ -129,7 +148,7 @@ private NamedDomainObjectContainer createTestClustersContaine project.getPath(), name, GradleServicesAdapter.getInstance(project), - SyncTestClustersConfiguration.getTestClustersConfigurationExtractDir(project), + getTestClustersConfigurationExtractDir(project), new File(project.getBuildDir(), "testclusters") ) ); @@ -249,8 +268,8 @@ public void beforeExecute(Task task) {} ); } - static File getTestClustersBuildDir(Project project) { - return new File(project.getRootProject().getBuildDir(), "testclusters"); + static File getTestClustersConfigurationExtractDir(Project project) { + return new File(project.getRootProject().getBuildDir(), "testclusters/extract"); } /** @@ -276,9 +295,11 @@ private static void autoConfigureClusterDependencies( project.afterEvaluate(ip -> container.forEach(esNode -> { // declare dependencies against artifacts needed by cluster formation. String dependency = String.format( - "org.elasticsearch.distribution.zip:%s:%s@zip", - esNode.getDistribution().getFileName(), - esNode.getVersion() + "unused:%s:%s:%s@%s", + esNode.getDistribution().getArtifactName(), + esNode.getVersion(), + esNode.getDistribution().getClassifier(), + esNode.getDistribution().getFileExtension() ); logger.info("Cluster {} depends on {}", esNode.getName(), dependency); rootProject.getDependencies().add(HELPER_CONFIGURATION_NAME, dependency); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java index 3dfccaf435031..b930955236fb8 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java @@ -21,6 +21,7 @@ import com.avast.gradle.dockercompose.ComposeExtension; import com.avast.gradle.dockercompose.DockerComposePlugin; import com.avast.gradle.dockercompose.tasks.ComposeUp; +import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.precommit.JarHellTask; import org.elasticsearch.gradle.precommit.TestingConventionsTasks; import org.elasticsearch.gradle.precommit.ThirdPartyAuditTask; @@ -30,12 +31,9 @@ import org.gradle.api.Task; import org.gradle.api.plugins.BasePlugin; import org.gradle.api.plugins.ExtraPropertiesExtension; -import org.gradle.api.tasks.Input; import org.gradle.api.tasks.TaskContainer; -import org.gradle.internal.os.OperatingSystem; +import org.gradle.api.tasks.testing.Test; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; import java.util.Collections; import java.util.function.BiConsumer; @@ -72,6 +70,8 @@ public void 
apply(Project project) { pullFixture.setEnabled(false); return; } + preProcessFixture.onlyIf(spec -> buildFixture.getEnabled()); + postProcessFixture.onlyIf(spec -> buildFixture.getEnabled()); project.apply(spec -> spec.plugin(BasePlugin.class)); project.apply(spec -> spec.plugin(DockerComposePlugin.class)); @@ -95,38 +95,51 @@ public void apply(Project project) { (name, port) -> postProcessFixture.getExtensions() .getByType(ExtraPropertiesExtension.class).set(name, port) ); + extension.fixtures.add(project); } - extension.fixtures.all(fixtureProject -> project.evaluationDependsOn(fixtureProject.getPath())); + extension.fixtures + .matching(fixtureProject -> fixtureProject.equals(project) == false) + .all(fixtureProject -> project.evaluationDependsOn(fixtureProject.getPath())); + + conditionTaskByType(tasks, extension, Test.class); + conditionTaskByType(tasks, extension, getTaskClass("org.elasticsearch.gradle.test.RestIntegTestTask")); + conditionTaskByType(tasks, extension, TestingConventionsTasks.class); + conditionTaskByType(tasks, extension, ComposeUp.class); + if (dockerComposeSupported(project) == false) { project.getLogger().warn( "Tests for {} require docker-compose at /usr/local/bin/docker-compose or /usr/bin/docker-compose " + "but none could be found so these will be skipped", project.getPath() ); - disableTaskByType(tasks, getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask")); - // conventions are not honored when the tasks are disabled - disableTaskByType(tasks, TestingConventionsTasks.class); - disableTaskByType(tasks, ComposeUp.class); return; } - tasks.withType(getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask"), task -> + + tasks.withType(Test.class, task -> extension.fixtures.all(fixtureProject -> { - fixtureProject.getTasks().matching(it -> it.getName().equals("buildFixture")).all(buildFixture -> - task.dependsOn(buildFixture) - ); - fixtureProject.getTasks().matching(it -> it.getName().equals("composeDown")).all(composeDown -> - task.finalizedBy(composeDown) - ); + fixtureProject.getTasks().matching(it -> it.getName().equals("buildFixture")).all(task::dependsOn); + fixtureProject.getTasks().matching(it -> it.getName().equals("composeDown")).all(task::finalizedBy); configureServiceInfoForTask( task, fixtureProject, - (name, port) -> setSystemProperty(task, name, port) + task::systemProperty ); + task.dependsOn(fixtureProject.getTasks().getByName("postProcessFixture")); }) ); } + private void conditionTaskByType(TaskContainer tasks, TestFixtureExtension extension, Class taskClass) { + tasks.withType( + taskClass, + task -> task.onlyIf(spec -> + extension.fixtures.stream() + .anyMatch(fixtureProject -> fixtureProject.getTasks().getByName("buildFixture").getEnabled() == false) == false + ) + ); + } + private void configureServiceInfoForTask(Task task, Project fixtureProject, BiConsumer consumer) { // Configure ports for the tests as system properties. 
// We only know these at execution time so we need to do it in doFirst @@ -155,9 +168,8 @@ private void configureServiceInfoForTask(Task task, Project fixtureProject, BiCo ); } - @Input public boolean dockerComposeSupported(Project project) { - if (OperatingSystem.current().isWindows()) { + if (OS.current().equals(OS.WINDOWS)) { return false; } final boolean hasDockerCompose = project.file("/usr/local/bin/docker-compose").exists() || @@ -165,17 +177,6 @@ public boolean dockerComposeSupported(Project project) { return hasDockerCompose && Boolean.parseBoolean(System.getProperty("tests.fixture.enabled", "true")); } - private void setSystemProperty(Task task, String name, Object value) { - try { - Method systemProperty = task.getClass().getMethod("systemProperty", String.class, Object.class); - systemProperty.invoke(task, name, value); - } catch (NoSuchMethodException e) { - throw new IllegalArgumentException("Could not find systemProperty method on RandomizedTestingTask", e); - } catch (IllegalAccessException | InvocationTargetException e) { - throw new IllegalArgumentException("Could not call systemProperty method on RandomizedTestingTask", e); - } - } - private void disableTaskByType(TaskContainer tasks, Class type) { tasks.withType(type, task -> task.setEnabled(false)); } diff --git a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/VersionProperties.java b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/VersionProperties.java index 23ac9458b961d..cdb0f01cf75a9 100644 --- a/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/VersionProperties.java +++ b/buildSrc/src/main/minimumRuntime/org/elasticsearch/gradle/VersionProperties.java @@ -10,6 +10,7 @@ * Accessor for shared dependency versions used by elasticsearch, namely the elasticsearch and lucene versions. 
*/ public class VersionProperties { + public static String getElasticsearch() { return elasticsearch; } @@ -18,17 +19,25 @@ public static String getLucene() { return lucene; } + public static String getBundledJdk() { + return bundledJdk; + } + public static Map getVersions() { return versions; } private static final String elasticsearch; private static final String lucene; + private static final String bundledJdk; private static final Map versions = new HashMap(); + static { Properties props = getVersionProperties(); elasticsearch = props.getProperty("elasticsearch"); lucene = props.getProperty("lucene"); + bundledJdk = props.getProperty("bundled_jdk"); + for (String property : props.stringPropertyNames()) { versions.put(property, props.getProperty(property)); } diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/carrotsearch.randomized-testing.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/carrotsearch.randomized-testing.properties deleted file mode 100644 index e1a1b8297c8eb..0000000000000 --- a/buildSrc/src/main/resources/META-INF/gradle-plugins/carrotsearch.randomized-testing.properties +++ /dev/null @@ -1 +0,0 @@ -implementation-class=com.carrotsearch.gradle.junit4.RandomizedTestingPlugin diff --git a/buildSrc/src/main/resources/minimumCompilerVersion b/buildSrc/src/main/resources/minimumCompilerVersion index b8162070734ff..35d51f33b34f9 100644 --- a/buildSrc/src/main/resources/minimumCompilerVersion +++ b/buildSrc/src/main/resources/minimumCompilerVersion @@ -1 +1 @@ -1.11 \ No newline at end of file +1.12 \ No newline at end of file diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index 3bff059174b83..804440660c71c 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -5.1.1 \ No newline at end of file +5.2.1 \ No newline at end of file diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index 39d6e433ac36e..762bcc5ff9b31 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -23,7 +23,6 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.GradleRunner; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Rule; import org.junit.rules.TemporaryFolder; @@ -39,7 +38,6 @@ import java.util.Objects; import java.util.stream.Collectors; -@Ignore // Awaiting a fix in https://github.com/elastic/elasticsearch/issues/37889. 
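With the VersionProperties change above, the bundled JDK version is exposed next to the elasticsearch and lucene versions; the backing key in version.properties is bundled_jdk. A tiny usage sketch, with illustrative output formatting only:

    import org.elasticsearch.gradle.VersionProperties;

    public class PrintVersions {
        public static void main(String[] args) {
            // all values are read from version.properties by the static initializer shown above
            System.out.println("elasticsearch: " + VersionProperties.getElasticsearch());
            System.out.println("bundled JDK:   " + VersionProperties.getBundledJdk());
            System.out.println("lucene:        " + VersionProperties.getLucene());
        }
    }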
public class BuildExamplePluginsIT extends GradleIntegrationTestCase { private static final List EXAMPLE_PLUGINS = Collections.unmodifiableList( @@ -78,11 +76,6 @@ public static Iterable parameters() { public void testCurrentExamplePlugin() throws IOException { FileUtils.copyDirectory(examplePlugin, tmpDir.getRoot()); - // just get rid of deprecation warnings - Files.write( - getTempPath("settings.gradle"), - "enableFeaturePreview('STABLE_PUBLISHING')\n".getBytes(StandardCharsets.UTF_8) - ); adaptBuildScriptForTest(); @@ -106,6 +99,7 @@ private void adaptBuildScriptForTest() throws IOException { "buildscript {\n" + " repositories {\n" + " maven {\n" + + " name = \"test\"\n" + " url = '" + getLocalTestRepoPath() + "'\n" + " }\n" + " }\n" + @@ -124,12 +118,14 @@ private void adaptBuildScriptForTest() throws IOException { String luceneSnapshotRevision = System.getProperty("test.lucene-snapshot-revision"); if (luceneSnapshotRepo != null) { luceneSnapshotRepo = " maven {\n" + - " url \"http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + luceneSnapshotRevision + "\"\n" + + " name \"lucene-snapshots\"\n" + + " url \"https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + luceneSnapshotRevision + "\"\n" + " }\n"; } writeBuildScript("\n" + "repositories {\n" + " maven {\n" + + " name \"test\"\n" + " url \"" + getLocalTestRepoPath() + "\"\n" + " }\n" + " flatDir {\n" + diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java index 99afd0bcbe0ae..7968f4f57cf90 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; -import org.gradle.testkit.runner.GradleRunner; public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTestCase { @@ -29,25 +28,19 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe public static final String PROJECT_NAME = "elasticsearch-build-resources"; public void testUpToDateWithSourcesConfigured() { - GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + getGradleRunner(PROJECT_NAME) .withArguments("clean", "-s") - .withPluginClasspath() .build(); - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + BuildResult result = getGradleRunner(PROJECT_NAME) .withArguments("buildResources", "-s", "-i") - .withPluginClasspath() .build(); assertTaskSuccessful(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); - result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + result = getGradleRunner(PROJECT_NAME) .withArguments("buildResources", "-s", "-i") - .withPluginClasspath() .build(); assertTaskUpToDate(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); @@ -55,10 +48,8 @@ public void testUpToDateWithSourcesConfigured() { } public void testImplicitTaskDependencyCopy() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + BuildResult result = 
getGradleRunner(PROJECT_NAME) .withArguments("clean", "sampleCopyAll", "-s", "-i") - .withPluginClasspath() .build(); assertTaskSuccessful(result, ":buildResources"); @@ -69,10 +60,8 @@ public void testImplicitTaskDependencyCopy() { } public void testImplicitTaskDependencyInputFileOfOther() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + BuildResult result = getGradleRunner(PROJECT_NAME) .withArguments("clean", "sample", "-s", "-i") - .withPluginClasspath() .build(); assertTaskSuccessful(result, ":sample"); @@ -81,11 +70,12 @@ public void testImplicitTaskDependencyInputFileOfOther() { } public void testIncorrectUsage() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) - .withArguments("noConfigAfterExecution", "-s", "-i") - .withPluginClasspath() - .buildAndFail(); - assertOutputContains("buildResources can't be configured after the task ran"); + assertOutputContains( + getGradleRunner(PROJECT_NAME) + .withArguments("noConfigAfterExecution", "-s", "-i") + .buildAndFail() + .getOutput(), + "buildResources can't be configured after the task ran" + ); } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java index d1b4e893ec6ad..530cbc88b8e16 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java @@ -81,6 +81,12 @@ public class VersionCollectionTests extends GradleUnitTestCase { "6_0_0", "6_0_1", "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", "6_2_0", "6_2_1", "6_2_2", "6_2_3", "6_2_4", "6_3_0", "6_3_1", "6_3_2", "6_4_0", "6_4_1", "6_4_2" )); + sampleVersions.put("7.0.0", asList( + "7_0_0", "6_8_0", "6_7_2", "6_7_1", "6_7_0", "6_6_2", "6_6_1", "6_6_0" + )); + sampleVersions.put("7.1.0", asList( + "7_1_0", "7_0_0", "6_8_0", "6_7_2", "6_7_1", "6_7_0", "6_6_1", "6_6_0" + )); } @Test(expected = IllegalArgumentException.class) @@ -145,6 +151,11 @@ public void testWireCompatible() { singletonList("7.3.0"), getVersionCollection("8.0.0").getWireCompatible() ); + assertVersionsEquals( + asList("6.8.0", "7.0.0"), + getVersionCollection("7.1.0").getWireCompatible() + ); + } public void testWireCompatibleUnreleased() { @@ -171,6 +182,10 @@ public void testWireCompatibleUnreleased() { singletonList("7.3.0"), getVersionCollection("8.0.0").getUnreleasedWireCompatible() ); + assertVersionsEquals( + asList("6.8.0", "7.0.0"), + getVersionCollection("7.1.0").getWireCompatible() + ); } public void testIndexCompatible() { @@ -270,6 +285,14 @@ public void testGetUnreleased() { asList("7.1.1", "7.2.0", "7.3.0", "8.0.0"), getVersionCollection("8.0.0").getUnreleased() ); + assertVersionsEquals( + asList("6.7.2", "6.8.0", "7.0.0", "7.1.0"), + getVersionCollection("7.1.0").getUnreleased() + ); + assertVersionsEquals( + asList("6.7.2", "6.8.0", "7.0.0"), + getVersionCollection("7.0.0").getUnreleased() + ); } public void testGetBranch() { @@ -293,6 +316,14 @@ public void testGetBranch() { asList("7.1", "7.2", "7.x"), getVersionCollection("8.0.0") ); + assertUnreleasedBranchNames( + asList("6.7", "6.8", "7.0"), + getVersionCollection("7.1.0") + ); + assertUnreleasedBranchNames( + asList("6.7", "6.8"), + getVersionCollection("7.0.0") + ); } public void testGetGradleProjectName() { @@ -309,13 +340,17 @@ public void testGetGradleProjectName() { getVersionCollection("6.4.2") ); assertUnreleasedGradleProjectNames( 
- asList("maintenance", "bugfix", "staged"), + asList("maintenance", "bugfix", "minor"), getVersionCollection("6.6.0") ); assertUnreleasedGradleProjectNames( asList("bugfix", "staged", "minor"), getVersionCollection("8.0.0") ); + assertUnreleasedGradleProjectNames( + asList("maintenance", "staged", "minor"), + getVersionCollection("7.1.0") + ); } public void testCompareToAuthoritative() { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/http/WaitForHttpResourceTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/http/WaitForHttpResourceTests.java new file mode 100644 index 0000000000000..67bae367c6f9f --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/http/WaitForHttpResourceTests.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.http; + +import org.elasticsearch.gradle.test.GradleUnitTestCase; + +import java.io.File; +import java.net.URL; +import java.security.KeyStore; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.notNullValue; + +public class WaitForHttpResourceTests extends GradleUnitTestCase { + + public void testBuildTrustStoreFromFile() throws Exception { + final WaitForHttpResource http = new WaitForHttpResource(new URL("https://localhost/")); + final URL ca = getClass().getResource("/ca.p12"); + assertThat(ca, notNullValue()); + http.setTrustStoreFile(new File(ca.getPath())); + http.setTrustStorePassword("password"); + final KeyStore store = http.buildTrustStore(); + final Certificate certificate = store.getCertificate("ca"); + assertThat(certificate, notNullValue()); + assertThat(certificate, instanceOf(X509Certificate.class)); + assertThat(((X509Certificate)certificate).getSubjectDN().toString(), equalTo("CN=Elastic Certificate Tool Autogenerated CA")); + } + + public void testBuildTrustStoreFromCA() throws Exception { + final WaitForHttpResource http = new WaitForHttpResource(new URL("https://localhost/")); + final URL ca = getClass().getResource("/ca.pem"); + assertThat(ca, notNullValue()); + http.setCertificateAuthorities(new File(ca.getPath())); + final KeyStore store = http.buildTrustStore(); + final Certificate certificate = store.getCertificate("cert-0"); + assertThat(certificate, notNullValue()); + assertThat(certificate, instanceOf(X509Certificate.class)); + assertThat(((X509Certificate)certificate).getSubjectDN().toString(), equalTo("CN=Elastic Certificate Tool Autogenerated CA")); + } +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java 
index e5624a15d92df..d45028d844542 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java @@ -2,7 +2,6 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; -import org.gradle.testkit.runner.GradleRunner; /* * Licensed to Elasticsearch under one or more contributor @@ -25,10 +24,8 @@ public class JarHellTaskIT extends GradleIntegrationTestCase { public void testJarHellDetected() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("jarHell")) + BuildResult result = getGradleRunner("jarHell") .withArguments("clean", "precommit", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) - .withPluginClasspath() .buildAndFail(); assertTaskFailed(result, ":jarHell"); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java index 39ab8a6734c58..c6e1e2783cebc 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/TestingConventionsTasksIT.java @@ -62,7 +62,7 @@ public void testNoEmptyTasks() { BuildResult result = runner.buildAndFail(); assertOutputContains(result.getOutput(), "Expected at least one test class included in task :empty_test_task:emptyTest, but found none.", - "Expected at least one test class included in task :empty_test_task:emptyTestRandomized, but found none." + "Expected at least one test class included in task :empty_test_task:test, but found none." ); } @@ -71,9 +71,8 @@ public void testAllTestTasksIncluded() { .withArguments("clean", ":all_classes_in_tasks:testingConventions", "-i", "-s"); BuildResult result = runner.buildAndFail(); assertOutputContains(result.getOutput(), - "Test classes are not included in any enabled task (:all_classes_in_tasks:emptyTestRandomized):", - " * org.elasticsearch.gradle.testkit.NamingConventionIT", - " * org.elasticsearch.gradle.testkit.NamingConventionTests" + "Test classes are not included in any enabled task (:all_classes_in_tasks:test):", + " * org.elasticsearch.gradle.testkit.NamingConventionIT" ); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index f7a0382cec775..46a9194780c2a 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -4,8 +4,12 @@ import org.gradle.testkit.runner.BuildTask; import org.gradle.testkit.runner.GradleRunner; import org.gradle.testkit.runner.TaskOutcome; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; import java.io.File; +import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.List; @@ -16,6 +20,9 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { + @Rule + public TemporaryFolder testkitTmpDir = new TemporaryFolder(); + protected File getProjectDir(String name) { File root = new File("src/testKit/"); if (root.exists() == false) { @@ -26,9 +33,16 @@ protected File getProjectDir(String name) { } protected GradleRunner getGradleRunner(String sampleProject) { + File testkit; + try { + 
testkit = testkitTmpDir.newFolder(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } return GradleRunner.create() .withProjectDir(getProjectDir(sampleProject)) - .withPluginClasspath(); + .withPluginClasspath() + .withTestKitDir(testkit); } protected File getBuildDir(String name) { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index bb69665026b1d..37e0bfd0fa17e 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -21,13 +21,20 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; +import org.junit.Before; import org.junit.Ignore; import java.util.Arrays; -@Ignore // Awaiting a fix in https://github.com/elastic/elasticsearch/issues/37889. public class TestClustersPluginIT extends GradleIntegrationTestCase { + private GradleRunner runner; + + @Before + public void setUp() throws Exception { + runner = getGradleRunner("testclusters"); + } + public void testListClusters() { BuildResult result = getTestClustersRunner("listTestClusters").build(); @@ -83,6 +90,7 @@ public void testUseClusterBySkippedAndWorkingTask() { ); } + @Ignore // https://github.com/elastic/elasticsearch/issues/41256 public void testMultiProject() { BuildResult result = getTestClustersRunner( "user1", "user2", "-s", "-i", "--parallel", "-Dlocal.repo.path=" + getLocalTestRepoPath() @@ -152,6 +160,14 @@ public void testConfigurationLocked() { ); } + public void testPluginInstalled() { + BuildResult result = getTestClustersRunner(":printLog").build(); + assertTaskSuccessful(result, ":printLog"); + assertStartedAndStoppedOnce(result); + assertOutputContains(result.getOutput(), "-> Installed dummy"); + assertOutputContains(result.getOutput(), "loaded plugin [dummy]"); + } + private void assertNotStarted(BuildResult result) { assertOutputDoesNotContain( result.getOutput(), @@ -165,10 +181,7 @@ private GradleRunner getTestClustersRunner(String... 
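The GradleIntegrationTestCase change above routes every runner through `getGradleRunner`, which now points TestKit at a per-test temporary directory so Gradle daemons and caches do not leak between test methods. A hedged sketch of the same pattern in a standalone test follows; the project directory and task name are placeholders, the runner calls themselves are the ones used throughout this diff.

```java
import java.io.File;

import org.gradle.testkit.runner.BuildResult;
import org.gradle.testkit.runner.GradleRunner;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

// Sketch: run a TestKit build with an isolated TestKit directory, mirroring the
// getGradleRunner helper above. "src/testKit/sample-project" and "hello" are
// illustrative placeholders.
public class SampleGradleRunnerTest {

    @Rule
    public TemporaryFolder testkitTmpDir = new TemporaryFolder();

    @Test
    public void runsHelloTask() throws Exception {
        BuildResult result = GradleRunner.create()
            .withProjectDir(new File("src/testKit/sample-project"))
            .withPluginClasspath()
            .withTestKitDir(testkitTmpDir.newFolder()) // fresh TestKit home per test
            .withArguments("hello", "-s")
            .build();
        System.out.println(result.getOutput());
    }
}
```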
tasks) { arguments[tasks.length] = "-s"; arguments[tasks.length + 1] = "-i"; arguments[tasks.length + 2] = "-Dlocal.repo.path=" + getLocalTestRepoPath(); - return GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments(arguments) - .withPluginClasspath(); + return runner.withArguments(arguments); } private void assertStartedAndStoppedOnce(BuildResult result) { @@ -178,4 +191,6 @@ private void assertStartedAndStoppedOnce(BuildResult result) { "Stopping `node{::myTestCluster}`" ); } + + } diff --git a/buildSrc/src/test/resources/ca.p12 b/buildSrc/src/test/resources/ca.p12 new file mode 100644 index 0000000000000..cc44494515b9f Binary files /dev/null and b/buildSrc/src/test/resources/ca.p12 differ diff --git a/buildSrc/src/test/resources/ca.pem b/buildSrc/src/test/resources/ca.pem new file mode 100644 index 0000000000000..8dda1767e4838 --- /dev/null +++ b/buildSrc/src/test/resources/ca.pem @@ -0,0 +1,25 @@ +Bag Attributes + friendlyName: ca + localKeyID: 54 69 6D 65 20 31 35 35 33 37 34 33 38 39 30 38 33 35 +subject=/CN=Elastic Certificate Tool Autogenerated CA +issuer=/CN=Elastic Certificate Tool Autogenerated CA +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIVAMQMmDRcXfXLaTp6ep1H8rC3tOrwMA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTE5MDMyODAzMzEyNloXDTIyMDMyNzAzMzEyNlowNDEyMDAG +A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDT73N6JZeBPyzahc0aNcra +BpUROVGB9wXQqf8JeU4GtH+1qfqUKYKUJTe/DZWc+5Qz1WAKGZEvBySAlgbuncuq +VpLzWxpEui1vRW8JB3gjZgeY3vfErrEWWr95YM0e8rWu4AoAchzqsrG0/+po2eui +cN+8hI6jRKiBv/ZeQqja6KZ8y4Wt4VaNVL53+I7+eWA/aposu6/piUg2wZ/FNhVK +hypcJwDdp3fQaugtPj3y76303jTRgutgd3rtWFuy3MCDLfs3mSQUjO10s93zwLdC +XokyIywijS5CpO8mEuDRu9rb5J1DzwUpUfk+GMObb6rHjFKzSqnM3s+nasypQQ9L +AgMBAAGjUzBRMB0GA1UdDgQWBBQZEW88R95zSzO2tLseEWgI7ugvLzAfBgNVHSME +GDAWgBQZEW88R95zSzO2tLseEWgI7ugvLzAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBCwUAA4IBAQBEJN0UbL77usVnzIvxKa3GpLBgJQAZtD1ifZppC4w46Bul +1G7Fdc+XMbzZlI4K6cWEdd5dfEssKA8btEtRzdNOqgggBpqrUU0mNlQ+vC22XORU +ykHAu2TsRwoHmuxkd9Et/QyuTFXR4fTiU8rsJuLFOgn+RdEblA0J0gJeIqdWI5Z1 +z13OyZEl6BCQFyrntu2eERxaHEfsJOSBZE4RcecnLNGhIJBXE0Pk4iTiViJF/h7d ++kUUegKx0qewZif2eEZgrz12Vuen9a6bh2i2pNS95vABVVMr8uB+J1BGkNA5YT7J +qtZA2tN//Evng7YDiR+KkB1kvXVZVIi2WPDLD/zu +-----END CERTIFICATE----- diff --git a/buildSrc/src/testKit/elasticsearch.build/build.gradle b/buildSrc/src/testKit/elasticsearch.build/build.gradle index 409367da3146a..7a68fe59baab6 100644 --- a/buildSrc/src/testKit/elasticsearch.build/build.gradle +++ b/buildSrc/src/testKit/elasticsearch.build/build.gradle @@ -16,6 +16,7 @@ repositories { jcenter() repositories { maven { + name "local-repo" url System.getProperty("local.repo.path") } } @@ -27,7 +28,7 @@ forbiddenApisTest.enabled = false // requires dependency on testing fw jarHell.enabled = false // we don't have tests for now -unitTest.enabled = false +test.enabled = false task hello { doFirst { diff --git a/buildSrc/src/testKit/jarHell/build.gradle b/buildSrc/src/testKit/jarHell/build.gradle index cd423c9f99f81..cb12ce03f5191 100644 --- a/buildSrc/src/testKit/jarHell/build.gradle +++ b/buildSrc/src/testKit/jarHell/build.gradle @@ -15,6 +15,7 @@ repositories { jcenter() repositories { maven { + name "local" url System.getProperty("local.repo.path") } } diff --git a/buildSrc/src/testKit/testclusters/build.gradle b/buildSrc/src/testKit/testclusters/build.gradle index d02240c0ad2cd..4e9400d50cc8c 100644 --- 
a/buildSrc/src/testKit/testclusters/build.gradle +++ b/buildSrc/src/testKit/testclusters/build.gradle @@ -5,15 +5,21 @@ plugins { allprojects { all -> repositories { + flatDir { + dir System.getProperty("test.local-test-downloads-path") + } maven { + name "local" url System.getProperty("local.repo.path") } String luceneSnapshotRevision = System.getProperty("test.lucene-snapshot-revision") if (luceneSnapshotRevision != null) { maven { - url "http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + luceneSnapshotRevision + name "lucene-snapshots" + url "https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + luceneSnapshotRevision } } + jcenter() } @@ -22,9 +28,10 @@ allprojects { all -> all.testClusters { myTestCluster { - distribution = 'ZIP' + distribution = 'DEFAULT' version = System.getProperty("test.version_under_test") javaHome = file(System.getProperty('java.home')) + plugin file("${project(":dummyPlugin").buildDir}/distributions/dummy-${version}.zip") } } @@ -40,6 +47,19 @@ allprojects { all -> println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}" } } + syncTestClustersArtifacts { + dependsOn ":dummyPlugin:bundlePlugin" + } + } +} + +task printLog { + useCluster testClusters.myTestCluster + doFirst { + println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}" + testClusters.myTestCluster.logLines().each { + println it + } } } @@ -77,6 +97,6 @@ task illegalConfigAlter { useCluster testClusters.myTestCluster doFirst { println "Going to alter configuration after use" - testClusters.myTestCluster.distribution = 'ZIP_OSS' + testClusters.myTestCluster.distribution = 'OSS' } } diff --git a/buildSrc/src/testKit/testclusters/dummyPlugin/build.gradle b/buildSrc/src/testKit/testclusters/dummyPlugin/build.gradle new file mode 100644 index 0000000000000..73833fc80ccca --- /dev/null +++ b/buildSrc/src/testKit/testclusters/dummyPlugin/build.gradle @@ -0,0 +1,11 @@ +apply plugin: 'elasticsearch.esplugin' + +version = System.getProperty("test.version_under_test") + +esplugin { + name 'dummy' + description 'A dummy plugin used for testing' + classname 'DummyPlugin' + licenseFile rootProject.file('empty.txt') + noticeFile rootProject.file('empty.txt') +} \ No newline at end of file diff --git a/buildSrc/src/testKit/testclusters/dummyPlugin/src/main/java/DummyPlugin.java b/buildSrc/src/testKit/testclusters/dummyPlugin/src/main/java/DummyPlugin.java new file mode 100644 index 0000000000000..fff45b7365e18 --- /dev/null +++ b/buildSrc/src/testKit/testclusters/dummyPlugin/src/main/java/DummyPlugin.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.common.settings.Settings; + +import java.nio.file.Path; + +public class DummyPlugin extends Plugin { + + public DummyPlugin(final Settings settings, final Path configPath) { + } + +} diff --git a/test/fixtures/hdfs-fixture/docker-compose.yml b/buildSrc/src/testKit/testclusters/empty.txt similarity index 100% rename from test/fixtures/hdfs-fixture/docker-compose.yml rename to buildSrc/src/testKit/testclusters/empty.txt diff --git a/buildSrc/src/testKit/testclusters/settings.gradle b/buildSrc/src/testKit/testclusters/settings.gradle index 6549a93801b63..0b90220482d5d 100644 --- a/buildSrc/src/testKit/testclusters/settings.gradle +++ b/buildSrc/src/testKit/testclusters/settings.gradle @@ -1,4 +1,5 @@ include 'dummyPlugin' include ':alpha' include ':bravo' -include ':charlie' \ No newline at end of file +include ':charlie' +include 'dummyPlugin' \ No newline at end of file diff --git a/buildSrc/src/testKit/testingConventions/build.gradle b/buildSrc/src/testKit/testingConventions/build.gradle index 0052245099112..dd82a18085fa1 100644 --- a/buildSrc/src/testKit/testingConventions/build.gradle +++ b/buildSrc/src/testKit/testingConventions/build.gradle @@ -25,22 +25,16 @@ allprojects { baseClasses = [] } } - - unitTest.enabled = false } project(':empty_test_task') { task emptyTest(type: Test) { } - - task emptyTestRandomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) { - - } } project(':all_classes_in_tasks') { - task emptyTestRandomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) { + test { include "**/Convention*" } } @@ -54,14 +48,14 @@ project(':not_implementing_base') { baseClass 'org.elasticsearch.gradle.testkit.Integration' } } - task randomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) { + test { include "**/*IT.class" include "**/*Tests.class" } } project(':valid_setup_no_base') { - task randomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) { + test { include "**/*IT.class" include "**/*Tests.class" } @@ -72,7 +66,7 @@ project(':tests_in_main') { } project (':valid_setup_with_base') { - task randomized(type: com.carrotsearch.gradle.junit4.RandomizedTestingTask) { + test { include "**/*IT.class" include "**/*Tests.class" } diff --git a/buildSrc/src/testKit/thirdPartyAudit/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/build.gradle index 42e0a22cceaa5..725be970fd952 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/build.gradle @@ -14,6 +14,7 @@ repositories { * - version 0.0.2 has the same class and one extra file just to make the jar different */ maven { + name = "local-test" url = file("sample_jars/build/testrepo") } jcenter() diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 118ab2f905f74..9b06ecb8da2e0 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,9 +1,14 @@ -elasticsearch = 7.0.0 -lucene = 8.0.0-snapshot-83f9835 +elasticsearch = 7.1.2 +lucene = 8.0.0 + +bundled_jdk = 12.0.1+12@69cfe15208a647278a19ef0990eea691 # optional dependencies spatial4j = 0.7 jts = 1.15.0 +# note that ingest-geoip has a hard-coded version; if you modify this version, +# you should also inspect that version to see if it can be advanced along with +# the com.maxmind.geoip2:geoip2 dependency jackson = 2.8.11 snakeyaml = 1.17 icu4j = 62.1 @@ -18,6 +23,12 @@ jna = 4.5.1 netty = 4.1.32.Final joda = 2.10.1 +# when updating this version, you need to ensure 
compatibility with: +# - plugins/ingest-attachment (transitive dependency, check the upstream POM) +# - distribution/tools/plugin-cli +# - x-pack/plugin/security +bouncycastle = 1.61 + # test dependencies randomizedrunner = 2.7.1 junit = 4.12 diff --git a/client/benchmark/build.gradle b/client/benchmark/build.gradle index f8beeafd14e09..a53f102034009 100644 --- a/client/benchmark/build.gradle +++ b/client/benchmark/build.gradle @@ -29,7 +29,7 @@ archivesBaseName = 'client-benchmarks' mainClassName = 'org.elasticsearch.client.benchmark.BenchmarkMain' // never try to invoke tests on the benchmark project - there aren't any -unitTest.enabled = false +test.enabled = false dependencies { compile 'org.apache.commons:commons-math3:3.2' diff --git a/client/client-benchmark-noop-api-plugin/build.gradle b/client/client-benchmark-noop-api-plugin/build.gradle index 6df52237e367e..b5a5fb5dc5ed1 100644 --- a/client/client-benchmark-noop-api-plugin/build.gradle +++ b/client/client-benchmark-noop-api-plugin/build.gradle @@ -36,5 +36,5 @@ dependenciesInfo.enabled = false compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" // no unit tests -unitTest.enabled = false +test.enabled = false integTest.enabled = false diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java index 9c632afe19192..8805af367a80e 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java @@ -76,7 +76,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT)); bulkRequest.setRefreshPolicy(request.param("refresh")); bulkRequest.add(request.requiredContent(), defaultIndex, defaultType, defaultRouting, - null, defaultPipeline, null, true, request.getXContentType()); + null, defaultPipeline, true, request.getXContentType()); // short circuit the call to the transport layer return channel -> { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java index 9a17dabf39504..373b94124d43e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java @@ -23,8 +23,11 @@ import org.elasticsearch.client.ccr.CcrStatsRequest; import org.elasticsearch.client.ccr.CcrStatsResponse; import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest; +import org.elasticsearch.client.ccr.FollowInfoRequest; +import org.elasticsearch.client.ccr.FollowInfoResponse; import org.elasticsearch.client.ccr.FollowStatsRequest; import org.elasticsearch.client.ccr.FollowStatsResponse; +import org.elasticsearch.client.ccr.ForgetFollowerRequest; import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest; import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse; import org.elasticsearch.client.ccr.PauseFollowRequest; @@ -34,6 +37,7 @@ import org.elasticsearch.client.ccr.ResumeFollowRequest; import org.elasticsearch.client.ccr.UnfollowRequest; import org.elasticsearch.client.core.AcknowledgedResponse; 
+import org.elasticsearch.client.core.BroadcastResponse; import java.io.IOException; import java.util.Collections; @@ -231,6 +235,48 @@ public void unfollowAsync(UnfollowRequest request, ); } + /** + * Instructs an index acting as a leader index to forget the specified follower index. + * + * See the docs for more details + * on the intended usage of this API. + * + * @param request the request + * @param options the request options (e.g., headers), use {@link RequestOptions#DEFAULT} if the defaults are acceptable. + * @return the response + * @throws IOException if an I/O exception occurs while executing this request + */ + public BroadcastResponse forgetFollower(final ForgetFollowerRequest request, final RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( + request, + CcrRequestConverters::forgetFollower, + options, + BroadcastResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Asynchronously instructs an index acting as a leader index to forget the specified follower index. + * + * See the docs for more details + * on the intended usage of this API. + * + * @param request the request + * @param options the request options (e.g., headers), use {@link RequestOptions#DEFAULT} if the defaults are acceptable. + */ + public void forgetFollowerAsync( + final ForgetFollowerRequest request, + final RequestOptions options, + final ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( + request, + CcrRequestConverters::forgetFollower, + options, + BroadcastResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Stores an auto follow pattern. * @@ -452,4 +498,46 @@ public void getFollowStatsAsync(FollowStatsRequest request, ); } + /** + * Gets follow info for specific indices. + * + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public FollowInfoResponse getFollowInfo(FollowInfoRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( + request, + CcrRequestConverters::getFollowInfo, + options, + FollowInfoResponse::fromXContent, + Collections.emptySet() + ); + } + + /** + * Asynchronously gets follow info for specific indices. + * + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public void getFollowInfoAsync(FollowInfoRequest request, + RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( + request, + CcrRequestConverters::getFollowInfo, + options, + FollowInfoResponse::fromXContent, + listener, + Collections.emptySet() + ); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java index 526db2a86a761..a3f5d7e79fda7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java @@ -25,7 +25,9 @@ import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.ccr.CcrStatsRequest; import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest; +import org.elasticsearch.client.ccr.FollowInfoRequest; import org.elasticsearch.client.ccr.FollowStatsRequest; +import org.elasticsearch.client.ccr.ForgetFollowerRequest; import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest; import org.elasticsearch.client.ccr.PauseFollowRequest; import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest; @@ -78,6 +80,17 @@ static Request unfollow(UnfollowRequest unfollowRequest) { return new Request(HttpPost.METHOD_NAME, endpoint); } + static Request forgetFollower(final ForgetFollowerRequest forgetFollowerRequest) throws IOException { + final String endpoint = new RequestConverters.EndpointBuilder() + .addPathPart(forgetFollowerRequest.leaderIndex()) + .addPathPartAsIs("_ccr") + .addPathPartAsIs("forget_follower") + .build(); + final Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(forgetFollowerRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request putAutoFollowPattern(PutAutoFollowPatternRequest putAutoFollowPatternRequest) throws IOException { String endpoint = new RequestConverters.EndpointBuilder() .addPathPartAsIs("_ccr", "auto_follow") @@ -119,4 +132,12 @@ static Request getFollowStats(FollowStatsRequest followStatsRequest) { return new Request(HttpGet.METHOD_NAME, endpoint); } + static Request getFollowInfo(FollowInfoRequest followInfoRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPart(followInfoRequest.getFollowerIndex()) + .addPathPartAsIs("_ccr", "info") + .build(); + return new Request(HttpGet.METHOD_NAME, endpoint); + } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java index 2f5692d0ed9c8..a95115f71faa8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java @@ -19,15 +19,9 @@ package org.elasticsearch.client; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.migration.DeprecationInfoRequest; import org.elasticsearch.client.migration.DeprecationInfoResponse; -import org.elasticsearch.client.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.client.migration.IndexUpgradeInfoResponse; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.tasks.TaskSubmissionResponse; -import 
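The CcrClient and CcrRequestConverters changes above add the forget-follower and follow-info APIs to the high-level REST client, building the `/{index}/_ccr/forget_follower` and `/{index}/_ccr/info` endpoints. A hedged usage sketch follows; the host, index names, remote-cluster alias, and UUID are placeholders, while the client methods and response accessors come from the diff.

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ccr.FollowInfoRequest;
import org.elasticsearch.client.ccr.FollowInfoResponse;
import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.core.BroadcastResponse;

// Sketch: exercise the two CCR APIs added above. All concrete names (host,
// indices, cluster aliases, UUID) are illustrative placeholders.
public class CcrClientUsage {

    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {

            // GET /<follower_index>/_ccr/info
            FollowInfoResponse info = client.ccr()
                .getFollowInfo(new FollowInfoRequest("follower-index"), RequestOptions.DEFAULT);
            info.getInfos().forEach(i ->
                System.out.println(i.getFollowerIndex() + " follows " + i.getLeaderIndex() + " (" + i.getStatus() + ")"));

            // POST /<leader_index>/_ccr/forget_follower (expert API, see the javadoc above)
            ForgetFollowerRequest forget = new ForgetFollowerRequest(
                "follower-cluster", "follower-index", "follower-index-uuid", "leader-remote-cluster", "leader-index");
            BroadcastResponse response = client.ccr().forgetFollower(forget, RequestOptions.DEFAULT);
            System.out.println("failed shards: " + response.shards().failed());
        }
    }
}
```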
org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.client.migration.IndexUpgradeRequest; - import java.io.IOException; import java.util.Collections; @@ -47,34 +41,6 @@ public final class MigrationClient { this.restHighLevelClient = restHighLevelClient; } - /** - * Get Migration Assistance for one or more indices - * - * @param request the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response - */ - public IndexUpgradeInfoResponse getAssistance(IndexUpgradeInfoRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, MigrationRequestConverters::getMigrationAssistance, options, - IndexUpgradeInfoResponse::fromXContent, Collections.emptySet()); - } - - public BulkByScrollResponse upgrade(IndexUpgradeRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, MigrationRequestConverters::migrate, options, - BulkByScrollResponse::fromXContent, Collections.emptySet()); - } - - public TaskSubmissionResponse submitUpgradeTask(IndexUpgradeRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, MigrationRequestConverters::submitMigrateTask, options, - TaskSubmissionResponse::fromXContent, Collections.emptySet()); - } - - public void upgradeAsync(IndexUpgradeRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, MigrationRequestConverters::migrate, options, - BulkByScrollResponse::fromXContent, listener, Collections.emptySet()); - } - /** * Get deprecation info for one or more indices * @param request the request diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java index 2293c855bf2ca..77d38c13bc0a9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationRequestConverters.java @@ -20,35 +20,13 @@ package org.elasticsearch.client; import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; import org.elasticsearch.client.migration.DeprecationInfoRequest; -import org.elasticsearch.client.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.client.migration.IndexUpgradeRequest; final class MigrationRequestConverters { private MigrationRequestConverters() { } - static Request getMigrationAssistance(IndexUpgradeInfoRequest indexUpgradeInfoRequest) { - RequestConverters.EndpointBuilder endpointBuilder = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_migration", "assistance") - .addCommaSeparatedPathParts(indexUpgradeInfoRequest.indices()); - String endpoint = endpointBuilder.build(); - Request request = new Request(HttpGet.METHOD_NAME, endpoint); - RequestConverters.Params parameters = new RequestConverters.Params(request); - parameters.withIndicesOptions(indexUpgradeInfoRequest.indicesOptions()); - return request; - } - - static Request migrate(IndexUpgradeRequest indexUpgradeRequest) { - return prepareMigrateRequest(indexUpgradeRequest, true); - } - - static 
Request submitMigrateTask(IndexUpgradeRequest indexUpgradeRequest) { - return prepareMigrateRequest(indexUpgradeRequest, false); - } - static Request getDeprecationInfo(DeprecationInfoRequest deprecationInfoRequest) { String endpoint = new RequestConverters.EndpointBuilder() .addCommaSeparatedPathParts(deprecationInfoRequest.getIndices()) @@ -57,18 +35,4 @@ static Request getDeprecationInfo(DeprecationInfoRequest deprecationInfoRequest) return new Request(HttpGet.METHOD_NAME, endpoint); } - - private static Request prepareMigrateRequest(IndexUpgradeRequest indexUpgradeRequest, boolean waitForCompletion) { - String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_migration", "upgrade") - .addPathPart(indexUpgradeRequest.index()) - .build(); - - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - - RequestConverters.Params params = new RequestConverters.Params(request) - .withWaitForCompletion(waitForCompletion); - - return request; - } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 51ed51d1a696a..4662f8b49bfbb 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -94,9 +94,9 @@ import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilter; import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilters; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid; -import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal; import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; @@ -427,7 +427,7 @@ public IndexLifecycleClient indexLifecycle() { } /** - * Provides methods for accessing the Elastic Licensed Licensing APIs that + * Provides methods for accessing the Elastic Licensed Migration APIs that * are shipped with the default distribution of Elasticsearch. All of * these APIs will 404 if run against the OSS distribution of Elasticsearch. *

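With the index-upgrade assistance APIs removed above, MigrationClient keeps only the deprecation-info call. A minimal sketch of that remaining API follows, assuming DeprecationInfoRequest's list constructor and the response's getClusterSettingsIssues() accessor, which are not shown in this diff; the host and index name are placeholders.

```java
import java.util.Collections;

import org.apache.http.HttpHost;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.migration.DeprecationInfoRequest;
import org.elasticsearch.client.migration.DeprecationInfoResponse;

// Sketch: the surviving Migration API after the removals above. The request
// constructor and response accessor used here are assumptions; index name and
// host are illustrative placeholders.
public class MigrationClientUsage {

    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            DeprecationInfoRequest request = new DeprecationInfoRequest(Collections.singletonList("my-index"));
            DeprecationInfoResponse response = client.migration()
                .getDeprecationInfo(request, RequestOptions.DEFAULT);
            System.out.println("cluster-level deprecation issues: " + response.getClusterSettingsIssues().size());
        }
    }
}
```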
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowConfig.java index eb9b5e80767db..37b0c5fa2c5d9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowConfig.java @@ -22,8 +22,10 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -41,6 +43,44 @@ public class FollowConfig { static final ParseField MAX_RETRY_DELAY_FIELD = new ParseField("max_retry_delay"); static final ParseField READ_POLL_TIMEOUT = new ParseField("read_poll_timeout"); + private static final ObjectParser PARSER = new ObjectParser<>( + "follow_config", + true, + FollowConfig::new); + + static { + PARSER.declareInt(FollowConfig::setMaxReadRequestOperationCount, MAX_READ_REQUEST_OPERATION_COUNT); + PARSER.declareInt(FollowConfig::setMaxOutstandingReadRequests, MAX_OUTSTANDING_READ_REQUESTS); + PARSER.declareField( + FollowConfig::setMaxReadRequestSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()), + MAX_READ_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareInt(FollowConfig::setMaxWriteRequestOperationCount, MAX_WRITE_REQUEST_OPERATION_COUNT); + PARSER.declareField( + FollowConfig::setMaxWriteRequestSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_REQUEST_SIZE.getPreferredName()), + MAX_WRITE_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareInt(FollowConfig::setMaxOutstandingWriteRequests, MAX_OUTSTANDING_WRITE_REQUESTS); + PARSER.declareInt(FollowConfig::setMaxWriteBufferCount, MAX_WRITE_BUFFER_COUNT); + PARSER.declareField( + FollowConfig::setMaxWriteBufferSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_BUFFER_SIZE.getPreferredName()), + MAX_WRITE_BUFFER_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareField(FollowConfig::setMaxRetryDelay, + (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY_FIELD.getPreferredName()), + MAX_RETRY_DELAY_FIELD, ObjectParser.ValueType.STRING); + PARSER.declareField(FollowConfig::setReadPollTimeout, + (p, c) -> TimeValue.parseTimeValue(p.text(), READ_POLL_TIMEOUT.getPreferredName()), + READ_POLL_TIMEOUT, ObjectParser.ValueType.STRING); + } + + static FollowConfig fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + private Integer maxReadRequestOperationCount; private Integer maxOutstandingReadRequests; private ByteSizeValue maxReadRequestSize; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequestTests.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowInfoRequest.java similarity index 66% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequestTests.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowInfoRequest.java index 86250fdaec274..3fe9136fcbdb7 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequestTests.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowInfoRequest.java @@ -17,14 +17,21 @@ * under the License. */ -package org.elasticsearch.client.migration; +package org.elasticsearch.client.ccr; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.client.Validatable; -public class IndexUpgradeInfoRequestTests extends ESTestCase { +import java.util.Objects; - public void testNullIndices() { - expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest((String[])null)); - expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest().indices((String[])null)); +public final class FollowInfoRequest implements Validatable { + + private final String followerIndex; + + public FollowInfoRequest(String followerIndex) { + this.followerIndex = Objects.requireNonNull(followerIndex); + } + + public String getFollowerIndex() { + return followerIndex; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowInfoResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowInfoResponse.java new file mode 100644 index 0000000000000..3e15db318a3c1 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowInfoResponse.java @@ -0,0 +1,178 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.ccr; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.List; +import java.util.Objects; + +public final class FollowInfoResponse { + + static final ParseField FOLLOWER_INDICES_FIELD = new ParseField("follower_indices"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "indices", + true, + args -> { + @SuppressWarnings("unchecked") + List infos = (List) args[0]; + return new FollowInfoResponse(infos); + }); + + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), FollowerInfo.PARSER, FOLLOWER_INDICES_FIELD); + } + + public static FollowInfoResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + private final List infos; + + FollowInfoResponse(List infos) { + this.infos = infos; + } + + public List getInfos() { + return infos; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FollowInfoResponse that = (FollowInfoResponse) o; + return infos.equals(that.infos); + } + + @Override + public int hashCode() { + return Objects.hash(infos); + } + + public static final class FollowerInfo { + + static final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index"); + static final ParseField REMOTE_CLUSTER_FIELD = new ParseField("remote_cluster"); + static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index"); + static final ParseField STATUS_FIELD = new ParseField("status"); + static final ParseField PARAMETERS_FIELD = new ParseField("parameters"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "follower_info", + true, + args -> { + return new FollowerInfo((String) args[0], (String) args[1], (String) args[2], + Status.fromString((String) args[3]), (FollowConfig) args[4]); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), FOLLOWER_INDEX_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), REMOTE_CLUSTER_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_INDEX_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), STATUS_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> FollowConfig.fromXContent(p), PARAMETERS_FIELD); + } + + private final String followerIndex; + private final String remoteCluster; + private final String leaderIndex; + private final Status status; + private final FollowConfig parameters; + + FollowerInfo(String followerIndex, String remoteCluster, String leaderIndex, Status status, + FollowConfig parameters) { + this.followerIndex = followerIndex; + this.remoteCluster = remoteCluster; + this.leaderIndex = leaderIndex; + this.status = status; + this.parameters = parameters; + } + + public String getFollowerIndex() { + return followerIndex; + } + + public String getRemoteCluster() { + return remoteCluster; + } + + public String getLeaderIndex() { + return leaderIndex; + } + + public Status getStatus() { + return status; + } + + public FollowConfig getParameters() { + return parameters; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FollowerInfo that = (FollowerInfo) o; + return 
Objects.equals(followerIndex, that.followerIndex) && + Objects.equals(remoteCluster, that.remoteCluster) && + Objects.equals(leaderIndex, that.leaderIndex) && + status == that.status && + Objects.equals(parameters, that.parameters); + } + + @Override + public int hashCode() { + return Objects.hash(followerIndex, remoteCluster, leaderIndex, status, parameters); + } + + } + + public enum Status { + + ACTIVE("active"), + PAUSED("paused"); + + private final String name; + + Status(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + public static Status fromString(String value) { + switch (value) { + case "active": + return Status.ACTIVE; + case "paused": + return Status.PAUSED; + default: + throw new IllegalArgumentException("unexpected status value [" + value + "]"); + } + } + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ForgetFollowerRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ForgetFollowerRequest.java new file mode 100644 index 0000000000000..3d20a6d934d9d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ForgetFollowerRequest.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.ccr; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Represents a forget follower request. Note that this an expert API intended to be used only when unfollowing a follower index fails to + * remove the follower retention leases. Please be sure that you understand the purpose this API before using. + */ +public final class ForgetFollowerRequest implements ToXContentObject, Validatable { + + private final String followerCluster; + + private final String followerIndex; + + private final String followerIndexUUID; + + private final String leaderRemoteCluster; + + private final String leaderIndex; + + /** + * The name of the leader index. + * + * @return the name of the leader index + */ + public String leaderIndex() { + return leaderIndex; + } + + /** + * Construct a forget follower request. 
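FollowInfoResponse above parses a `follower_indices` array whose entries carry the follower index, remote cluster, leader index, a status, and an optional `parameters` object handled by the new FollowConfig parser. The sketch below feeds a sample body through `FollowInfoResponse.fromXContent`; the sample document and its values are illustrative, and it assumes the common XContent parser helpers (`XContentType.JSON.xContent().createParser(...)`) available to the client.

```java
import org.elasticsearch.client.ccr.FollowInfoResponse;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

// Sketch: the response shape FollowInfoResponse.fromXContent(...) accepts.
// Field names follow the ParseFields declared above; the concrete values are
// illustrative.
public class FollowInfoParsingExample {

    public static void main(String[] args) throws Exception {
        String json =
            "{\n" +
            "  \"follower_indices\": [\n" +
            "    {\n" +
            "      \"follower_index\": \"follower-index\",\n" +
            "      \"remote_cluster\": \"leader-cluster\",\n" +
            "      \"leader_index\": \"leader-index\",\n" +
            "      \"status\": \"active\",\n" +
            "      \"parameters\": { \"max_read_request_operation_count\": 5120 }\n" +
            "    }\n" +
            "  ]\n" +
            "}";
        try (XContentParser parser = XContentType.JSON.xContent().createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            FollowInfoResponse response = FollowInfoResponse.fromXContent(parser);
            System.out.println(response.getInfos().get(0).getStatus()); // ACTIVE
        }
    }
}
```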
+ * + * @param followerCluster the name of the cluster containing the follower index to forget + * @param followerIndex the name of follower index + * @param followerIndexUUID the UUID of the follower index + * @param leaderRemoteCluster the alias of the remote cluster containing the leader index from the perspective of the follower index + * @param leaderIndex the name of the leader index + */ + public ForgetFollowerRequest( + final String followerCluster, + final String followerIndex, + final String followerIndexUUID, + final String leaderRemoteCluster, + final String leaderIndex) { + this.followerCluster = Objects.requireNonNull(followerCluster); + this.followerIndex = Objects.requireNonNull(followerIndex); + this.followerIndexUUID = Objects.requireNonNull(followerIndexUUID); + this.leaderRemoteCluster = Objects.requireNonNull(leaderRemoteCluster); + this.leaderIndex = Objects.requireNonNull(leaderIndex); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + builder.field("follower_cluster", followerCluster); + builder.field("follower_index", followerIndex); + builder.field("follower_index_uuid", followerIndexUUID); + builder.field("leader_remote_cluster", leaderRemoteCluster); + } + builder.endObject(); + return builder; + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java new file mode 100644 index 0000000000000..3665ba5bf5009 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java @@ -0,0 +1,175 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.Objects; + +/** + * Represents a response to a request that is broadcast to a collection of shards. + */ +public class BroadcastResponse { + + private final Shards shards; + + /** + * Represents the shard-level summary of the response execution. 
+ * + * @return the shard-level response summary + */ + public Shards shards() { + return shards; + } + + BroadcastResponse(final Shards shards) { + this.shards = Objects.requireNonNull(shards); + } + + private static final ParseField SHARDS_FIELD = new ParseField("_shards"); + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "broadcast_response", + a -> new BroadcastResponse((Shards) a[0])); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), Shards.SHARDS_PARSER, SHARDS_FIELD); + } + + /** + * Parses a broadcast response. + * + * @param parser the parser + * @return a broadcast response parsed from the specified parser + * @throws IOException if an I/O exception occurs parsing the response + */ + public static BroadcastResponse fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + /** + * Represents the results of a collection of shards on which a request was executed against. + */ + public static class Shards { + + private final int total; + + /** + * The total number of shards on which a request was executed against. + * + * @return the total number of shards + */ + public int total() { + return total; + } + + private final int successful; + + /** + * The number of successful shards on which a request was executed against. + * + * @return the number of successful shards + */ + public int successful() { + return successful; + } + + private final int skipped; + + /** + * The number of shards skipped by the request. + * + * @return the number of skipped shards + */ + public int skipped() { + return skipped; + } + + private final int failed; + + /** + * The number of shards on which a request failed to be executed against. + * + * @return the number of failed shards + */ + public int failed() { + return failed; + } + + private final Collection failures; + + /** + * The failures corresponding to the shards on which a request failed to be executed against. Note that the number of failures might + * not match {@link #failed()} as some responses group together shard failures. + * + * @return the failures + */ + public Collection failures() { + return failures; + } + + Shards( + final int total, + final int successful, + final int skipped, + final int failed, + final Collection failures) { + this.total = total; + this.successful = successful; + this.skipped = skipped; + this.failed = failed; + this.failures = Collections.unmodifiableCollection(Objects.requireNonNull(failures)); + } + + private static final ParseField TOTAL_FIELD = new ParseField("total"); + private static final ParseField SUCCESSFUL_FIELD = new ParseField("successful"); + private static final ParseField SKIPPED_FIELD = new ParseField("skipped"); + private static final ParseField FAILED_FIELD = new ParseField("failed"); + private static final ParseField FAILURES_FIELD = new ParseField("failures"); + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser SHARDS_PARSER = new ConstructingObjectParser<>( + "shards", + a -> new Shards( + (int) a[0], // total + (int) a[1], // successful + a[2] == null ? 0 : (int) a[2], // skipped + (int) a[3], // failed + a[4] == null ? 
Collections.emptyList() : (Collection) a[4])); // failures + + static { + SHARDS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), TOTAL_FIELD); + SHARDS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), SUCCESSFUL_FIELD); + SHARDS_PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), SKIPPED_FIELD); + SHARDS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), FAILED_FIELD); + SHARDS_PARSER.declareObjectArray( + ConstructingObjectParser.optionalConstructorArg(), + DefaultShardOperationFailedException.PARSER, FAILURES_FIELD); + } + + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java index 4d2a000a00c89..3040b8a121cf7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java @@ -108,10 +108,26 @@ public GraphExploreRequest indicesOptions(IndicesOptions indicesOptions) { return this; } + /** + * The document types to execute the explore against. Defaults to be executed against + * all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. + */ + @Deprecated public String[] types() { return this.types; } + /** + * The document types to execute the explore request against. Defaults to be executed against + * all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. + */ + @Deprecated public GraphExploreRequest types(String... types) { this.types = types; return this; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequest.java deleted file mode 100644 index 7a93bce1376b5..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequest.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.client.migration; - -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.TimedRequest; -import org.elasticsearch.common.Strings; - -import java.util.Arrays; -import java.util.Objects; - -/** - * A request for retrieving upgrade information - * Part of Migration API - */ -public class IndexUpgradeInfoRequest extends TimedRequest implements IndicesRequest.Replaceable { - - private String[] indices = Strings.EMPTY_ARRAY; - private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true); - - public IndexUpgradeInfoRequest(String... indices) { - indices(indices); - } - - @Override - public String[] indices() { - return indices; - } - - @Override - public IndexUpgradeInfoRequest indices(String... indices) { - this.indices = Objects.requireNonNull(indices, "indices cannot be null"); - return this; - } - - @Override - public IndicesOptions indicesOptions() { - return indicesOptions; - } - - public void indicesOptions(IndicesOptions indicesOptions) { - this.indicesOptions = indicesOptions; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - IndexUpgradeInfoRequest request = (IndexUpgradeInfoRequest) o; - return Arrays.equals(indices, request.indices) && - Objects.equals(indicesOptions.toString(), request.indicesOptions.toString()); - } - - @Override - public int hashCode() { - return Objects.hash(Arrays.hashCode(indices), indicesOptions.toString()); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeInfoResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeInfoResponse.java deleted file mode 100644 index 29b0b1907969b..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeInfoResponse.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.client.migration; - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.util.Map; -import java.util.Objects; -import java.util.stream.Collectors; - -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; - -/** - * Response object that contains information about indices to be upgraded - */ -public class IndexUpgradeInfoResponse { - - private static final ParseField INDICES = new ParseField("indices"); - private static final ParseField ACTION_REQUIRED = new ParseField("action_required"); - - private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>("IndexUpgradeInfoResponse", - true, - (a, c) -> { - @SuppressWarnings("unchecked") - Map map = (Map)a[0]; - Map actionsRequired = map.entrySet().stream() - .filter(e -> { - if (e.getValue() instanceof Map == false) { - return false; - } - @SuppressWarnings("unchecked") - Map value =(Map)e.getValue(); - return value.containsKey(ACTION_REQUIRED.getPreferredName()); - }) - .collect(Collectors.toMap( - Map.Entry::getKey, - e -> { - @SuppressWarnings("unchecked") - Map value = (Map) e.getValue(); - return UpgradeActionRequired.fromString((String)value.get(ACTION_REQUIRED.getPreferredName())); - } - )); - return new IndexUpgradeInfoResponse(actionsRequired); - }); - - static { - PARSER.declareObject(constructorArg(), (p, c) -> p.map(), INDICES); - } - - - private final Map actions; - - public IndexUpgradeInfoResponse(Map actions) { - this.actions = actions; - } - - public Map getActions() { - return actions; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - IndexUpgradeInfoResponse response = (IndexUpgradeInfoResponse) o; - return Objects.equals(actions, response.actions); - } - - @Override - public int hashCode() { - return Objects.hash(actions); - } - - public static IndexUpgradeInfoResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/UpgradeActionRequired.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/UpgradeActionRequired.java deleted file mode 100644 index 26b7b1e815d7f..0000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/UpgradeActionRequired.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.client.migration; - -import java.util.Locale; - -/** - * Indicates the type of the upgrade required for the index - */ -public enum UpgradeActionRequired { - NOT_APPLICABLE, // Indicates that the check is not applicable to this index type, the next check will be performed - UP_TO_DATE, // Indicates that the check finds this index to be up to date - no additional checks are required - REINDEX, // The index should be reindex - UPGRADE; // The index should go through the upgrade procedure - - public static UpgradeActionRequired fromString(String value) { - return UpgradeActionRequired.valueOf(value.toUpperCase(Locale.ROOT)); - } - - @Override - public String toString() { - return name().toLowerCase(Locale.ROOT); - } - -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java index 7afef0785fe38..c0e16622ba593 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/DataCounts.java @@ -34,7 +34,7 @@ * Job processed record counts. *

* The getInput... methods return the actual number of - * fields/records sent the the API including invalid records. + * fields/records sent to the API including invalid records. * The getProcessed... methods are the number sent to the * Engine. *

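A minimal usage sketch of the new ForgetFollowerRequest and BroadcastResponse classes added above, assuming an already-configured RestHighLevelClient and placeholder cluster names, index names, and follower index UUID; the forget-follower tests and documentation snippets later in this change exercise the same calls.

import java.io.IOException;

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.core.BroadcastResponse;

public class ForgetFollowerUsageSketch {

    // Asks the leader index to remove the retention leases it holds for a follower index.
    static void forgetFollower(RestHighLevelClient client) throws IOException {
        // All five arguments are required; the constructor null-checks each of them.
        // The cluster names, index names, and UUID below are placeholders.
        ForgetFollowerRequest request = new ForgetFollowerRequest(
                "follower_cluster",     // name of the cluster containing the follower index
                "follower",             // name of the follower index
                "follower_index_uuid",  // UUID of the follower index
                "local_cluster",        // remote cluster alias, from the follower's perspective
                "leader");              // name of the leader index

        // Executed against the leader index; the response is a shard-level summary.
        BroadcastResponse response = client.ccr().forgetFollower(request, RequestOptions.DEFAULT);
        BroadcastResponse.Shards shards = response.shards();
        System.out.printf("total=%d successful=%d skipped=%d failed=%d%n",
                shards.total(), shards.successful(), shards.skipped(), shards.failed());

        // failures() may group shard failures, so its size can differ from failed().
        shards.failures().forEach(failure -> System.out.println(failure.reason()));
    }
}

The same request/response flow is shown by CCRIT#testForgetFollower and the ccr-forget-follower documentation snippets further down in this change.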
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java index 52110989e1715..762e927551b8b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -447,7 +447,7 @@ private MultiGetRequest indexDocs(BulkProcessor processor, int numDocs, String l .source(XContentType.JSON, "field", randomRealisticUnicodeOfLengthBetween(1, 30))); } else { BytesArray data = bytesBulkRequest(localIndex, localType, i); - processor.add(data, globalIndex, globalType, globalPipeline, null, XContentType.JSON); + processor.add(data, globalIndex, globalType, globalPipeline, XContentType.JSON); if (localType != null) { // If the payload contains types, parsing it into a bulk request results in a warning. diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java index ee2685dee6d92..13e0af5f0b139 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java @@ -32,8 +32,11 @@ import org.elasticsearch.client.ccr.CcrStatsRequest; import org.elasticsearch.client.ccr.CcrStatsResponse; import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest; +import org.elasticsearch.client.ccr.FollowInfoRequest; +import org.elasticsearch.client.ccr.FollowInfoResponse; import org.elasticsearch.client.ccr.FollowStatsRequest; import org.elasticsearch.client.ccr.FollowStatsResponse; +import org.elasticsearch.client.ccr.ForgetFollowerRequest; import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest; import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse; import org.elasticsearch.client.ccr.IndicesFollowStats; @@ -45,19 +48,24 @@ import org.elasticsearch.client.ccr.ResumeFollowRequest; import org.elasticsearch.client.ccr.UnfollowRequest; import org.elasticsearch.client.core.AcknowledgedResponse; +import org.elasticsearch.client.core.BroadcastResponse; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.rest.yaml.ObjectPath; import org.junit.Before; import java.io.IOException; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -113,6 +121,15 @@ public void testIndexFollowing() throws Exception { try { assertBusy(() -> { + FollowInfoRequest followInfoRequest = new FollowInfoRequest("follower"); + FollowInfoResponse followInfoResponse = + execute(followInfoRequest, ccrClient::getFollowInfo, ccrClient::getFollowInfoAsync); + assertThat(followInfoResponse.getInfos().size(), equalTo(1)); + assertThat(followInfoResponse.getInfos().get(0).getFollowerIndex(), equalTo("follower")); + assertThat(followInfoResponse.getInfos().get(0).getLeaderIndex(), equalTo("leader")); + 
assertThat(followInfoResponse.getInfos().get(0).getRemoteCluster(), equalTo("local_cluster")); + assertThat(followInfoResponse.getInfos().get(0).getStatus(), equalTo(FollowInfoResponse.Status.ACTIVE)); + FollowStatsRequest followStatsRequest = new FollowStatsRequest("follower"); FollowStatsResponse followStatsResponse = execute(followStatsRequest, ccrClient::getFollowStats, ccrClient::getFollowStatsAsync); @@ -170,6 +187,17 @@ public void testIndexFollowing() throws Exception { pauseFollowResponse = execute(pauseFollowRequest, ccrClient::pauseFollow, ccrClient::pauseFollowAsync); assertThat(pauseFollowResponse.isAcknowledged(), is(true)); + assertBusy(() -> { + FollowInfoRequest followInfoRequest = new FollowInfoRequest("follower"); + FollowInfoResponse followInfoResponse = + execute(followInfoRequest, ccrClient::getFollowInfo, ccrClient::getFollowInfoAsync); + assertThat(followInfoResponse.getInfos().size(), equalTo(1)); + assertThat(followInfoResponse.getInfos().get(0).getFollowerIndex(), equalTo("follower")); + assertThat(followInfoResponse.getInfos().get(0).getLeaderIndex(), equalTo("leader")); + assertThat(followInfoResponse.getInfos().get(0).getRemoteCluster(), equalTo("local_cluster")); + assertThat(followInfoResponse.getInfos().get(0).getStatus(), equalTo(FollowInfoResponse.Status.PAUSED)); + }); + // Need to close index prior to unfollowing it: CloseIndexRequest closeIndexRequest = new CloseIndexRequest("follower"); org.elasticsearch.action.support.master.AcknowledgedResponse closeIndexReponse = @@ -181,6 +209,61 @@ public void testIndexFollowing() throws Exception { assertThat(unfollowResponse.isAcknowledged(), is(true)); } + public void testForgetFollower() throws IOException { + final CcrClient ccrClient = highLevelClient().ccr(); + + final CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader"); + final Map settings = new HashMap<>(3); + final int numberOfShards = randomIntBetween(1, 2); + settings.put("index.number_of_replicas", "0"); + settings.put("index.number_of_shards", Integer.toString(numberOfShards)); + settings.put("index.soft_deletes.enabled", Boolean.TRUE.toString()); + createIndexRequest.settings(settings); + final CreateIndexResponse response = highLevelClient().indices().create(createIndexRequest, RequestOptions.DEFAULT); + assertThat(response.isAcknowledged(), is(true)); + + final PutFollowRequest putFollowRequest = new PutFollowRequest("local_cluster", "leader", "follower", ActiveShardCount.ONE); + final PutFollowResponse putFollowResponse = execute(putFollowRequest, ccrClient::putFollow, ccrClient::putFollowAsync); + assertTrue(putFollowResponse.isFollowIndexCreated()); + assertTrue(putFollowResponse.isFollowIndexShardsAcked()); + assertTrue(putFollowResponse.isIndexFollowingStarted()); + + final String clusterName = highLevelClient().info(RequestOptions.DEFAULT).getClusterName().value(); + + final Request statsRequest = new Request("GET", "/follower/_stats"); + final Response statsResponse = client().performRequest(statsRequest); + final ObjectPath statsObjectPath = ObjectPath.createFromResponse(statsResponse); + final String followerIndexUUID = statsObjectPath.evaluate("indices.follower.uuid"); + + final PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower"); + AcknowledgedResponse pauseFollowResponse = execute(pauseFollowRequest, ccrClient::pauseFollow, ccrClient::pauseFollowAsync); + assertTrue(pauseFollowResponse.isAcknowledged()); + + final ForgetFollowerRequest forgetFollowerRequest = + new 
ForgetFollowerRequest(clusterName, "follower", followerIndexUUID, "local_cluster", "leader"); + final BroadcastResponse forgetFollowerResponse = + execute(forgetFollowerRequest, ccrClient::forgetFollower, ccrClient::forgetFollowerAsync); + assertThat(forgetFollowerResponse.shards().total(), equalTo(numberOfShards)); + assertThat(forgetFollowerResponse.shards().successful(), equalTo(numberOfShards)); + assertThat(forgetFollowerResponse.shards().skipped(), equalTo(0)); + assertThat(forgetFollowerResponse.shards().failed(), equalTo(0)); + assertThat(forgetFollowerResponse.shards().failures(), empty()); + + final Request retentionLeasesRequest = new Request("GET", "/leader/_stats"); + retentionLeasesRequest.addParameter("level", "shards"); + final Response retentionLeasesResponse = client().performRequest(retentionLeasesRequest); + final Map shardsStats = ObjectPath.createFromResponse(retentionLeasesResponse).evaluate("indices.leader.shards"); + assertThat(shardsStats.keySet(), hasSize(numberOfShards)); + for (int i = 0; i < numberOfShards; i++) { + final List shardStats = (List) shardsStats.get(Integer.toString(i)); + assertThat(shardStats, hasSize(1)); + final Map shardStatsAsMap = (Map) shardStats.get(0); + final Map retentionLeasesStats = (Map) shardStatsAsMap.get("retention_leases"); + final List leases = (List) retentionLeasesStats.get("leases"); + assertThat(leases, empty()); + } + } + public void testAutoFollowing() throws Exception { CcrClient ccrClient = highLevelClient().ccr(); PutAutoFollowPatternRequest putAutoFollowPatternRequest = diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CcrRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CcrRequestConvertersTests.java new file mode 100644 index 0000000000000..393b7b9ba6f20 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CcrRequestConvertersTests.java @@ -0,0 +1,206 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.client.ccr.CcrStatsRequest; +import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest; +import org.elasticsearch.client.ccr.FollowConfig; +import org.elasticsearch.client.ccr.FollowInfoRequest; +import org.elasticsearch.client.ccr.FollowStatsRequest; +import org.elasticsearch.client.ccr.ForgetFollowerRequest; +import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest; +import org.elasticsearch.client.ccr.PauseFollowRequest; +import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest; +import org.elasticsearch.client.ccr.PutFollowRequest; +import org.elasticsearch.client.ccr.ResumeFollowRequest; +import org.elasticsearch.client.ccr.UnfollowRequest; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Locale; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class CcrRequestConvertersTests extends ESTestCase { + + public void testPutFollow() throws Exception { + PutFollowRequest putFollowRequest = new PutFollowRequest(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), + randomBoolean() ? randomFrom(ActiveShardCount.NONE, ActiveShardCount.ONE, ActiveShardCount.DEFAULT, ActiveShardCount.ALL) : null + ); + randomizeRequest(putFollowRequest); + Request result = CcrRequestConverters.putFollow(putFollowRequest); + assertThat(result.getMethod(), equalTo(HttpPut.METHOD_NAME)); + assertThat(result.getEndpoint(), equalTo("/" + putFollowRequest.getFollowerIndex() + "/_ccr/follow")); + if (putFollowRequest.waitForActiveShards() != null && putFollowRequest.waitForActiveShards() != ActiveShardCount.DEFAULT) { + String expectedValue = putFollowRequest.waitForActiveShards().toString().toLowerCase(Locale.ROOT); + assertThat(result.getParameters().get("wait_for_active_shards"), equalTo(expectedValue)); + } else { + assertThat(result.getParameters().size(), equalTo(0)); + } + RequestConvertersTests.assertToXContentBody(putFollowRequest, result.getEntity()); + } + + public void testPauseFollow() { + PauseFollowRequest pauseFollowRequest = new PauseFollowRequest(randomAlphaOfLength(4)); + Request result = CcrRequestConverters.pauseFollow(pauseFollowRequest); + assertThat(result.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(result.getEndpoint(), equalTo("/" + pauseFollowRequest.getFollowerIndex() + "/_ccr/pause_follow")); + assertThat(result.getParameters().size(), equalTo(0)); + assertThat(result.getEntity(), nullValue()); + } + + public void testResumeFollow() throws Exception { + ResumeFollowRequest resumeFollowRequest = new ResumeFollowRequest(randomAlphaOfLength(4)); + Request result = CcrRequestConverters.resumeFollow(resumeFollowRequest); + assertThat(result.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(result.getEndpoint(), equalTo("/" + resumeFollowRequest.getFollowerIndex() + "/_ccr/resume_follow")); + assertThat(result.getParameters().size(), equalTo(0)); + RequestConvertersTests.assertToXContentBody(resumeFollowRequest, result.getEntity()); + } + + public 
void testUnfollow() { + UnfollowRequest pauseFollowRequest = new UnfollowRequest(randomAlphaOfLength(4)); + Request result = CcrRequestConverters.unfollow(pauseFollowRequest); + assertThat(result.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(result.getEndpoint(), equalTo("/" + pauseFollowRequest.getFollowerIndex() + "/_ccr/unfollow")); + assertThat(result.getParameters().size(), equalTo(0)); + assertThat(result.getEntity(), nullValue()); + } + + public void testForgetFollower() throws IOException { + final ForgetFollowerRequest request = new ForgetFollowerRequest( + randomAlphaOfLength(8), + randomAlphaOfLength(8), + randomAlphaOfLength(8), + randomAlphaOfLength(8), + randomAlphaOfLength(8)); + final Request convertedRequest = CcrRequestConverters.forgetFollower(request); + assertThat(convertedRequest.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(convertedRequest.getEndpoint(), equalTo("/" + request.leaderIndex() + "/_ccr/forget_follower")); + assertThat(convertedRequest.getParameters().keySet(), empty()); + RequestConvertersTests.assertToXContentBody(request, convertedRequest.getEntity()); + } + + public void testPutAutofollowPattern() throws Exception { + PutAutoFollowPatternRequest putAutoFollowPatternRequest = new PutAutoFollowPatternRequest(randomAlphaOfLength(4), + randomAlphaOfLength(4), Arrays.asList(generateRandomStringArray(4, 4, false))); + if (randomBoolean()) { + putAutoFollowPatternRequest.setFollowIndexNamePattern(randomAlphaOfLength(4)); + } + randomizeRequest(putAutoFollowPatternRequest); + + Request result = CcrRequestConverters.putAutoFollowPattern(putAutoFollowPatternRequest); + assertThat(result.getMethod(), equalTo(HttpPut.METHOD_NAME)); + assertThat(result.getEndpoint(), equalTo("/_ccr/auto_follow/" + putAutoFollowPatternRequest.getName())); + assertThat(result.getParameters().size(), equalTo(0)); + RequestConvertersTests.assertToXContentBody(putAutoFollowPatternRequest, result.getEntity()); + } + + public void testDeleteAutofollowPattern() throws Exception { + DeleteAutoFollowPatternRequest deleteAutoFollowPatternRequest = new DeleteAutoFollowPatternRequest(randomAlphaOfLength(4)); + + Request result = CcrRequestConverters.deleteAutoFollowPattern(deleteAutoFollowPatternRequest); + assertThat(result.getMethod(), equalTo(HttpDelete.METHOD_NAME)); + assertThat(result.getEndpoint(), equalTo("/_ccr/auto_follow/" + deleteAutoFollowPatternRequest.getName())); + assertThat(result.getParameters().size(), equalTo(0)); + assertThat(result.getEntity(), nullValue()); + } + + public void testGetAutofollowPattern() throws Exception { + GetAutoFollowPatternRequest deleteAutoFollowPatternRequest = new GetAutoFollowPatternRequest(randomAlphaOfLength(4)); + + Request result = CcrRequestConverters.getAutoFollowPattern(deleteAutoFollowPatternRequest); + assertThat(result.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(result.getEndpoint(), equalTo("/_ccr/auto_follow/" + deleteAutoFollowPatternRequest.getName())); + assertThat(result.getParameters().size(), equalTo(0)); + assertThat(result.getEntity(), nullValue()); + } + + public void testGetCcrStats() throws Exception { + CcrStatsRequest ccrStatsRequest = new CcrStatsRequest(); + Request result = CcrRequestConverters.getCcrStats(ccrStatsRequest); + assertThat(result.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(result.getEndpoint(), equalTo("/_ccr/stats")); + assertThat(result.getParameters().size(), equalTo(0)); + assertThat(result.getEntity(), nullValue()); + } + + public void 
testGetFollowStats() throws Exception { + FollowStatsRequest followStatsRequest = new FollowStatsRequest(randomAlphaOfLength(4)); + Request result = CcrRequestConverters.getFollowStats(followStatsRequest); + assertThat(result.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(result.getEndpoint(), equalTo("/" + followStatsRequest.getFollowerIndex() + "/_ccr/stats")); + assertThat(result.getParameters().size(), equalTo(0)); + assertThat(result.getEntity(), nullValue()); + } + + public void testGetFollowInfo() throws Exception { + FollowInfoRequest followInfoRequest = new FollowInfoRequest(randomAlphaOfLength(4)); + Request result = CcrRequestConverters.getFollowInfo(followInfoRequest); + assertThat(result.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(result.getEndpoint(), equalTo("/" + followInfoRequest.getFollowerIndex() + "/_ccr/info")); + assertThat(result.getParameters().size(), equalTo(0)); + assertThat(result.getEntity(), nullValue()); + } + + private static void randomizeRequest(FollowConfig request) { + if (randomBoolean()) { + request.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + request.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + request.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + request.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + request.setMaxRetryDelay(new TimeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + request.setReadPollTimeout(new TimeValue(randomNonNegativeLong())); + } + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 0e1834cd3ac3b..e78e7ec7ca6d3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -165,10 +165,8 @@ public void testClusterHealthGreen() throws IOException { assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.status(), equalTo(RestStatus.OK)); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); - assertNoIndices(response); } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35450") public void testClusterHealthYellowClusterLevel() throws IOException { createIndex("index", Settings.EMPTY); createIndex("index2", Settings.EMPTY); @@ -178,15 +176,21 @@ public void testClusterHealthYellowClusterLevel() throws IOException { logger.info("Shard stats\n{}", EntityUtils.toString( client().performRequest(new Request("GET", "/_cat/shards")).getEntity())); - assertYellowShards(response); assertThat(response.getIndices().size(), equalTo(0)); } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35450") public void testClusterHealthYellowIndicesLevel() throws IOException { - createIndex("index", Settings.EMPTY); - createIndex("index2", 
Settings.EMPTY); - ClusterHealthRequest request = new ClusterHealthRequest(); + String firstIndex = "index"; + String secondIndex = "index2"; + // including another index that we do not assert on, to ensure that we are not + // accidentally asserting on entire cluster state + String ignoredIndex = "tasks"; + createIndex(firstIndex, Settings.EMPTY); + createIndex(secondIndex, Settings.EMPTY); + if (randomBoolean()) { + createIndex(ignoredIndex, Settings.EMPTY); + } + ClusterHealthRequest request = new ClusterHealthRequest(firstIndex, secondIndex); request.timeout("5s"); request.level(ClusterHealthRequest.Level.INDICES); ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync); @@ -212,11 +216,9 @@ private static void assertYellowShards(ClusterHealthResponse response) { assertThat(response.getDelayedUnassignedShards(), equalTo(0)); assertThat(response.getInitializingShards(), equalTo(0)); assertThat(response.getUnassignedShards(), equalTo(2)); - assertThat(response.getActiveShardsPercent(), equalTo(50d)); } - - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35450") + public void testClusterHealthYellowSpecificIndex() throws IOException { createIndex("index", Settings.EMPTY); createIndex("index2", Settings.EMPTY); @@ -236,7 +238,6 @@ public void testClusterHealthYellowSpecificIndex() throws IOException { assertThat(response.getDelayedUnassignedShards(), equalTo(0)); assertThat(response.getInitializingShards(), equalTo(0)); assertThat(response.getUnassignedShards(), equalTo(1)); - assertThat(response.getActiveShardsPercent(), equalTo(50d)); assertThat(response.getIndices().size(), equalTo(1)); Map.Entry index = response.getIndices().entrySet().iterator().next(); assertYellowIndex(index.getKey(), index.getValue(), false); @@ -272,7 +273,19 @@ private static void assertYellowShard(int shardId, ClusterShardHealth shardHealt assertThat(shardHealth.getRelocatingShards(), equalTo(0)); } + private static void assertNoIndices(ClusterHealthResponse response) { + assertThat(response.getIndices(), equalTo(emptyMap())); + assertThat(response.getActivePrimaryShards(), equalTo(0)); + assertThat(response.getNumberOfDataNodes(), equalTo(1)); + assertThat(response.getNumberOfNodes(), equalTo(1)); + assertThat(response.getActiveShards(), equalTo(0)); + assertThat(response.getDelayedUnassignedShards(), equalTo(0)); + assertThat(response.getInitializingShards(), equalTo(0)); + assertThat(response.getUnassignedShards(), equalTo(0)); + } + public void testClusterHealthNotFoundIndex() throws IOException { + createIndex("index", Settings.EMPTY); ClusterHealthRequest request = new ClusterHealthRequest("notexisted-index"); request.timeout("5s"); ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync); @@ -284,15 +297,4 @@ public void testClusterHealthNotFoundIndex() throws IOException { assertNoIndices(response); } - private static void assertNoIndices(ClusterHealthResponse response) { - assertThat(response.getIndices(), equalTo(emptyMap())); - assertThat(response.getActivePrimaryShards(), equalTo(0)); - assertThat(response.getNumberOfDataNodes(), equalTo(1)); - assertThat(response.getNumberOfNodes(), equalTo(1)); - assertThat(response.getActiveShards(), equalTo(0)); - assertThat(response.getDelayedUnassignedShards(), equalTo(0)); - assertThat(response.getInitializingShards(), equalTo(0)); - assertThat(response.getUnassignedShards(), equalTo(0)); - 
assertThat(response.getActiveShardsPercent(), equalTo(100d)); - } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index e2102236cc422..301a32d97fe0c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -137,7 +137,7 @@ public void testDelete() throws IOException { ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync)); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][" + docId + "]: " + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[" + docId + "]: " + "version conflict, required seqNo [2], primary term [2]. current document has seqNo [3] and primary term [1]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); @@ -166,7 +166,7 @@ public void testDelete() throws IOException { execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); }); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][" + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[" + docId + "]: version conflict, current version [12] is higher or equal to the one provided [10]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); } @@ -301,7 +301,7 @@ public void testGet() throws IOException { ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync)); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, " + "reason=[_doc][id]: " + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, " + "reason=[id]: " + "version conflict, current version [1] is different than the one provided [2]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); } @@ -527,7 +527,7 @@ public void testIndex() throws IOException { execute(wrongRequest, highLevelClient()::index, highLevelClient()::indexAsync); }); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][id]: " + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[id]: " + "version conflict, required seqNo [1], primary term [5]. 
current document has seqNo [2] and primary term [1]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); @@ -574,7 +574,7 @@ public void testIndex() throws IOException { }); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][with_create_op_type]: " + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[with_create_op_type]: " + "version conflict, document already exists (current version [1])]", exception.getMessage()); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index a7aa517709391..74d7095db28ea 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -1261,7 +1261,8 @@ public void testGetAlias() throws IOException { GetAliasesResponse getAliasesResponse = execute(getAliasesRequest, highLevelClient().indices()::getAlias, highLevelClient().indices()::getAliasAsync); - assertThat(getAliasesResponse.getAliases().size(), equalTo(3)); + assertThat("Unexpected number of aliases, got: " + getAliasesResponse.getAliases().toString(), + getAliasesResponse.getAliases().size(), equalTo(3)); assertThat(getAliasesResponse.getAliases().get("index1").size(), equalTo(1)); AliasMetaData aliasMetaData1 = getAliasesResponse.getAliases().get("index1").iterator().next(); assertThat(aliasMetaData1, notNullValue()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 07d7187fd1d70..f7b7b148f660b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -878,6 +878,18 @@ private String createExpiredData(String jobId) throws Exception { waitForJobToClose(jobId); + long prevJobTimeStamp = System.currentTimeMillis() / 1000; + + // Check that the current timestamp component, in seconds, differs from previously. + // Note that we used to use an 'awaitBusy(() -> false, 1, TimeUnit.SECONDS);' + // for the same purpose but the new approach... 
+ // a) explicitly checks that the timestamps, in seconds, are actually different and + // b) is slightly more efficient since we may not need to wait an entire second for the timestamp to increment + assertBusy(() -> { + long timeNow = System.currentTimeMillis() / 1000; + assertFalse(prevJobTimeStamp >= timeNow); + }); + // Update snapshot timestamp to force it out of snapshot retention window long oneDayAgo = nowMillis - TimeValue.timeValueHours(24).getMillis() - 1; updateModelSnapshotTimestamp(jobId, String.valueOf(oneDayAgo)); @@ -1418,6 +1430,7 @@ private void startDatafeed(String datafeedId, String start, String end) throws E } private void updateModelSnapshotTimestamp(String jobId, String timestamp) throws Exception { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); GetModelSnapshotsRequest getModelSnapshotsRequest = new GetModelSnapshotsRequest(jobId); @@ -1435,9 +1448,6 @@ private void updateModelSnapshotTimestamp(String jobId, String timestamp) throws UpdateRequest updateSnapshotRequest = new UpdateRequest(".ml-anomalies-" + jobId, "_doc", documentId); updateSnapshotRequest.doc(snapshotUpdate.getBytes(StandardCharsets.UTF_8), XContentType.JSON); highLevelClient().update(updateSnapshotRequest, RequestOptions.DEFAULT); - - // Wait a second to ensure subsequent model snapshots will have a different ID (it depends on epoch seconds) - awaitBusy(() -> false, 1, TimeUnit.SECONDS); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java index 350659ae1449a..57b6e422bb1cd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java @@ -19,12 +19,8 @@ package org.elasticsearch.client; -import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.client.migration.DeprecationInfoRequest; import org.elasticsearch.client.migration.DeprecationInfoResponse; -import org.elasticsearch.client.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.client.migration.IndexUpgradeInfoResponse; -import org.elasticsearch.client.migration.IndexUpgradeRequest; import org.elasticsearch.client.tasks.TaskSubmissionResponse; import org.elasticsearch.common.settings.Settings; @@ -32,51 +28,10 @@ import java.util.Collections; import java.util.function.BooleanSupplier; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class MigrationIT extends ESRestHighLevelClientTestCase { - public void testGetAssistance() throws IOException { - { - IndexUpgradeInfoResponse response = highLevelClient().migration() - .getAssistance(new IndexUpgradeInfoRequest(), RequestOptions.DEFAULT); - assertEquals(0, response.getActions().size()); - } - { - createIndex("test", Settings.EMPTY); - IndexUpgradeInfoResponse response = highLevelClient().migration().getAssistance( - new IndexUpgradeInfoRequest("test"), RequestOptions.DEFAULT); - assertEquals(0, response.getActions().size()); - } - } - - public void testUpgradeWhenIndexCannotBeUpgraded() throws IOException { - createIndex("test", Settings.EMPTY); - - ThrowingRunnable execute = () -> execute(new IndexUpgradeRequest("test"), - highLevelClient().migration()::upgrade, - highLevelClient().migration()::upgradeAsync); - - ElasticsearchStatusException responseException = expectThrows(ElasticsearchStatusException.class, execute); - - 
assertThat(responseException.getDetailedMessage(), containsString("cannot be upgraded")); - } - - public void testUpgradeWithTaskApi() throws IOException, InterruptedException { - createIndex("test", Settings.EMPTY); - - IndexUpgradeRequest request = new IndexUpgradeRequest("test"); - - TaskSubmissionResponse upgrade = highLevelClient().migration() - .submitUpgradeTask(request, RequestOptions.DEFAULT); - - assertNotNull(upgrade.getTask()); - - BooleanSupplier hasUpgradeCompleted = checkCompletionStatus(upgrade); - awaitBusy(hasUpgradeCompleted); - } - public void testGetDeprecationInfo() throws IOException { createIndex("test", Settings.EMPTY); DeprecationInfoRequest request = new DeprecationInfoRequest(Collections.singletonList("test")); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationRequestConvertersTests.java index 99fd1205b412e..88936eff0246a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationRequestConvertersTests.java @@ -20,9 +20,7 @@ package org.elasticsearch.client; import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.elasticsearch.client.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.client.migration.IndexUpgradeRequest; +import org.elasticsearch.client.migration.DeprecationInfoRequest; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; @@ -30,37 +28,16 @@ public class MigrationRequestConvertersTests extends ESTestCase { - public void testGetMigrationAssistance() { - IndexUpgradeInfoRequest upgradeInfoRequest = new IndexUpgradeInfoRequest(); - String expectedEndpoint = "/_migration/assistance"; - if (randomBoolean()) { - String[] indices = RequestConvertersTests.randomIndicesNames(1, 5); - upgradeInfoRequest.indices(indices); - expectedEndpoint += "/" + String.join(",", indices); - } + public void testGetDeprecationInfo() { + DeprecationInfoRequest deprecationInfoRequest = new DeprecationInfoRequest(); + String expectedEndpoint = "/_migration/deprecations"; + Map expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomIndicesOptions(upgradeInfoRequest::indicesOptions, upgradeInfoRequest::indicesOptions, - expectedParams); - Request request = MigrationRequestConverters.getMigrationAssistance(upgradeInfoRequest); + Request request = MigrationRequestConverters.getDeprecationInfo(deprecationInfoRequest); assertEquals(HttpGet.METHOD_NAME, request.getMethod()); assertEquals(expectedEndpoint, request.getEndpoint()); assertNull(request.getEntity()); assertEquals(expectedParams, request.getParameters()); } - public void testUpgradeRequest() { - String[] indices = RequestConvertersTests.randomIndicesNames(1, 1); - IndexUpgradeRequest upgradeInfoRequest = new IndexUpgradeRequest(indices[0]); - - String expectedEndpoint = "/_migration/upgrade/" + indices[0]; - Map expectedParams = new HashMap<>(); - expectedParams.put("wait_for_completion", Boolean.TRUE.toString()); - - Request request = MigrationRequestConverters.migrate(upgradeInfoRequest); - - assertEquals(HttpPost.METHOD_NAME, request.getMethod()); - assertEquals(expectedEndpoint, request.getEndpoint()); - assertNull(request.getEntity()); - assertEquals(expectedParams, request.getParameters()); - } } diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 54826e963cb83..c528e87db37df 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -1321,6 +1321,7 @@ public void testCountMultipleIndicesNoQuery() throws IOException { assertEquals(7, countResponse.getCount()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39754") public void testCountAllIndicesNoQuery() throws IOException { CountRequest countRequest = new CountRequest(); CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowConfigTests.java new file mode 100644 index 0000000000000..0d8b4ca872696 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowConfigTests.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.ccr; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class FollowConfigTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + FollowConfigTests::createTestInstance, + (followConfig, xContentBuilder) -> { + xContentBuilder.startObject(); + followConfig.toXContentFragment(xContentBuilder, ToXContent.EMPTY_PARAMS); + xContentBuilder.endObject(); + }, + FollowConfig::fromXContent) + .supportsUnknownFields(true) + .test(); + } + + static FollowConfig createTestInstance() { + FollowConfig followConfig = new FollowConfig(); + if (randomBoolean()) { + followConfig.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + followConfig.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + followConfig.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + followConfig.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + followConfig.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + followConfig.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + followConfig.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + followConfig.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + followConfig.setMaxRetryDelay(new TimeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + followConfig.setReadPollTimeout(new TimeValue(randomNonNegativeLong())); + } + return followConfig; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java new file mode 100644 index 0000000000000..5cd327495dc1c --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/FollowInfoResponseTests.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.ccr; + +import org.elasticsearch.client.ccr.FollowInfoResponse.FollowerInfo; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class FollowInfoResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + FollowInfoResponseTests::createTestInstance, + FollowInfoResponseTests::toXContent, + FollowInfoResponse::fromXContent) + .supportsUnknownFields(true) + .test(); + } + + private static void toXContent(FollowInfoResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + builder.startArray(FollowInfoResponse.FOLLOWER_INDICES_FIELD.getPreferredName()); + for (FollowerInfo info : response.getInfos()) { + builder.startObject(); + builder.field(FollowerInfo.FOLLOWER_INDEX_FIELD.getPreferredName(), info.getFollowerIndex()); + builder.field(FollowerInfo.REMOTE_CLUSTER_FIELD.getPreferredName(), info.getRemoteCluster()); + builder.field(FollowerInfo.LEADER_INDEX_FIELD.getPreferredName(), info.getLeaderIndex()); + builder.field(FollowerInfo.STATUS_FIELD.getPreferredName(), info.getStatus().getName()); + if (info.getParameters() != null) { + builder.startObject(FollowerInfo.PARAMETERS_FIELD.getPreferredName()); + { + info.getParameters().toXContentFragment(builder, ToXContent.EMPTY_PARAMS); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + } + + private static FollowInfoResponse createTestInstance() { + int numInfos = randomIntBetween(0, 64); + List infos = new ArrayList<>(numInfos); + for (int i = 0; i < numInfos; i++) { + FollowInfoResponse.Status status = randomFrom(FollowInfoResponse.Status.values()); + FollowConfig followConfig = randomBoolean() ? FollowConfigTests.createTestInstance() : null; + infos.add(new FollowerInfo(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4), status, followConfig)); + } + return new FollowInfoResponse(infos); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/BroadcastResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/BroadcastResponseTests.java new file mode 100644 index 0000000000000..96438725d4ef0 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/BroadcastResponseTests.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.isIn; + +public class BroadcastResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + final String index = randomAlphaOfLength(8); + final String id = randomAlphaOfLength(8); + final int total = randomIntBetween(1, 16); + final int successful = total - scaledRandomIntBetween(0, total); + final int failed = scaledRandomIntBetween(0, total - successful); + final List failures = new ArrayList<>(); + final Set shardIds = new HashSet<>(); + for (int i = 0; i < failed; i++) { + final DefaultShardOperationFailedException failure = new DefaultShardOperationFailedException( + index, + randomValueOtherThanMany(shardIds::contains, () -> randomIntBetween(0, total - 1)), + new RetentionLeaseNotFoundException(id)); + failures.add(failure); + shardIds.add(failure.shardId()); + } + + final org.elasticsearch.action.support.broadcast.BroadcastResponse to = + new org.elasticsearch.action.support.broadcast.BroadcastResponse(total, successful, failed, failures); + + final XContentType xContentType = randomFrom(XContentType.values()); + final BytesReference bytes = toShuffledXContent(to, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + + final XContent xContent = XContentFactory.xContent(xContentType); + final XContentParser parser = xContent.createParser( + new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), + LoggingDeprecationHandler.INSTANCE, + bytes.streamInput()); + final BroadcastResponse from = BroadcastResponse.fromXContent(parser); + assertThat(from.shards().total(), equalTo(total)); + assertThat(from.shards().successful(), equalTo(successful)); + assertThat(from.shards().skipped(), equalTo(0)); + assertThat(from.shards().failed(), equalTo(failed)); + assertThat(from.shards().failures(), hasSize(failed == 0 ? 
failed : 1)); // failures are grouped + if (failed > 0) { + final DefaultShardOperationFailedException groupedFailure = from.shards().failures().iterator().next(); + assertThat(groupedFailure.index(), equalTo(index)); + assertThat(groupedFailure.shardId(), isIn(shardIds)); + assertThat(groupedFailure.reason(), containsString("reason=retention lease with ID [" + id + "] not found")); + } + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java index 2e54d1c4a1a7c..baf8132096cb8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java @@ -36,8 +36,11 @@ import org.elasticsearch.client.ccr.CcrStatsRequest; import org.elasticsearch.client.ccr.CcrStatsResponse; import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest; +import org.elasticsearch.client.ccr.FollowInfoRequest; +import org.elasticsearch.client.ccr.FollowInfoResponse; import org.elasticsearch.client.ccr.FollowStatsRequest; import org.elasticsearch.client.ccr.FollowStatsResponse; +import org.elasticsearch.client.ccr.ForgetFollowerRequest; import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest; import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse; import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse.Pattern; @@ -49,15 +52,19 @@ import org.elasticsearch.client.ccr.ResumeFollowRequest; import org.elasticsearch.client.ccr.UnfollowRequest; import org.elasticsearch.client.core.AcknowledgedResponse; +import org.elasticsearch.client.core.BroadcastResponse; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.rest.yaml.ObjectPath; import org.junit.Before; import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -392,6 +399,101 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + public void testForgetFollower() throws InterruptedException, IOException { + final RestHighLevelClient client = highLevelClient(); + final String leaderIndex = "leader"; + { + // create leader index + final CreateIndexRequest createIndexRequest = new CreateIndexRequest(leaderIndex); + final Map settings = new HashMap<>(2); + final int numberOfShards = randomIntBetween(1, 2); + settings.put("index.number_of_shards", Integer.toString(numberOfShards)); + settings.put("index.soft_deletes.enabled", Boolean.TRUE.toString()); + createIndexRequest.settings(settings); + final CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + assertThat(response.isAcknowledged(), is(true)); + } + final String followerIndex = "follower"; + + final PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followerIndex, ActiveShardCount.ONE); + final PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); + assertTrue(putFollowResponse.isFollowIndexCreated()); + 
assertTrue((putFollowResponse.isFollowIndexShardsAcked())); + assertTrue(putFollowResponse.isIndexFollowingStarted()); + + final PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower"); + AcknowledgedResponse pauseFollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT); + assertTrue(pauseFollowResponse.isAcknowledged()); + + final String followerCluster = highLevelClient().info(RequestOptions.DEFAULT).getClusterName().value(); + final Request statsRequest = new Request("GET", "/follower/_stats"); + final Response statsResponse = client().performRequest(statsRequest); + final ObjectPath statsObjectPath = ObjectPath.createFromResponse(statsResponse); + final String followerIndexUUID = statsObjectPath.evaluate("indices.follower.uuid"); + + final String leaderCluster = "local"; + + // tag::ccr-forget-follower-request + final ForgetFollowerRequest request = new ForgetFollowerRequest( + followerCluster, // <1> + followerIndex, // <2> + followerIndexUUID, // <3> + leaderCluster, // <4> + leaderIndex); // <5> + // end::ccr-forget-follower-request + + // tag::ccr-forget-follower-execute + final BroadcastResponse response = client + .ccr() + .forgetFollower(request, RequestOptions.DEFAULT); + // end::ccr-forget-follower-execute + + // tag::ccr-forget-follower-response + final BroadcastResponse.Shards shards = response.shards(); // <1> + final int total = shards.total(); // <2> + final int successful = shards.successful(); // <3> + final int skipped = shards.skipped(); // <4> + final int failed = shards.failed(); // <5> + shards.failures().forEach(failure -> {}); // <6> + // end::ccr-forget-follower-response + + // tag::ccr-forget-follower-execute-listener + ActionListener listener = + new ActionListener() { + + @Override + public void onResponse(final BroadcastResponse response) { + final BroadcastResponse.Shards shards = // <1> + response.shards(); + final int total = shards.total(); + final int successful = shards.successful(); + final int skipped = shards.skipped(); + final int failed = shards.failed(); + shards.failures().forEach(failure -> {}); + } + + @Override + public void onFailure(final Exception e) { + // <2> + } + + }; + // end::ccr-forget-follower-execute-listener + + // replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ccr-forget-follower-execute-async + client.ccr().forgetFollowerAsync( + request, + RequestOptions.DEFAULT, + listener); // <1> + // end::ccr-forget-follower-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + public void testPutAutoFollowPattern() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -697,6 +799,74 @@ public void onFailure(Exception e) { } } + public void testGetFollowInfos() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + // Create leader index: + CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader"); + createIndexRequest.settings(Collections.singletonMap("index.soft_deletes.enabled", true)); + CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + assertThat(response.isAcknowledged(), is(true)); + } + { + // Follow index, so that we can query for follow stats: + PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", "follower", ActiveShardCount.ONE); + PutFollowResponse putFollowResponse = 
client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); + assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); + assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); + assertThat(putFollowResponse.isIndexFollowingStarted(), is(true)); + } + + // tag::ccr-get-follow-info-request + FollowInfoRequest request = + new FollowInfoRequest("follower"); // <1> + // end::ccr-get-follow-info-request + + // tag::ccr-get-follow-info-execute + FollowInfoResponse response = client.ccr() + .getFollowInfo(request, RequestOptions.DEFAULT); + // end::ccr-get-follow-info-execute + + // tag::ccr-get-follow-info-response + List<FollowInfoResponse.FollowerInfo> infos = + response.getInfos(); // <1> + // end::ccr-get-follow-info-response + + // tag::ccr-get-follow-info-execute-listener + ActionListener<FollowInfoResponse> listener = + new ActionListener<FollowInfoResponse>() { + @Override + public void onResponse(FollowInfoResponse response) { // <1> + List<FollowInfoResponse.FollowerInfo> infos = + response.getInfos(); + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::ccr-get-follow-info-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ccr-get-follow-info-execute-async + client.ccr().getFollowInfoAsync(request, + RequestOptions.DEFAULT, listener); // <1> + // end::ccr-get-follow-info-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + { + PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower"); + AcknowledgedResponse pauseFollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT); + assertThat(pauseFollowResponse.isAcknowledged(), is(true)); + } + } + static Map<String, Object> toMap(Response response) throws IOException { return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java index ca5e18c376b92..ea6fb8d864a73 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java @@ -19,23 +19,14 @@ package org.elasticsearch.client.documentation; -import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.migration.DeprecationInfoRequest; import org.elasticsearch.client.migration.DeprecationInfoResponse; -import org.elasticsearch.client.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.client.migration.IndexUpgradeInfoResponse; -import org.elasticsearch.client.migration.IndexUpgradeRequest; -import org.elasticsearch.client.migration.UpgradeActionRequired; -import org.elasticsearch.client.tasks.TaskSubmissionResponse; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.reindex.BulkByScrollResponse; import java.io.IOException; import java.util.ArrayList;
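
Editor's note: the CCR documentation tests above (and the migration tests below) all turn an async client call into a blocking assertion the same way: the snippet's listener is wrapped in a LatchedActionListener and the test waits on a CountDownLatch with a timeout. The following is a minimal, self-contained sketch of that pattern only; the Listener interface and latched() helper are hypothetical stand-ins for ActionListener/LatchedActionListener, not part of the client API.

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    public final class LatchedCallbackSketch {

        // Hypothetical stand-in for org.elasticsearch.action.ActionListener.
        interface Listener<T> {
            void onResponse(T response);
            void onFailure(Exception e);
        }

        // Wraps a listener so the latch is counted down after either callback fires,
        // which is what LatchedActionListener does for the documentation listeners above.
        static <T> Listener<T> latched(Listener<T> delegate, CountDownLatch latch) {
            return new Listener<T>() {
                @Override public void onResponse(T response) {
                    try { delegate.onResponse(response); } finally { latch.countDown(); }
                }
                @Override public void onFailure(Exception e) {
                    try { delegate.onFailure(e); } finally { latch.countDown(); }
                }
            };
        }

        public static void main(String[] args) throws InterruptedException {
            CountDownLatch latch = new CountDownLatch(1);
            Listener<String> listener = latched(new Listener<String>() {
                @Override public void onResponse(String response) { System.out.println("got " + response); }
                @Override public void onFailure(Exception e) { e.printStackTrace(); }
            }, latch);

            // Simulated async call; in the tests above this is e.g. client.ccr().getFollowInfoAsync(...).
            new Thread(() -> listener.onResponse("follower-info")).start();

            // The test then blocks, exactly like assertTrue(latch.await(30L, TimeUnit.SECONDS)).
            boolean completed = latch.await(30L, TimeUnit.SECONDS);
            System.out.println("callback completed within timeout: " + completed);
        }
    }
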
@@ -44,10 +35,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.isEmptyOrNullString; -import static org.hamcrest.Matchers.not; - /** * This class is used to generate the Java Migration API documentation. * You need to wrap your code between two tags like: @@ -68,98 +55,6 @@ */ public class MigrationClientDocumentationIT extends ESRestHighLevelClientTestCase { - public void testGetAssistance() throws IOException { - RestHighLevelClient client = highLevelClient(); - - // tag::get-assistance-request - IndexUpgradeInfoRequest request = new IndexUpgradeInfoRequest(); // <1> - // end::get-assistance-request - - // tag::get-assistance-request-indices - request.indices("index1", "index2"); // <1> - // end::get-assistance-request-indices - - request.indices(Strings.EMPTY_ARRAY); - - // tag::get-assistance-request-indices-options - request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> - // end::get-assistance-request-indices-options - - // tag::get-assistance-execute - IndexUpgradeInfoResponse response = client.migration().getAssistance(request, RequestOptions.DEFAULT); - // end::get-assistance-execute - - // tag::get-assistance-response - Map actions = response.getActions(); - for (Map.Entry entry : actions.entrySet()) { - String index = entry.getKey(); // <1> - UpgradeActionRequired actionRequired = entry.getValue(); // <2> - } - // end::get-assistance-response - } - - public void testUpgrade() throws IOException { - - RestHighLevelClient client = highLevelClient(); - createIndex("test", Settings.EMPTY); - - - // tag::upgrade-request - IndexUpgradeRequest request = new IndexUpgradeRequest("test"); // <1> - // end::upgrade-request - - try { - - // tag::upgrade-execute - BulkByScrollResponse response = client.migration().upgrade(request, RequestOptions.DEFAULT); - // end::upgrade-execute - - } catch (ElasticsearchStatusException e) { - assertThat(e.getMessage(), containsString("cannot be upgraded")); - } - } - - public void testUpgradeAsync() throws IOException, InterruptedException { - RestHighLevelClient client = highLevelClient(); - createIndex("test", Settings.EMPTY); - final CountDownLatch latch = new CountDownLatch(1); - - // tag::upgrade-async-listener - ActionListener listener = new ActionListener() { - @Override - public void onResponse(BulkByScrollResponse bulkResponse) { - // <1> - } - - @Override - public void onFailure(Exception e) { - // <2> - } - }; - // end::upgrade-async-listener - - listener = new LatchedActionListener<>(listener, latch); - - // tag::upgrade-async-execute - client.migration().upgradeAsync(new IndexUpgradeRequest("test"), RequestOptions.DEFAULT, listener); // <1> - // end::upgrade-async-execute - - assertTrue(latch.await(30L, TimeUnit.SECONDS)); - } - - public void testUpgradeWithTaskApi() throws IOException { - createIndex("test", Settings.EMPTY); - RestHighLevelClient client = highLevelClient(); - // tag::upgrade-task-api - IndexUpgradeRequest request = new IndexUpgradeRequest("test"); - - TaskSubmissionResponse response = client.migration() - .submitUpgradeTask(request, RequestOptions.DEFAULT); - String taskId = response.getTask(); - // end::upgrade-task-api - assertThat(taskId, not(isEmptyOrNullString())); - } - public void testGetDeprecationInfo() throws IOException, InterruptedException { RestHighLevelClient client = highLevelClient(); createIndex("test", Settings.EMPTY); diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index d041fa5d65d92..f9c521aca2e04 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -979,39 +979,39 @@ public void testGetSslCertificates() throws Exception { assertThat(certificates.size(), Matchers.equalTo(9)); final Iterator it = certificates.iterator(); CertificateInfo c = it.next(); - assertThat(c.getSubjectDn(), Matchers.equalTo("CN=testnode-client-profile")); + assertThat(c.getSerialNumber(), Matchers.equalTo("c0ea4216e8ff0fd8")); assertThat(c.getPath(), Matchers.equalTo("testnode.jks")); assertThat(c.getFormat(), Matchers.equalTo("jks")); c = it.next(); - assertThat(c.getSubjectDn(), Matchers.equalTo("CN=Elasticsearch Test Node, OU=elasticsearch, O=org")); + assertThat(c.getSerialNumber(), Matchers.equalTo("b8b96c37e332cccb")); assertThat(c.getPath(), Matchers.equalTo("testnode.crt")); assertThat(c.getFormat(), Matchers.equalTo("PEM")); c = it.next(); - assertThat(c.getSubjectDn(), Matchers.equalTo("CN=OpenLDAP, OU=Elasticsearch, O=Elastic, L=Mountain View, ST=CA, C=US")); + assertThat(c.getSerialNumber(), Matchers.equalTo("d3850b2b1995ad5f")); assertThat(c.getPath(), Matchers.equalTo("testnode.jks")); assertThat(c.getFormat(), Matchers.equalTo("jks")); c = it.next(); - assertThat(c.getSubjectDn(), Matchers.equalTo("CN=Elasticsearch Test Node, OU=elasticsearch, O=org")); + assertThat(c.getSerialNumber(), Matchers.equalTo("b8b96c37e332cccb")); assertThat(c.getPath(), Matchers.equalTo("testnode.jks")); assertThat(c.getFormat(), Matchers.equalTo("jks")); c = it.next(); - assertThat(c.getSubjectDn(), Matchers.equalTo("CN=Elasticsearch Test Client, OU=elasticsearch, O=org")); + assertThat(c.getSerialNumber(), Matchers.equalTo("b9d497f2924bbe29")); assertThat(c.getPath(), Matchers.equalTo("testnode.jks")); assertThat(c.getFormat(), Matchers.equalTo("jks")); c = it.next(); - assertThat(c.getSubjectDn(), Matchers.equalTo("CN=ad-ELASTICSEARCHAD-CA, DC=ad, DC=test, DC=elasticsearch, DC=com")); + assertThat(c.getSerialNumber(), Matchers.equalTo("580db8ad52bb168a4080e1df122a3f56")); assertThat(c.getPath(), Matchers.equalTo("testnode.jks")); assertThat(c.getFormat(), Matchers.equalTo("jks")); c = it.next(); - assertThat(c.getSubjectDn(), Matchers.equalTo("CN=Elasticsearch Test Node")); + assertThat(c.getSerialNumber(), Matchers.equalTo("7268203b")); assertThat(c.getPath(), Matchers.equalTo("testnode.jks")); assertThat(c.getFormat(), Matchers.equalTo("jks")); c = it.next(); - assertThat(c.getSubjectDn(), Matchers.equalTo("CN=samba4")); + assertThat(c.getSerialNumber(), Matchers.equalTo("3151a81eec8d4e34c56a8466a8510bcfbe63cc31")); assertThat(c.getPath(), Matchers.equalTo("testnode.jks")); assertThat(c.getFormat(), Matchers.equalTo("jks")); c = it.next(); - assertThat(c.getSubjectDn(), Matchers.equalTo("CN=Elasticsearch Test Node")); + assertThat(c.getSerialNumber(), Matchers.equalTo("223c736a")); assertThat(c.getPath(), Matchers.equalTo("testnode.jks")); assertThat(c.getFormat(), Matchers.equalTo("jks")); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java index 38c8986e1d9f0..56a7fce498c2a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java @@ -179,11 +179,16 @@ public void testCancelTasks() throws IOException { List<TaskInfo> tasks = response.getTasks(); // <1> // end::cancel-tasks-response-tasks + // tag::cancel-tasks-response-calc + Map<String, List<TaskInfo>> perNodeTasks = response.getPerNodeTasks(); // <1> + List<TaskGroup> groups = response.getTaskGroups(); // <2> + // end::cancel-tasks-response-calc + // tag::cancel-tasks-response-failures List<ElasticsearchException> nodeFailures = response.getNodeFailures(); // <1> List<TaskOperationFailure> taskFailures = response.getTaskFailures(); // <2> - // end::-tasks-response-failures + // end::cancel-tasks-response-failures assertThat(response.getNodeFailures(), equalTo(emptyList())); assertThat(response.getTaskFailures(), equalTo(emptyList())); diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 6b22b7b909909..7bbcb1df85694 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -22,8 +22,8 @@ apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' -targetCompatibility = JavaVersion.VERSION_1_7 -sourceCompatibility = JavaVersion.VERSION_1_7 +targetCompatibility = JavaVersion.VERSION_1_8 +sourceCompatibility = JavaVersion.VERSION_1_8 group = 'org.elasticsearch.client' archivesBaseName = 'elasticsearch-rest-client' diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index 382a3f3c9d121..fffc1b711b25c 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -20,8 +20,8 @@ apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' -targetCompatibility = JavaVersion.VERSION_1_7 -sourceCompatibility = JavaVersion.VERSION_1_7 +targetCompatibility = JavaVersion.VERSION_1_8 +sourceCompatibility = JavaVersion.VERSION_1_8 group = 'org.elasticsearch.client' archivesBaseName = 'elasticsearch-rest-client-sniffer' diff --git a/client/test/build.gradle b/client/test/build.gradle index f184cfbb73c3d..faf5fb7bddf46 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -18,8 +18,8 @@ */ apply plugin: 'elasticsearch.build' -targetCompatibility = JavaVersion.VERSION_1_7 -sourceCompatibility = JavaVersion.VERSION_1_7 +targetCompatibility = JavaVersion.VERSION_1_8 +sourceCompatibility = JavaVersion.VERSION_1_8 group = "${group}.client.test" @@ -53,4 +53,4 @@ dependenciesInfo.enabled = false //we aren't releasing this jar thirdPartyAudit.enabled = false -unitTest.enabled = false +test.enabled = false diff --git a/dev-tools/es_release_notes.pl b/dev-tools/es_release_notes.pl index 16a00d4eff2ae..1d1d767200eca 100755 --- a/dev-tools/es_release_notes.pl +++ b/dev-tools/es_release_notes.pl @@ -87,11 +87,11 @@ sub dump_issues { :pull: https://github.com/${User_Repo}pull/ [[release-notes-$version]] -== $version Release Notes +== {es} version $version coming[$version] -Also see <>. +Also see <>.
ASCIIDOC diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index 3723e31b27f1e..7240ee0fbb699 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -45,7 +45,7 @@ task createPluginsDir(type: EmptyDirTask) { dirMode 0755 } -CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, boolean oss) { +CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String platform, boolean oss, boolean jdk) { return copySpec { into("elasticsearch-${version}") { into('lib') { @@ -54,10 +54,15 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, boolean os into('config') { dirMode 0750 fileMode 0660 - with configFiles(distributionType, oss) + with configFiles(distributionType, oss, jdk) } into('bin') { - with binFiles(distributionType, oss) + with binFiles(distributionType, oss, jdk) + } + if (jdk) { + into('jdk') { + with jdkFiles(platform) + } } into('') { from { @@ -79,7 +84,7 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, boolean os rename { 'LICENSE.txt' } } - with noticeFile + with noticeFile(oss, jdk) into('modules') { with modulesFiles } @@ -102,19 +107,31 @@ Closure commonZipConfig = { task buildIntegTestZip(type: Zip) { configure(commonZipConfig) - with archiveFiles(transportModulesFiles, 'zip', true) + with archiveFiles(transportModulesFiles, 'zip', null, true, false) } task buildWindowsZip(type: Zip) { configure(commonZipConfig) archiveClassifier = 'windows-x86_64' - with archiveFiles(modulesFiles(false), 'zip', false) + with archiveFiles(modulesFiles(false), 'zip', 'windows', false, true) } task buildOssWindowsZip(type: Zip) { configure(commonZipConfig) archiveClassifier = 'windows-x86_64' - with archiveFiles(modulesFiles(true), 'zip', true) + with archiveFiles(modulesFiles(true), 'zip', 'windows', true, true) +} + +task buildNoJdkWindowsZip(type: Zip) { + configure(commonZipConfig) + archiveClassifier = 'no-jdk-windows-x86_64' + with archiveFiles(modulesFiles(false), 'zip', 'windows', false, false) +} + +task buildOssNoJdkWindowsZip(type: Zip) { + configure(commonZipConfig) + archiveClassifier = 'no-jdk-windows-x86_64' + with archiveFiles(modulesFiles(true), 'zip', 'windows', true, false) } Closure commonTarConfig = { @@ -127,25 +144,49 @@ Closure commonTarConfig = { task buildDarwinTar(type: Tar) { configure(commonTarConfig) archiveClassifier = 'darwin-x86_64' - with archiveFiles(modulesFiles(false), 'tar', false) + with archiveFiles(modulesFiles(false), 'tar', 'darwin', false, true) } task buildOssDarwinTar(type: Tar) { configure(commonTarConfig) archiveClassifier = 'darwin-x86_64' - with archiveFiles(modulesFiles(true), 'tar', true) + with archiveFiles(modulesFiles(true), 'tar', 'darwin', true, true) +} + +task buildNoJdkDarwinTar(type: Tar) { + configure(commonTarConfig) + archiveClassifier = 'no-jdk-darwin-x86_64' + with archiveFiles(modulesFiles(false), 'tar', 'darwin', false, false) +} + +task buildOssNoJdkDarwinTar(type: Tar) { + configure(commonTarConfig) + archiveClassifier = 'no-jdk-darwin-x86_64' + with archiveFiles(modulesFiles(true), 'tar', 'darwin', true, false) } task buildLinuxTar(type: Tar) { configure(commonTarConfig) archiveClassifier = 'linux-x86_64' - with archiveFiles(modulesFiles(false), 'tar', false) + with archiveFiles(modulesFiles(false), 'tar', 'linux', false, true) } task buildOssLinuxTar(type: Tar) { configure(commonTarConfig) archiveClassifier = 'linux-x86_64' - with archiveFiles(modulesFiles(true), 'tar', true) + with 
archiveFiles(modulesFiles(true), 'tar', 'linux', true, true) +} + +task buildNoJdkLinuxTar(type: Tar) { + configure(commonTarConfig) + archiveClassifier = 'no-jdk-linux-x86_64' + with archiveFiles(modulesFiles(false), 'tar', 'linux', false, false) +} + +task buildOssNoJdkLinuxTar(type: Tar) { + configure(commonTarConfig) + archiveClassifier = 'no-jdk-linux-x86_64' + with archiveFiles(modulesFiles(true), 'tar', 'linux', true, false) } Closure tarExists = { it -> new File('/bin/tar').exists() || new File('/usr/bin/tar').exists() || new File('/usr/local/bin/tar').exists() } diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle index d79971907b50d..f44136b2565cc 100644 --- a/distribution/archives/integ-test-zip/build.gradle +++ b/distribution/archives/integ-test-zip/build.gradle @@ -26,7 +26,7 @@ integTestRunner { * when running against an external cluster. */ if (System.getProperty("tests.rest.cluster") == null) { - systemProperty 'tests.logfile', + nonInputProperties.systemProperty 'tests.logfile', "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json" } else { systemProperty 'tests.logfile', '--external--' diff --git a/distribution/archives/no-jdk-darwin-tar/build.gradle b/distribution/archives/no-jdk-darwin-tar/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/no-jdk-darwin-tar/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/archives/no-jdk-linux-tar/build.gradle b/distribution/archives/no-jdk-linux-tar/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/no-jdk-linux-tar/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/archives/no-jdk-windows-zip/build.gradle b/distribution/archives/no-jdk-windows-zip/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/no-jdk-windows-zip/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/archives/oss-no-jdk-darwin-tar/build.gradle b/distribution/archives/oss-no-jdk-darwin-tar/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/oss-no-jdk-darwin-tar/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/archives/oss-no-jdk-linux-tar/build.gradle b/distribution/archives/oss-no-jdk-linux-tar/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/oss-no-jdk-linux-tar/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/archives/oss-no-jdk-windows-zip/build.gradle b/distribution/archives/oss-no-jdk-windows-zip/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/oss-no-jdk-windows-zip/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
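
Editor's note: the archive tasks above differ only in three flags passed to archiveFiles: the platform (windows/darwin/linux), whether the distribution is OSS or default, and whether the bundled JDK is included. A small, stand-alone sketch of that matrix follows, showing the task name and archive classifier each combination produces; it merely mirrors the Gradle tasks for illustration (the integ-test-zip remains a special case) and is not part of the build.

    // Illustrative only: prints the variant matrix behind buildWindowsZip, buildOssNoJdkLinuxTar, etc.
    public final class ArchiveMatrixSketch {
        public static void main(String[] args) {
            String[] platforms = {"windows", "darwin", "linux"};
            boolean[] flags = {false, true};
            for (String platform : platforms) {
                // Windows distributions are zipped; darwin and linux use tar, as in the tasks above.
                String type = platform.equals("windows") ? "Zip" : "Tar";
                for (boolean oss : flags) {
                    for (boolean jdk : flags) {
                        String task = "build" + (oss ? "Oss" : "") + (jdk ? "" : "NoJdk")
                                + Character.toUpperCase(platform.charAt(0)) + platform.substring(1) + type;
                        String classifier = (jdk ? "" : "no-jdk-") + platform + "-x86_64";
                        System.out.println(task + " -> archiveClassifier " + classifier);
                    }
                }
            }
        }
    }
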
diff --git a/distribution/build.gradle b/distribution/build.gradle index 3cf4a170700df..49adc1dbe3d91 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -20,11 +20,14 @@ import org.elasticsearch.gradle.ConcatFilesTask import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.NoticeTask +import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.test.RunTask import org.apache.tools.ant.filters.FixCrLfFilter import java.nio.file.Files import java.nio.file.Path +import java.util.regex.Matcher +import java.util.regex.Pattern /***************************************************************************** * Third party dependencies report * @@ -49,11 +52,17 @@ task buildServerNotice(type: NoticeTask) { // other distributions include notices from modules as well, which are added below later task buildDefaultNotice(type: NoticeTask) { licensesDir new File(project(':server').projectDir, 'licenses') + licensesDir new File(project(':distribution').projectDir, 'licenses') } - -// other distributions include notices from modules as well, which are added below later task buildOssNotice(type: NoticeTask) { licensesDir new File(project(':server').projectDir, 'licenses') + licensesDir new File(project(':distribution').projectDir, 'licenses') +} +task buildDefaultNoJdkNotice(type: NoticeTask) { + licensesDir new File(project(':server').projectDir, 'licenses') +} +task buildOssNoJdkNotice(type: NoticeTask) { + licensesDir new File(project(':server').projectDir, 'licenses') } /***************************************************************************** @@ -210,6 +219,72 @@ xpack.subprojects.findAll { it.parent == xpack }.each { Project xpackModule -> copyLog4jProperties(buildDefaultLog4jConfig, xpackModule) } +/***************************************************************************** + * JDKs * + *****************************************************************************/ +// extract the bundled jdk version, broken into elements as: [feature, interim, update, build] +// Note the "patch" version is not yet handled here, as it has not yet been used by java. +Pattern JDK_VERSION = Pattern.compile("(\\d+)(\\.\\d+\\.\\d+)?\\+(\\d+)@([a-f0-9]{32})?") +Matcher jdkVersionMatcher = JDK_VERSION.matcher(VersionProperties.bundledJdk) +if (jdkVersionMatcher.matches() == false) { + throw new IllegalArgumentException("Malformed jdk version [" + VersionProperties.bundledJdk + "]") +} +String jdkVersion = jdkVersionMatcher.group(1) + (jdkVersionMatcher.group(2) != null ? (jdkVersionMatcher.group(2)) : "") +String jdkMajor = jdkVersionMatcher.group(1) +String jdkBuild = jdkVersionMatcher.group(3) +String hash = jdkVersionMatcher.group(4) + +repositories { + // simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back + ivy { + name "legacy-jdk" + url "https://download.oracle.com" + metadataSources { + artifact() + } + patternLayout { + artifact "java/GA/jdk${jdkMajor}/${jdkBuild}/GPL/openjdk-[revision]_[module]-x64_bin.[ext]" + } + } + // current pattern since 12.0.1 + ivy { + name "jdk" + url "https://download.oracle.com" + metadataSources { + artifact() + } + patternLayout { + artifact "java/GA/jdk${jdkVersion}/${hash}/${jdkBuild}/GPL/openjdk-[revision]_[module]-x64_bin.[ext]" + } + } +} +for (String platform : ['linux', 'darwin', 'windows']) { + String jdkConfigName = "jdk_${platform}" + Configuration jdkConfig = configurations.create(jdkConfigName) + String extension = platform.equals('windows') ? 
'zip' : 'tar.gz' + dependencies.add(jdkConfigName, "jdk:${platform.equals('darwin') ? 'osx' : platform}:${jdkVersion}@${extension}") + + int rootNdx = platform.equals('darwin') ? 2 : 1 + Closure removeRootDir = { + it.eachFile { FileCopyDetails details -> + details.relativePath = new RelativePath(true, details.relativePath.segments[rootNdx..-1] as String[]) + } + it.includeEmptyDirs false + } + String extractDir = "${buildDir}/jdks/openjdk-${jdkVersion}_${platform}" + project.task("extract${platform.capitalize()}Jdk", type: Copy) { + doFirst { + project.delete(extractDir) + } + into extractDir + if (extension.equals('zip')) { + from({ zipTree(jdkConfig.singleFile) }, removeRootDir) + } else { + from({ tarTree(resources.gzip(jdkConfig.singleFile)) }, removeRootDir) + } + } +} + // make sure we have a clean task since we aren't a java project, but we have tasks that // put stuff in the build dir task clean(type: Delete) { @@ -269,13 +344,13 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { from project(':distribution').buildTransportModules } - configFiles = { distributionType, oss -> + configFiles = { distributionType, oss, jdk -> copySpec { with copySpec { // main config files, processed with distribution specific substitutions from '../src/config' exclude 'log4j2.properties' // this is handled separately below - MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, oss)) + MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, oss, jdk)) } if (oss) { from project(':distribution').buildOssLog4jConfig @@ -287,7 +362,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } - binFiles = { distributionType, oss -> + binFiles = { distributionType, oss, jdk -> copySpec { // non-windows files, for all distributions with copySpec { @@ -295,7 +370,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { exclude '*.exe' exclude '*.bat' eachFile { it.setMode(0755) } - MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, oss)) + MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, oss, jdk)) } // windows files, only for zip if (distributionType == 'zip') { @@ -303,7 +378,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { from '../src/bin' include '*.bat' filter(FixCrLfFilter, eol: FixCrLfFilter.CrLf.newInstance('crlf')) - MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, oss)) + MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, oss, jdk)) } with copySpec { from '../src/bin' @@ -325,11 +400,32 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } - noticeFile = copySpec { - if (project.name == 'integ-test-zip') { - from buildServerNotice - } else { - from buildDefaultNotice + noticeFile = { oss, jdk -> + copySpec { + if (project.name == 'integ-test-zip') { + from buildServerNotice + } else { + if (oss && jdk) { + from buildOssNotice + } else if (oss) { + from buildOssNoJdkNotice + } else if (jdk) { + from buildDefaultNotice + } else { + from buildDefaultNoJdkNotice + } + } + } + } + + jdkFiles = { platform -> + copySpec { + from project(':distribution').tasks.getByName("extract${platform.capitalize()}Jdk") + eachFile { FileCopyDetails details -> + if (details.relativePath.segments[-2] == 'bin' || details.relativePath.segments[-1] == 'jspawnhelper') { + details.mode = 0755 + } + } } } } @@ -389,7 +485,7 @@ task 
run(type: RunTask) { * */ subprojects { - ext.expansionsForDistribution = { distributionType, oss -> + ext.expansionsForDistribution = { distributionType, oss, jdk -> final String defaultHeapSize = "1g" final String packagingPathData = "path.data: /var/lib/elasticsearch" final String pathLogs = "/var/log/elasticsearch" @@ -482,6 +578,10 @@ subprojects { 'zip': 'zip' ], + 'es.bundled_jdk': [ + 'def': jdk ? 'true' : 'false' + ], + 'license.name': [ 'deb': oss ? 'ASL-2.0' : 'Elastic-License' ], diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 0ee597b449a41..29628589a5155 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -120,40 +120,8 @@ bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unre } } - Map artifactFiles = [:] - List projectDirs = [] - List projects = ['deb', 'rpm'] - if (bwcVersion.onOrAfter('7.0.0')) { - projects.addAll(['windows-zip', 'darwin-tar', 'linux-tar']) - } else { - projects.add('zip') - } - - for (String projectName : projects) { - String baseDir = "distribution" - String classifier = "" - String extension = projectName - if (bwcVersion.onOrAfter('7.0.0') && (projectName.contains('zip') || projectName.contains('tar'))) { - int index = projectName.indexOf('-') - classifier = "-${projectName.substring(0, index)}-x86_64" - extension = projectName.substring(index + 1) - if (extension.equals('tar')) { - extension += '.gz' - } - } - if (bwcVersion.onOrAfter('6.3.0')) { - baseDir += projectName.endsWith('zip') || projectName.endsWith('tar') ? '/archives' : '/packages' - // add oss variant first - projectDirs.add("${baseDir}/oss-${projectName}") - artifactFiles.put("oss-" + projectName, file("${checkoutDir}/${baseDir}/oss-${projectName}/build/distributions/elasticsearch-oss-${bwcVersion}-SNAPSHOT${classifier}.${extension}")) - } - projectDirs.add("${baseDir}/${projectName}") - artifactFiles.put(projectName, - file("${checkoutDir}/${baseDir}/${projectName}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT${classifier}.${extension}")) - } - Closure createRunBwcGradleTask = { name, extraConfig -> - task "$name"(type: Exec) { + return tasks.create(name: "$name", type: Exec) { dependsOn checkoutBwcBranch, writeBuildMetadata workingDir = checkoutDir doFirst { @@ -163,8 +131,9 @@ bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unre 'JAVA_HOME', getJavaHome(it, Integer.parseInt( lines - .findAll({ it.startsWith("ES_BUILD_JAVA=java") }) + .findAll({ it.startsWith("ES_BUILD_JAVA=") }) .collect({ it.replace("ES_BUILD_JAVA=java", "").trim() }) + .collect({ it.replace("ES_BUILD_JAVA=openjdk", "").trim() }) .join("!!") )) ) @@ -201,34 +170,87 @@ bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unre } else if (showStacktraceName.equals("ALWAYS_FULL")) { args "--full-stacktrace" } + if (gradle.getStartParameter().isParallelProjectExecutionEnabled()) { + args "--parallel" + } standardOutput = new IndentingOutputStream(System.out, bwcVersion) errorOutput = new IndentingOutputStream(System.err, bwcVersion) configure extraConfig } } - createRunBwcGradleTask("buildBwcVersion") { - for (String dir : projectDirs) { - args ":${dir.replace('/', ':')}:assemble" - } - doLast { - List missing = artifactFiles.values().grep { file -> - false == file.exists() - } - if (false == missing.empty) { - throw new InvalidUserDataException("Building ${bwcVersion} didn't generate expected files ${missing}") + Closure buildBwcTaskName = { projectName -> + return 
"buildBwc${projectName.replaceAll(/-\w/){ it[1].toUpperCase() }.capitalize()}" + } + + task buildBwc {} + + Closure createBuildBwcTask = { projectName, projectDir, projectArtifact -> + Task bwcTask = createRunBwcGradleTask(buildBwcTaskName(projectName)) { + args ":${projectDir.replace('/', ':')}:assemble" + doLast { + if (projectArtifact.exists() == false) { + throw new InvalidUserDataException("Building ${bwcVersion} didn't generate expected file ${projectArtifact}") + } } } + buildBwc.dependsOn bwcTask + } + + Map artifactFiles = [:] + List projectDirs = [] + List projects = ['deb', 'rpm'] + if (bwcVersion.onOrAfter('7.0.0')) { + projects.addAll(['windows-zip', 'darwin-tar', 'linux-tar']) + } else { + projects.add('zip') + } + + for (String projectName : projects) { + String baseDir = "distribution" + String classifier = "" + String extension = projectName + if (bwcVersion.onOrAfter('7.0.0') && (projectName.contains('zip') || projectName.contains('tar'))) { + int index = projectName.indexOf('-') + classifier = "-${projectName.substring(0, index)}-x86_64" + extension = projectName.substring(index + 1) + if (extension.equals('tar')) { + extension += '.gz' + } + } + if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('deb')) { + classifier = "-amd64" + } + if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('rpm')) { + classifier = "-x86_64" + } + if (bwcVersion.onOrAfter('6.3.0')) { + baseDir += projectName.endsWith('zip') || projectName.endsWith('tar') ? '/archives' : '/packages' + // add oss variant first + projectDirs.add("${baseDir}/oss-${projectName}") + File ossProjectArtifact = file("${checkoutDir}/${baseDir}/oss-${projectName}/build/distributions/elasticsearch-oss-${bwcVersion}-SNAPSHOT${classifier}.${extension}") + artifactFiles.put("oss-" + projectName, ossProjectArtifact) + createBuildBwcTask("oss-${projectName}", "${baseDir}/oss-${projectName}", ossProjectArtifact) + } + projectDirs.add("${baseDir}/${projectName}") + File projectArtifact = file("${checkoutDir}/${baseDir}/${projectName}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT${classifier}.${extension}") + artifactFiles.put(projectName, projectArtifact) + + createBuildBwcTask(projectName, "${baseDir}/${projectName}", projectArtifact) } createRunBwcGradleTask("resolveAllBwcDependencies") { args 'resolveAllDependencies' } - - resolveAllDependencies.dependsOn resolveAllBwcDependencies + Version currentVersion = Version.fromString(version) + if (currentVersion.getMinor() == 0 && currentVersion.getRevision() == 0) { + // We only want to resolve dependencies for live versions of master, without cascading this to older versions + resolveAllDependencies.dependsOn resolveAllBwcDependencies + } for (e in artifactFiles) { String projectName = e.key + String buildBwcTask = buildBwcTaskName(projectName) File artifactFile = e.value String artifactFileName = artifactFile.name String artifactName = artifactFileName.contains('oss') ? 
'elasticsearch-oss' : 'elasticsearch' @@ -241,7 +263,7 @@ bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unre } configurations.create(projectName) artifacts { - it.add(projectName, [file: artifactFile, name: artifactName, classifier: classifier, type: suffix, builtBy: buildBwcVersion]) + it.add(projectName, [file: artifactFile, name: artifactName, classifier: classifier, type: suffix, builtBy: buildBwcTask]) } } // make sure no dependencies were added to assemble; we want it to be a no-op diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index e4d27da1f1fb0..a37a9239f40d3 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -17,14 +17,14 @@ dependencies { ossDockerSource project(path: ":distribution:archives:oss-linux-tar") } -ext.expansions = { oss -> - String classifier = 'linux-x86_64' +ext.expansions = { oss, local -> + final String classifier = 'linux-x86_64' + final String elasticsearch = oss ? "elasticsearch-oss-${VersionProperties.elasticsearch}-${classifier}.tar.gz" : "elasticsearch-${VersionProperties.elasticsearch}-${classifier}.tar.gz" return [ - 'elasticsearch' : oss ? "elasticsearch-oss-${VersionProperties.elasticsearch}-${classifier}.tar.gz" : "elasticsearch-${VersionProperties.elasticsearch}-${classifier}.tar.gz", - 'jdkUrl' : 'https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz', - 'jdkVersion' : '11.0.2', - 'license': oss ? 'Apache-2.0' : 'Elastic License', - 'version' : VersionProperties.elasticsearch + 'elasticsearch' : elasticsearch, + 'license' : oss ? 'Apache-2.0' : 'Elastic License', + 'source_elasticsearch': local ? "COPY $elasticsearch /opt/" : "RUN cd /opt && curl --retry 8 -s -L -O https://artifacts.elastic.co/downloads/elasticsearch/${elasticsearch} && cd -", + 'version' : VersionProperties.elasticsearch ] } @@ -36,17 +36,30 @@ private static String taskName(final String prefix, final boolean oss, final Str return "${prefix}${oss ? 
'Oss' : ''}${suffix}" } +project.ext { + dockerBuildContext = { boolean oss, boolean local -> + copySpec { + into('bin') { + from project.projectDir.toPath().resolve("src/docker/bin") + } + + into('config') { + from project.projectDir.toPath().resolve("src/docker/config") + } + + from(project.projectDir.toPath().resolve("src/docker/Dockerfile")) { + MavenFilteringHack.filter(it, expansions(oss, local)) + } + } + } +} + void addCopyDockerContextTask(final boolean oss) { task(taskName("copy", oss, "DockerContext"), type: Sync) { + inputs.properties(expansions(oss, true)) into files(oss) - into('bin') { - from 'src/docker/bin' - } - - into('config') { - from 'src/docker/config' - } + with dockerBuildContext(oss, true) if (oss) { from configurations.ossDockerSource @@ -58,24 +71,9 @@ void addCopyDockerContextTask(final boolean oss) { } } -void addCopyDockerfileTask(final boolean oss) { - task(taskName("copy", oss, "Dockerfile"), type: Copy) { - inputs.properties(expansions(oss)) // ensure task is run when ext.expansions is changed - mustRunAfter(taskName("copy", oss, "DockerContext")) - into files(oss) - - from('src/docker/Dockerfile') { - MavenFilteringHack.filter(it, expansions(oss)) - } - } -} - - preProcessFixture { dependsOn taskName("copy", true, "DockerContext") - dependsOn taskName("copy", true, "Dockerfile") dependsOn taskName("copy", false, "DockerContext") - dependsOn taskName("copy", false, "Dockerfile") } postProcessFixture.doLast { @@ -89,7 +87,6 @@ check.dependsOn postProcessFixture void addBuildDockerImage(final boolean oss) { final Task buildDockerImageTask = task(taskName("build", oss, "DockerImage"), type: LoggedExec) { dependsOn taskName("copy", oss, "DockerContext") - dependsOn taskName("copy", oss, "Dockerfile") List tags if (oss) { tags = [ "docker.elastic.co/elasticsearch/elasticsearch-oss:${VersionProperties.elasticsearch}" ] @@ -113,7 +110,6 @@ void addBuildDockerImage(final boolean oss) { for (final boolean oss : [false, true]) { addCopyDockerContextTask(oss) - addCopyDockerfileTask(oss) addBuildDockerImage(oss) } diff --git a/distribution/docker/docker-build-context/build.gradle b/distribution/docker/docker-build-context/build.gradle new file mode 100644 index 0000000000000..254407093ce82 --- /dev/null +++ b/distribution/docker/docker-build-context/build.gradle @@ -0,0 +1,11 @@ +apply plugin: 'base' + +task buildDockerBuildContext(type: Tar) { + extension = 'tar.gz' + compression = Compression.GZIP + archiveClassifier = "docker-build-context" + archiveBaseName = "elasticsearch" + with dockerBuildContext(false, false) +} + +assemble.dependsOn buildDockerBuildContext diff --git a/distribution/docker/oss-docker-build-context/build.gradle b/distribution/docker/oss-docker-build-context/build.gradle new file mode 100644 index 0000000000000..248b260daa94a --- /dev/null +++ b/distribution/docker/oss-docker-build-context/build.gradle @@ -0,0 +1,11 @@ +apply plugin: 'base' + +task buildOssDockerBuildContext(type: Tar) { + extension = 'tar.gz' + compression = Compression.GZIP + archiveClassifier = "docker-build-context" + archiveBaseName = "elasticsearch-oss" + with dockerBuildContext(true, false) +} + +assemble.dependsOn buildOssDockerBuildContext diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index 37bbfe6688597..2164037e00020 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -14,14 +14,6 @@ FROM centos:7 AS builder ENV PATH /usr/share/elasticsearch/bin:$PATH -ENV 
JAVA_HOME /opt/jdk-${jdkVersion} - -RUN curl --retry 8 -s ${jdkUrl} | tar -C /opt -zxf - - -# Replace OpenJDK's built-in CA certificate keystore with the one from the OS -# vendor. The latter is superior in several ways. -# REF: https://github.com/elastic/elasticsearch-docker/issues/171 -RUN ln -sf /etc/pki/ca-trust/extracted/java/cacerts /opt/jdk-${jdkVersion}/lib/security/cacerts RUN yum install -y unzip which @@ -30,13 +22,15 @@ RUN groupadd -g 1000 elasticsearch && \ WORKDIR /usr/share/elasticsearch -COPY ${elasticsearch} /opt/ +${source_elasticsearch} + RUN tar zxf /opt/${elasticsearch} --strip-components=1 +RUN grep ES_DISTRIBUTION_TYPE=tar /usr/share/elasticsearch/bin/elasticsearch-env \ + && sed -ie 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' /usr/share/elasticsearch/bin/elasticsearch-env RUN mkdir -p config data logs RUN chmod 0775 config data logs COPY config/elasticsearch.yml config/log4j2.properties config/ - ################################################################################ # Build stage 1 (the actual elasticsearch image): # Copy elasticsearch from stage 0 @@ -46,13 +40,11 @@ COPY config/elasticsearch.yml config/log4j2.properties config/ FROM centos:7 ENV ELASTIC_CONTAINER true -ENV JAVA_HOME /opt/jdk-${jdkVersion} -COPY --from=builder /opt/jdk-${jdkVersion} /opt/jdk-${jdkVersion} - -RUN yum update -y && \ +RUN for iter in {1..10}; do yum update -y && \ yum install -y nc unzip wget which && \ - yum clean all + yum clean all && exit_code=0 && break || exit_code=$? && echo "yum error: retry $iter in 10s" && sleep 10; done; \ + (exit $exit_code) RUN groupadd -g 1000 elasticsearch && \ adduser -u 1000 -g 1000 -G 0 -d /usr/share/elasticsearch elasticsearch && \ @@ -61,6 +53,12 @@ RUN groupadd -g 1000 elasticsearch && \ WORKDIR /usr/share/elasticsearch COPY --from=builder --chown=1000:0 /usr/share/elasticsearch /usr/share/elasticsearch + +# Replace OpenJDK's built-in CA certificate keystore with the one from the OS +# vendor. The latter is superior in several ways. +# REF: https://github.com/elastic/elasticsearch-docker/issues/171 +RUN ln -sf /etc/pki/ca-trust/extracted/java/cacerts /usr/share/elasticsearch/jdk/lib/security/cacerts + ENV PATH /usr/share/elasticsearch/bin:$PATH COPY --chown=1000:0 bin/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh diff --git a/distribution/licenses/openjdk-LICENSE.txt b/distribution/licenses/openjdk-LICENSE.txt new file mode 100644 index 0000000000000..b40a0f457d75c --- /dev/null +++ b/distribution/licenses/openjdk-LICENSE.txt @@ -0,0 +1,347 @@ +The GNU General Public License (GPL) + +Version 2, June 1991 + +Copyright (C) 1989, 1991 Free Software Foundation, Inc. +59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Everyone is permitted to copy and distribute verbatim copies of this license +document, but changing it is not allowed. + +Preamble + +The licenses for most software are designed to take away your freedom to share +and change it. By contrast, the GNU General Public License is intended to +guarantee your freedom to share and change free software--to make sure the +software is free for all its users. This General Public License applies to +most of the Free Software Foundation's software and to any other program whose +authors commit to using it. (Some other Free Software Foundation software is +covered by the GNU Library General Public License instead.) You can apply it to +your programs, too. + +When we speak of free software, we are referring to freedom, not price. 
Our +General Public Licenses are designed to make sure that you have the freedom to +distribute copies of free software (and charge for this service if you wish), +that you receive source code or can get it if you want it, that you can change +the software or use pieces of it in new free programs; and that you know you +can do these things. + +To protect your rights, we need to make restrictions that forbid anyone to deny +you these rights or to ask you to surrender the rights. These restrictions +translate to certain responsibilities for you if you distribute copies of the +software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis or for +a fee, you must give the recipients all the rights that you have. You must +make sure that they, too, receive or can get the source code. And you must +show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and (2) +offer you this license which gives you legal permission to copy, distribute +and/or modify the software. + +Also, for each author's protection and ours, we want to make certain that +everyone understands that there is no warranty for this free software. If the +software is modified by someone else and passed on, we want its recipients to +know that what they have is not the original, so that any problems introduced +by others will not reflect on the original authors' reputations. + +Finally, any free program is threatened constantly by software patents. We +wish to avoid the danger that redistributors of a free program will +individually obtain patent licenses, in effect making the program proprietary. +To prevent this, we have made it clear that any patent must be licensed for +everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and modification +follow. + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +0. This License applies to any program or other work which contains a notice +placed by the copyright holder saying it may be distributed under the terms of +this General Public License. The "Program", below, refers to any such program +or work, and a "work based on the Program" means either the Program or any +derivative work under copyright law: that is to say, a work containing the +Program or a portion of it, either verbatim or with modifications and/or +translated into another language. (Hereinafter, translation is included +without limitation in the term "modification".) Each licensee is addressed as +"you". + +Activities other than copying, distribution and modification are not covered by +this License; they are outside its scope. The act of running the Program is +not restricted, and the output from the Program is covered only if its contents +constitute a work based on the Program (independent of having been made by +running the Program). Whether that is true depends on what the Program does. + +1. You may copy and distribute verbatim copies of the Program's source code as +you receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice and +disclaimer of warranty; keep intact all the notices that refer to this License +and to the absence of any warranty; and give any other recipients of the +Program a copy of this License along with the Program. 
+ +You may charge a fee for the physical act of transferring a copy, and you may +at your option offer warranty protection in exchange for a fee. + +2. You may modify your copy or copies of the Program or any portion of it, thus +forming a work based on the Program, and copy and distribute such modifications +or work under the terms of Section 1 above, provided that you also meet all of +these conditions: + + a) You must cause the modified files to carry prominent notices stating + that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in whole or + in part contains or is derived from the Program or any part thereof, to be + licensed as a whole at no charge to all third parties under the terms of + this License. + + c) If the modified program normally reads commands interactively when run, + you must cause it, when started running for such interactive use in the + most ordinary way, to print or display an announcement including an + appropriate copyright notice and a notice that there is no warranty (or + else, saying that you provide a warranty) and that users may redistribute + the program under these conditions, and telling the user how to view a copy + of this License. (Exception: if the Program itself is interactive but does + not normally print such an announcement, your work based on the Program is + not required to print an announcement.) + +These requirements apply to the modified work as a whole. If identifiable +sections of that work are not derived from the Program, and can be reasonably +considered independent and separate works in themselves, then this License, and +its terms, do not apply to those sections when you distribute them as separate +works. But when you distribute the same sections as part of a whole which is a +work based on the Program, the distribution of the whole must be on the terms +of this License, whose permissions for other licensees extend to the entire +whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest your +rights to work written entirely by you; rather, the intent is to exercise the +right to control the distribution of derivative or collective works based on +the Program. + +In addition, mere aggregation of another work not based on the Program with the +Program (or with a work based on the Program) on a volume of a storage or +distribution medium does not bring the other work under the scope of this +License. + +3. You may copy and distribute the Program (or a work based on it, under +Section 2) in object code or executable form under the terms of Sections 1 and +2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable source + code, which must be distributed under the terms of Sections 1 and 2 above + on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three years, to + give any third party, for a charge no more than your cost of physically + performing source distribution, a complete machine-readable copy of the + corresponding source code, to be distributed under the terms of Sections 1 + and 2 above on a medium customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer to + distribute corresponding source code. 
(This alternative is allowed only + for noncommercial distribution and only if you received the program in + object code or executable form with such an offer, in accord with + Subsection b above.) + +The source code for a work means the preferred form of the work for making +modifications to it. For an executable work, complete source code means all +the source code for all modules it contains, plus any associated interface +definition files, plus the scripts used to control compilation and installation +of the executable. However, as a special exception, the source code +distributed need not include anything that is normally distributed (in either +source or binary form) with the major components (compiler, kernel, and so on) +of the operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the source +code from the same place counts as distribution of the source code, even though +third parties are not compelled to copy the source along with the object code. + +4. You may not copy, modify, sublicense, or distribute the Program except as +expressly provided under this License. Any attempt otherwise to copy, modify, +sublicense or distribute the Program is void, and will automatically terminate +your rights under this License. However, parties who have received copies, or +rights, from you under this License will not have their licenses terminated so +long as such parties remain in full compliance. + +5. You are not required to accept this License, since you have not signed it. +However, nothing else grants you permission to modify or distribute the Program +or its derivative works. These actions are prohibited by law if you do not +accept this License. Therefore, by modifying or distributing the Program (or +any work based on the Program), you indicate your acceptance of this License to +do so, and all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + +6. Each time you redistribute the Program (or any work based on the Program), +the recipient automatically receives a license from the original licensor to +copy, distribute or modify the Program subject to these terms and conditions. +You may not impose any further restrictions on the recipients' exercise of the +rights granted herein. You are not responsible for enforcing compliance by +third parties to this License. + +7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), conditions +are imposed on you (whether by court order, agreement or otherwise) that +contradict the conditions of this License, they do not excuse you from the +conditions of this License. If you cannot distribute so as to satisfy +simultaneously your obligations under this License and any other pertinent +obligations, then as a consequence you may not distribute the Program at all. +For example, if a patent license would not permit royalty-free redistribution +of the Program by all those who receive copies directly or indirectly through +you, then the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. 
+ +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply and +the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any patents or +other property right claims or to contest validity of any such claims; this +section has the sole purpose of protecting the integrity of the free software +distribution system, which is implemented by public license practices. Many +people have made generous contributions to the wide range of software +distributed through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing to +distribute software through any other system and a licensee cannot impose that +choice. + +This section is intended to make thoroughly clear what is believed to be a +consequence of the rest of this License. + +8. If the distribution and/or use of the Program is restricted in certain +countries either by patents or by copyrighted interfaces, the original +copyright holder who places the Program under this License may add an explicit +geographical distribution limitation excluding those countries, so that +distribution is permitted only in or among countries not thus excluded. In +such case, this License incorporates the limitation as if written in the body +of this License. + +9. The Free Software Foundation may publish revised and/or new versions of the +General Public License from time to time. Such new versions will be similar in +spirit to the present version, but may differ in detail to address new problems +or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any later +version", you have the option of following the terms and conditions either of +that version or of any later version published by the Free Software Foundation. +If the Program does not specify a version number of this License, you may +choose any version ever published by the Free Software Foundation. + +10. If you wish to incorporate parts of the Program into other free programs +whose distribution conditions are different, write to the author to ask for +permission. For software which is copyrighted by the Free Software Foundation, +write to the Free Software Foundation; we sometimes make exceptions for this. +Our decision will be guided by the two goals of preserving the free status of +all derivatives of our free software and of promoting the sharing and reuse of +software generally. + +NO WARRANTY + +11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR +THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE +STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE +PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND +PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, +YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL +ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE +PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR +INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA +BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER +OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +END OF TERMS AND CONDITIONS + +How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest possible +use to the public, the best way to achieve this is to make it free software +which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest to attach +them to the start of each source file to most effectively convey the exclusion +of warranty; and each file should have at least the "copyright" line and a +pointer to where the full notice is found. + + One line to give the program's name and a brief idea of what it does. + + Copyright (C) + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this when it +starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author Gnomovision comes + with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free + software, and you are welcome to redistribute it under certain conditions; + type 'show c' for details. + +The hypothetical commands 'show w' and 'show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may be +called something other than 'show w' and 'show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your school, +if any, to sign a "copyright disclaimer" for the program, if necessary. Here +is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + 'Gnomovision' (which makes passes at compilers) written by James Hacker. + + signature of Ty Coon, 1 April 1989 + + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General Public +License instead of this License. 
+ + +"CLASSPATH" EXCEPTION TO THE GPL + +Certain source files distributed by Oracle America and/or its affiliates are +subject to the following clarification and special exception to the GPL, but +only where Oracle has expressly included in the particular source file's header +the words "Oracle designates this particular file as subject to the "Classpath" +exception as provided by Oracle in the LICENSE file that accompanied this code." + + Linking this library statically or dynamically with other modules is making + a combined work based on this library. Thus, the terms and conditions of + the GNU General Public License cover the whole combination. + + As a special exception, the copyright holders of this library give you + permission to link this library with independent modules to produce an + executable, regardless of the license terms of these independent modules, + and to copy and distribute the resulting executable under terms of your + choice, provided that you also meet, for each linked independent module, + the terms and conditions of the license of that module. An independent + module is a module which is not derived from or based on this library. If + you modify this library, you may extend this exception to your version of + the library, but you are not obligated to do so. If you do not wish to do + so, delete this exception statement from your version. diff --git a/distribution/licenses/openjdk-NOTICE.txt b/distribution/licenses/openjdk-NOTICE.txt new file mode 100644 index 0000000000000..ca5ac831c8526 --- /dev/null +++ b/distribution/licenses/openjdk-NOTICE.txt @@ -0,0 +1,5 @@ +Copyright (c) 1995, 2013, Oracle and/or its affiliates. + +OpenJDK is licensed under the GPLv2+CE. A copy of that license is included in +this distribution immediately below this notice. You can find a copy of the +OpenJDK source through the downloads page at https://elastic.co. diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index ad0f4a5cdec08..85db63edd1ad8 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -18,6 +18,7 @@ import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.MavenFilteringHack +import org.redline_rpm.header.Flags import java.nio.file.Files import java.nio.file.Path @@ -52,6 +53,7 @@ import java.util.regex.Pattern buildscript { repositories { maven { + name "gradle-plugins" url "https://plugins.gradle.org/m2/" } } @@ -60,22 +62,23 @@ buildscript { } } -void addProcessFilesTask(String type, boolean oss) { - String packagingFiles = "build/packaging/${ oss ? 'oss-' : ''}${type}" +void addProcessFilesTask(String type, boolean oss, boolean jdk) { + String packagingFiles = "build/packaging/${oss ? 'oss-' : ''}${jdk ? '' : 'no-jdk-'}${type}" - task("process${oss ? 'Oss' : ''}${type.capitalize()}Files", type: Copy) { + String taskName = "process${oss ? 'Oss' : ''}${jdk ? 
'' : 'NoJdk'}${type.capitalize()}Files" + task(taskName, type: Copy) { into packagingFiles with copySpec { from 'src/common' from "src/${type}" - MavenFilteringHack.filter(it, expansionsForDistribution(type, oss)) + MavenFilteringHack.filter(it, expansionsForDistribution(type, oss, jdk)) } into('etc/elasticsearch') { - with configFiles(type, oss) + with configFiles(type, oss, jdk) } - MavenFilteringHack.filter(it, expansionsForDistribution(type, oss)) + MavenFilteringHack.filter(it, expansionsForDistribution(type, oss, jdk)) doLast { // create empty dirs, we set the permissions when configuring the packages @@ -89,25 +92,30 @@ void addProcessFilesTask(String type, boolean oss) { } } } -addProcessFilesTask('deb', true) -addProcessFilesTask('deb', false) -addProcessFilesTask('rpm', true) -addProcessFilesTask('rpm', false) +addProcessFilesTask('deb', true, true) +addProcessFilesTask('deb', true, false) +addProcessFilesTask('deb', false, true) +addProcessFilesTask('deb', false, false) +addProcessFilesTask('rpm', true, true) +addProcessFilesTask('rpm', true, false) +addProcessFilesTask('rpm', false, true) +addProcessFilesTask('rpm', false, false) // Common configuration that is package dependent. This can't go in ospackage // since we have different templated files that need to be consumed, but the structure // is the same -Closure commonPackageConfig(String type, boolean oss) { +Closure commonPackageConfig(String type, boolean oss, boolean jdk) { return { - dependsOn "process${oss ? 'Oss' : ''}${type.capitalize()}Files" + dependsOn "process${oss ? 'Oss' : ''}${jdk ? '' : 'NoJdk'}${type.capitalize()}Files" packageName "elasticsearch${oss ? '-oss' : ''}" arch (type == 'deb' ? 'amd64' : 'X86_64') // Follow elasticsearch's file naming convention - archiveName "${packageName}-${project.version}-${archString}.${type}" + String jdkString = jdk ? "" : "no-jdk-" + archiveName "${packageName}-${project.version}-${jdkString}${archString}.${type}" - String prefix = "${oss ? 'oss-' : ''}${type}" + String prefix = "${oss ? 'oss-' : ''}${jdk ? '' : 'no-jdk-'}${type}" destinationDir = file("${prefix}/build/distributions") - String packagingFiles = "build/packaging/${prefix}" + String packagingFiles = "build/packaging/${oss ? 'oss-' : ''}${jdk ? '' : 'no-jdk-'}${type}" String scripts = "${packagingFiles}/scripts" preInstall file("${scripts}/preinst") @@ -122,7 +130,7 @@ Closure commonPackageConfig(String type, boolean oss) { // specify it again explicitly for copying common files into('/usr/share/elasticsearch') { into('bin') { - with binFiles(type, oss) + with binFiles(type, oss, jdk) } from(rootProject.projectDir) { include 'README.textile' @@ -134,6 +142,11 @@ Closure commonPackageConfig(String type, boolean oss) { into('modules') { with modulesFiles(oss) } + if (jdk) { + into('jdk') { + with jdkFiles('linux') + } + } // we need to specify every intermediate directory in these paths so the package managers know they are explicitly // intended to manage them; otherwise they may be left behind on uninstallation. 
duplicate calls of the same // directory are fine @@ -141,7 +154,7 @@ Closure commonPackageConfig(String type, boolean oss) { String[] segments = fcp.relativePath.segments for (int i = segments.length - 2; i > 2; --i) { directory('/' + segments[0..i].join('/'), 0755) - if (segments[-2] == 'bin') { + if (segments[-2] == 'bin' || segments[-1] == 'jspawnhelper') { fcp.mode = 0755 } else { fcp.mode = 0644 @@ -194,7 +207,7 @@ Closure commonPackageConfig(String type, boolean oss) { createDirectoryEntry true fileType CONFIG | NOREPLACE } - String envFile = expansionsForDistribution(type, false)['path.env'] + String envFile = expansionsForDistribution(type, oss, jdk)['path.env'] configurationFile envFile into(new File(envFile).getParent()) { fileType CONFIG | NOREPLACE @@ -248,6 +261,9 @@ Closure commonPackageConfig(String type, boolean oss) { // the oss package conflicts with the default distribution and vice versa conflicts('elasticsearch' + (oss ? '' : '-oss')) + + into '/usr/share/elasticsearch' + with noticeFile(oss, jdk) } } @@ -282,12 +298,11 @@ ospackage { permissionGroup 'root' into '/usr/share/elasticsearch' - with noticeFile } -Closure commonDebConfig(boolean oss) { +Closure commonDebConfig(boolean oss, boolean jdk) { return { - configure(commonPackageConfig('deb', oss)) + configure(commonPackageConfig('deb', oss, jdk)) // jdeb does not provide a way to set the License control attribute, and ospackage // silently ignores setting it. Instead, we set the license as "custom field" @@ -310,16 +325,24 @@ Closure commonDebConfig(boolean oss) { } task buildDeb(type: Deb) { - configure(commonDebConfig(false)) + configure(commonDebConfig(false, true)) } task buildOssDeb(type: Deb) { - configure(commonDebConfig(true)) + configure(commonDebConfig(true, true)) +} + +task buildNoJdkDeb(type: Deb) { + configure(commonDebConfig(false, false)) +} + +task buildOssNoJdkDeb(type: Deb) { + configure(commonDebConfig(true, false)) } -Closure commonRpmConfig(boolean oss) { +Closure commonRpmConfig(boolean oss, boolean jdk) { return { - configure(commonPackageConfig('rpm', oss)) + configure(commonPackageConfig('rpm', oss, jdk)) if (oss) { license 'ASL 2.0' @@ -330,6 +353,8 @@ Closure commonRpmConfig(boolean oss) { packageGroup 'Application/Internet' requires '/bin/bash' + obsoletes packageName, '7.0.0', Flags.LESS + prefix '/usr' packager 'Elasticsearch' version = project.version.replace('-', '_') @@ -345,11 +370,19 @@ Closure commonRpmConfig(boolean oss) { } task buildRpm(type: Rpm) { - configure(commonRpmConfig(false)) + configure(commonRpmConfig(false, true)) } task buildOssRpm(type: Rpm) { - configure(commonRpmConfig(true)) + configure(commonRpmConfig(true, true)) +} + +task buildNoJdkRpm(type: Rpm) { + configure(commonRpmConfig(false, false)) +} + +task buildOssNoJdkRpm(type: Rpm) { + configure(commonRpmConfig(true, false)) } Closure dpkgExists = { it -> new File('/bin/dpkg-deb').exists() || new File('/usr/bin/dpkg-deb').exists() || new File('/usr/local/bin/dpkg-deb').exists() } diff --git a/distribution/packages/no-jdk-deb/build.gradle b/distribution/packages/no-jdk-deb/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/packages/no-jdk-deb/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
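The packaging changes above split each deb and rpm flavor into a with-JDK and a no-JDK variant, each driven by its own Gradle task (buildNoJdkDeb, buildOssNoJdkDeb, buildNoJdkRpm, buildOssNoJdkRpm, all defined in distribution/packages/build.gradle). As a minimal sketch of how the new variants might be built locally — the Gradle wrapper invocation and the :distribution:packages project path are assumptions and are not taken from this patch:

    # assumed local invocation; the task names come from distribution/packages/build.gradle above
    ./gradlew :distribution:packages:buildNoJdkDeb \
              :distribution:packages:buildOssNoJdkDeb \
              :distribution:packages:buildNoJdkRpm \
              :distribution:packages:buildOssNoJdkRpm

Per the archiveName change above, the no-JDK artifacts pick up a "no-jdk-" segment in their file names, distinguishing them from the default packages that bundle the JDK.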
diff --git a/distribution/packages/no-jdk-rpm/build.gradle b/distribution/packages/no-jdk-rpm/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/packages/no-jdk-rpm/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/packages/no-jdk-rpm/src/main/resources/logo/elastic.gif b/distribution/packages/no-jdk-rpm/src/main/resources/logo/elastic.gif new file mode 100755 index 0000000000000..e3b20998d5300 Binary files /dev/null and b/distribution/packages/no-jdk-rpm/src/main/resources/logo/elastic.gif differ diff --git a/distribution/packages/oss-no-jdk-deb/build.gradle b/distribution/packages/oss-no-jdk-deb/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/packages/oss-no-jdk-deb/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/packages/oss-no-jdk-rpm/build.gradle b/distribution/packages/oss-no-jdk-rpm/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/packages/oss-no-jdk-rpm/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/packages/src/common/scripts/postinst b/distribution/packages/src/common/scripts/postinst index 0a0b505b12b0a..b440bb807755c 100644 --- a/distribution/packages/src/common/scripts/postinst +++ b/distribution/packages/src/common/scripts/postinst @@ -94,11 +94,15 @@ elif [ "$RESTART_ON_UPGRADE" = "true" ]; then fi # the equivalent code for rpm is in posttrans -if [ "$PACKAGE" = "deb" -a ! -f /etc/elasticsearch/elasticsearch.keystore ]; then - /usr/share/elasticsearch/bin/elasticsearch-keystore create - chown root:elasticsearch /etc/elasticsearch/elasticsearch.keystore - chmod 660 /etc/elasticsearch/elasticsearch.keystore - md5sum /etc/elasticsearch/elasticsearch.keystore > /etc/elasticsearch/.elasticsearch.keystore.initial_md5sum +if [ "$PACKAGE" = "deb" ]; then + if [ ! -f /etc/elasticsearch/elasticsearch.keystore ]; then + /usr/share/elasticsearch/bin/elasticsearch-keystore create + chown root:elasticsearch /etc/elasticsearch/elasticsearch.keystore + chmod 660 /etc/elasticsearch/elasticsearch.keystore + md5sum /etc/elasticsearch/elasticsearch.keystore > /etc/elasticsearch/.elasticsearch.keystore.initial_md5sum + else + /usr/share/elasticsearch/bin/elasticsearch-keystore upgrade + fi fi ${scripts.footer} diff --git a/distribution/packages/src/common/scripts/posttrans b/distribution/packages/src/common/scripts/posttrans index d3550bdbed24b..fdb9aafba38f6 100644 --- a/distribution/packages/src/common/scripts/posttrans +++ b/distribution/packages/src/common/scripts/posttrans @@ -3,6 +3,8 @@ if [ ! 
-f /etc/elasticsearch/elasticsearch.keystore ]; then chown root:elasticsearch /etc/elasticsearch/elasticsearch.keystore chmod 660 /etc/elasticsearch/elasticsearch.keystore md5sum /etc/elasticsearch/elasticsearch.keystore > /etc/elasticsearch/.elasticsearch.keystore.initial_md5sum +else + /usr/share/elasticsearch/bin/elasticsearch-keystore upgrade fi ${scripts.footer} diff --git a/distribution/packages/src/common/scripts/preinst b/distribution/packages/src/common/scripts/preinst index 0718e31b05ebe..66e5038a55daa 100644 --- a/distribution/packages/src/common/scripts/preinst +++ b/distribution/packages/src/common/scripts/preinst @@ -15,19 +15,6 @@ err_exit() { exit 1 } -# Check for these at preinst time due to failures in postinst if they do not exist -if [ -x "$JAVA_HOME/bin/java" ]; then - JAVA="$JAVA_HOME/bin/java" -elif command -v java; then - JAVA=`command -v java` -else - JAVA="" -fi - -if [ -z "$JAVA" ]; then - err_exit "could not find java; set JAVA_HOME or ensure java is in PATH" -fi - case "$1" in # Debian #################################################### diff --git a/distribution/packages/src/deb/init.d/elasticsearch b/distribution/packages/src/deb/init.d/elasticsearch index 1e503fa0cfad0..72eb635bba3f5 100755 --- a/distribution/packages/src/deb/init.d/elasticsearch +++ b/distribution/packages/src/deb/init.d/elasticsearch @@ -122,7 +122,7 @@ case "$1" in ulimit -l $MAX_LOCKED_MEMORY fi - if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count -a "$MAX_MAP_COUNT" -gt $(cat /proc/sys/vm/max_map_count) ]; then + if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ] && [ "$MAX_MAP_COUNT" -gt $(cat /proc/sys/vm/max_map_count) ]; then sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT fi diff --git a/distribution/packages/src/rpm/init.d/elasticsearch b/distribution/packages/src/rpm/init.d/elasticsearch index bdaf8fbec37de..99d8ef45ef6a9 100644 --- a/distribution/packages/src/rpm/init.d/elasticsearch +++ b/distribution/packages/src/rpm/init.d/elasticsearch @@ -90,7 +90,7 @@ start() { if [ -n "$MAX_LOCKED_MEMORY" ]; then ulimit -l $MAX_LOCKED_MEMORY fi - if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count -a "$MAX_MAP_COUNT" -gt $(cat /proc/sys/vm/max_map_count) ]; then + if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ] && [ "$MAX_MAP_COUNT" -gt $(cat /proc/sys/vm/max_map_count) ]; then sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT fi diff --git a/distribution/src/bin/elasticsearch b/distribution/src/bin/elasticsearch index 84e14eea3f6f8..8bdea4950cb75 100755 --- a/distribution/src/bin/elasticsearch +++ b/distribution/src/bin/elasticsearch @@ -20,7 +20,6 @@ ES_JVM_OPTIONS="$ES_PATH_CONF"/jvm.options JVM_OPTIONS=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.JvmOptionsParser "$ES_JVM_OPTIONS"` ES_JAVA_OPTS="${JVM_OPTIONS//\$\{ES_TMPDIR\}/$ES_TMPDIR} $ES_JAVA_OPTS" -cd "$ES_HOME" # manual parsing to find out, if process should be detached if ! echo $* | grep -E '(^-d |-d$| -d |--daemonize$|--daemonize )' > /dev/null; then exec \ @@ -30,6 +29,7 @@ if ! 
echo $* | grep -E '(^-d |-d$| -d |--daemonize$|--daemonize )' > /dev/null; -Des.path.conf="$ES_PATH_CONF" \ -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ + -Des.bundled_jdk="$ES_BUNDLED_JDK" \ -cp "$ES_CLASSPATH" \ org.elasticsearch.bootstrap.Elasticsearch \ "$@" @@ -41,6 +41,7 @@ else -Des.path.conf="$ES_PATH_CONF" \ -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ + -Des.bundled_jdk="$ES_BUNDLED_JDK" \ -cp "$ES_CLASSPATH" \ org.elasticsearch.bootstrap.Elasticsearch \ "$@" \ diff --git a/distribution/src/bin/elasticsearch-cli b/distribution/src/bin/elasticsearch-cli index 5699b3feb58e4..ae0c88b2043e0 100644 --- a/distribution/src/bin/elasticsearch-cli +++ b/distribution/src/bin/elasticsearch-cli @@ -7,7 +7,7 @@ source "`dirname "$0"`"/elasticsearch-env IFS=';' read -r -a additional_sources <<< "$ES_ADDITIONAL_SOURCES" for additional_source in "${additional_sources[@]}" do - source "`dirname "$0"`"/$additional_source + source "$ES_HOME"/bin/$additional_source done IFS=';' read -r -a additional_classpath_directories <<< "$ES_ADDITIONAL_CLASSPATH_DIRECTORIES" diff --git a/distribution/src/bin/elasticsearch-env b/distribution/src/bin/elasticsearch-env index d1dec54f93d77..2a490622b34b4 100644 --- a/distribution/src/bin/elasticsearch-env +++ b/distribution/src/bin/elasticsearch-env @@ -36,16 +36,19 @@ ES_HOME=`dirname "$ES_HOME"` ES_CLASSPATH="$ES_HOME/lib/*" # now set the path to java -if [ -x "$JAVA_HOME/bin/java" ]; then +if [ ! -z "$JAVA_HOME" ]; then JAVA="$JAVA_HOME/bin/java" else - set +e - JAVA=`which java` - set -e + if [ "$(uname -s)" = "Darwin" ]; then + # OSX has a different structure + JAVA="$ES_HOME/jdk/Contents/Home/bin/java" + else + JAVA="$ES_HOME/jdk/bin/java" + fi fi if [ ! -x "$JAVA" ]; then - echo "could not find java; set JAVA_HOME or ensure java is in PATH" >&2 + echo "could not find java in JAVA_HOME or bundled at $JAVA" >&2 exit 1 fi @@ -79,7 +82,10 @@ ES_PATH_CONF=`cd "$ES_PATH_CONF"; pwd` ES_DISTRIBUTION_FLAVOR=${es.distribution.flavor} ES_DISTRIBUTION_TYPE=${es.distribution.type} +ES_BUNDLED_JDK=${es.bundled_jdk} if [ -z "$ES_TMPDIR" ]; then ES_TMPDIR=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.TempDirectory` fi + +cd "$ES_HOME" diff --git a/distribution/src/bin/elasticsearch-env.bat b/distribution/src/bin/elasticsearch-env.bat index 7c4b8dc49f47c..395a66878c42c 100644 --- a/distribution/src/bin/elasticsearch-env.bat +++ b/distribution/src/bin/elasticsearch-env.bat @@ -20,11 +20,12 @@ rem now set the path to java if defined JAVA_HOME ( set JAVA="%JAVA_HOME%\bin\java.exe" ) else ( - for %%I in (java.exe) do set JAVA="%%~$PATH:I" + set JAVA="%ES_HOME%\jdk\bin\java.exe" + set JAVA_HOME="%ES_HOME%\jdk" ) if not exist %JAVA% ( - echo could not find java; set JAVA_HOME or ensure java is in PATH 1>&2 + echo "could not find java in JAVA_HOME or bundled at %ES_HOME%\jdk" >&2 exit /b 1 ) @@ -55,7 +56,10 @@ for %%I in ("%ES_PATH_CONF%..") do set ES_PATH_CONF=%%~dpfI set ES_DISTRIBUTION_FLAVOR=${es.distribution.flavor} set ES_DISTRIBUTION_TYPE=${es.distribution.type} +set ES_BUNDLED_JDK=${es.bundled_jdk} if not defined ES_TMPDIR ( - for /f "tokens=* usebackq" %%a in (`"%JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.TempDirectory""`) do set ES_TMPDIR=%%a + for /f "tokens=* usebackq" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" 
"org.elasticsearch.tools.launchers.TempDirectory"`) do set ES_TMPDIR=%%a ) + +cd /d "%ES_HOME%" diff --git a/distribution/src/bin/elasticsearch-service.bat b/distribution/src/bin/elasticsearch-service.bat index fc62c07ac9d3c..dc25d12b1b99c 100644 --- a/distribution/src/bin/elasticsearch-service.bat +++ b/distribution/src/bin/elasticsearch-service.bat @@ -164,7 +164,7 @@ if "%JVM_SS%" == "" ( goto:eof ) -set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.path.conf="%ES_PATH_CONF%";-Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%";-Des.distribution.type="%ES_DISTRIBUTION_TYPE%" +set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.path.conf="%ES_PATH_CONF%";-Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%";-Des.distribution.type="%ES_DISTRIBUTION_TYPE%";-Des.bundled_jdk="%ES_BUNDLED_JDK%" if "%ES_START_TYPE%" == "" set ES_START_TYPE=manual if "%ES_STOP_TIMEOUT%" == "" set ES_STOP_TIMEOUT=0 diff --git a/distribution/src/bin/elasticsearch.bat b/distribution/src/bin/elasticsearch.bat index 9b67fa2e0ffa6..73741a2261e71 100644 --- a/distribution/src/bin/elasticsearch.bat +++ b/distribution/src/bin/elasticsearch.bat @@ -41,17 +41,16 @@ IF ERRORLEVEL 1 ( EXIT /B %ERRORLEVEL% ) -set "ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options" +set ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options @setlocal -for /F "usebackq delims=" %%a in (`"%JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" || echo jvm_options_parser_failed"`) do set JVM_OPTIONS=%%a +for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" ^|^| echo jvm_options_parser_failed`) do set JVM_OPTIONS=%%a @endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%JVM_OPTIONS%" & set ES_JAVA_OPTS=%JVM_OPTIONS:${ES_TMPDIR}=!ES_TMPDIR!% %ES_JAVA_OPTS% if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" ( exit /b 1 ) -cd /d "%ES_HOME%" -%JAVA% %ES_JAVA_OPTS% -Delasticsearch -Des.path.home="%ES_HOME%" -Des.path.conf="%ES_PATH_CONF%" -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" -cp "%ES_CLASSPATH%" "org.elasticsearch.bootstrap.Elasticsearch" !newparams! +%JAVA% %ES_JAVA_OPTS% -Delasticsearch -Des.path.home="%ES_HOME%" -Des.path.conf="%ES_PATH_CONF%" -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" -Des.bundled_jdk="%ES_BUNDLED_JDK%" -cp "%ES_CLASSPATH%" "org.elasticsearch.bootstrap.Elasticsearch" !newparams! 
endlocal endlocal diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 2b30d6a87b4a1..58fe4721723a6 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -117,6 +117,3 @@ ${error.file} # due to internationalization enhancements in JDK 9 Elasticsearch need to set the provider to COMPAT otherwise # time/date parsing will break in an incompatible way for some date patterns and locals 9-:-Djava.locale.providers=COMPAT - -# temporary workaround for C2 bug with JDK 10 on hardware with AVX-512 -10-:-XX:UseAVX=2 diff --git a/distribution/tools/java-version-checker/build.gradle b/distribution/tools/java-version-checker/build.gradle index 03ac32d20b7d6..c8fe9e1121953 100644 --- a/distribution/tools/java-version-checker/build.gradle +++ b/distribution/tools/java-version-checker/build.gradle @@ -7,7 +7,7 @@ forbiddenApisMain { replaceSignatureFiles 'jdk-signatures' } -unitTest.enabled = false +test.enabled = false javadoc.enabled = false loggerUsageCheck.enabled = false jarHell.enabled = false diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 34544d724f411..61e3546ed8919 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -24,8 +24,8 @@ archivesBaseName = 'elasticsearch-plugin-cli' dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" compileOnly "org.elasticsearch:elasticsearch-cli:${version}" - compile "org.bouncycastle:bcpg-jdk15on:1.59" - compile "org.bouncycastle:bcprov-jdk15on:1.59" + compile "org.bouncycastle:bcpg-jdk15on:${versions.bouncycastle}" + compile "org.bouncycastle:bcprov-jdk15on:${versions.bouncycastle}" testCompile "org.elasticsearch.test:framework:${version}" testCompile 'com.google.jimfs:jimfs:1.1' testCompile 'com.google.guava:guava:18.0' @@ -35,7 +35,7 @@ dependencyLicenses { mapping from: /bc.*/, to: 'bouncycastle' } -unitTest { +test { // TODO: find a way to add permissions for the tests in this module systemProperty 'tests.security.manager', 'false' } diff --git a/distribution/tools/plugin-cli/licenses/bcpg-jdk15on-1.59.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-jdk15on-1.59.jar.sha1 deleted file mode 100644 index 0c0be50c906a3..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bcpg-jdk15on-1.59.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ee93e5376bb6cf0a15c027b5f5e4393f2738e709 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcpg-jdk15on-1.61.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-jdk15on-1.61.jar.sha1 new file mode 100644 index 0000000000000..e7c20268ef534 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bcpg-jdk15on-1.61.jar.sha1 @@ -0,0 +1 @@ +422656435514ab8a28752b117d5d2646660a0ace \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcprov-jdk15on-1.59.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcprov-jdk15on-1.59.jar.sha1 deleted file mode 100644 index aa42dbb8f6906..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bcprov-jdk15on-1.59.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2507204241ab450456bdb8e8c0a8f986e418bd99 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcprov-jdk15on-1.61.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcprov-jdk15on-1.61.jar.sha1 new file mode 100644 index 0000000000000..0ccfcd61a0e59 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bcprov-jdk15on-1.61.jar.sha1 @@ -0,0 +1 @@ 
+00df4b474e71be02c1349c3292d98886f888d1f7 \ No newline at end of file diff --git a/docs/README.asciidoc b/docs/README.asciidoc index 176d3324cd134..0429b617f7e7f 100644 --- a/docs/README.asciidoc +++ b/docs/README.asciidoc @@ -31,6 +31,8 @@ for its modifiers: This is most useful when you have text and snippets that work together to tell the story of some use case because it merges the snippets (and thus the use case) into one big test. + * You can't use `// TEST[continued]` immediately after `// TESTSETUP` or + `// TEARDOWN`. * `// TEST[skip:reason]`: Skip this test. Replace `reason` with the actual reason to skip the test. Snippets without `// TEST` or `// CONSOLE` aren't considered tests anyway but this is useful for explicitly documenting the @@ -60,9 +62,11 @@ for its modifiers: "figures out" the path. This is especially useful for making sweeping assertions like "I made up all the numbers in this example, don't compare them" which looks like `// TESTRESPONSE[s/\d+/$body.$_path/]`. - * `// TESTRESPONSE[_cat]`: Add substitutions for testing `_cat` responses. Use - this after all other substitutions so it doesn't make other substitutions - difficult. + * You can't use `// TESTRESPONSE` immediately after `// TESTSETUP`. Instead, + consider using `// TEST[continued]` or rearrange your snippets. + * `// TESTRESPONSE[non_json]`: Add substitutions for testing responses in a + format other than JSON. Use this after all other substitutions so it doesn't + make other substitutions difficult. * `// TESTRESPONSE[skip:reason]`: Skip the assertions specified by this response. * `// TESTSETUP`: Marks this snippet as the "setup" for all other snippets in @@ -74,7 +78,11 @@ for its modifiers: but rather than the setup defined in `docs/build.gradle` the setup is defined right in the documentation file. In general, we should prefer `// TESTSETUP` over `// TEST[setup:name]` because it makes it more clear what steps have to - be taken before the examples will work. + be taken before the examples will work. Tip: `// TESTSETUP` can only be used + on the first snippet of a document. +* `// TEARDOWN`: Ends and cleans up a test series started with `// TESTSETUP` or + `// TEST[setup:name]`. You can use `// TEARDOWN` to set up multiple tests in + the same file. * `// NOTCONSOLE`: Marks this snippet as neither `// CONSOLE` nor `// TESTRESPONSE`, excluding it from the list of unconverted snippets. 
We should only use this for snippets that *are* JSON but are *not* responses or diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 8a446bf037a12..4d64deada15c8 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,17 +1,12 @@ -:version: 7.0.0-alpha2 -:major-version: 7.x + +include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] + :lucene_version: 8.0.0 :lucene_version_path: 8_0_0 -:branch: master :jdk: 1.8.0_131 :jdk_major: 8 :build_flavor: default - -////////// -release-state can be: released | prerelease | unreleased -////////// - -:release-state: prerelease +:build_type: tar :issue: https://github.com/elastic/elasticsearch/issues/ :ml-issue: https://github.com/elastic/ml-cpp/issues/ @@ -40,6 +35,7 @@ ifeval::["{release-state}"=="unreleased"] :percolator-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/plugin/percolator-client/{version}-SNAPSHOT :matrixstats-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/plugin/aggs-matrix-stats-client/{version}-SNAPSHOT :rank-eval-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/plugin/rank-eval-client/{version}-SNAPSHOT +:version_qualified: {bare_version}-SNAPSHOT endif::[] ifeval::["{release-state}"!="unreleased"] @@ -53,6 +49,7 @@ ifeval::["{release-state}"!="unreleased"] :percolator-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/plugin/percolator-client/{version} :matrixstats-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/plugin/aggs-matrix-stats-client/{version} :rank-eval-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/plugin/rank-eval-client/{version} +:version_qualified: {bare_version} endif::[] :javadoc-client: {rest-high-level-client-javadoc}/org/elasticsearch/client diff --git a/docs/build.gradle b/docs/build.gradle index 1083d07b94f46..5b98a62d99640 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -840,7 +840,7 @@ buildRestTests.setups['sensor_prefab_data'] = ''' ''' buildRestTests.setups['sample_job'] = ''' - do: - xpack.ml.put_job: + ml.put_job: job_id: "sample_job" body: > { @@ -894,7 +894,7 @@ buildRestTests.setups['farequote_data'] = buildRestTests.setups['farequote_index ''' buildRestTests.setups['farequote_job'] = buildRestTests.setups['farequote_data'] + ''' - do: - xpack.ml.put_job: + ml.put_job: job_id: "farequote" body: > { @@ -914,7 +914,7 @@ buildRestTests.setups['farequote_job'] = buildRestTests.setups['farequote_data'] ''' buildRestTests.setups['farequote_datafeed'] = buildRestTests.setups['farequote_job'] + ''' - do: - xpack.ml.put_datafeed: + ml.put_datafeed: datafeed_id: "datafeed-farequote" body: > { @@ -978,7 +978,7 @@ buildRestTests.setups['server_metrics_data'] = buildRestTests.setups['server_met ''' buildRestTests.setups['server_metrics_job'] = buildRestTests.setups['server_metrics_data'] + ''' - do: - xpack.ml.put_job: + ml.put_job: job_id: "total-requests" body: > { @@ -1000,7 +1000,7 @@ buildRestTests.setups['server_metrics_job'] = buildRestTests.setups['server_metr ''' buildRestTests.setups['server_metrics_datafeed'] = buildRestTests.setups['server_metrics_job'] + ''' - do: - xpack.ml.put_datafeed: + ml.put_datafeed: datafeed_id: "datafeed-total-requests" body: > { @@ -1010,22 +1010,22 @@ buildRestTests.setups['server_metrics_datafeed'] = buildRestTests.setups['server ''' buildRestTests.setups['server_metrics_openjob'] = buildRestTests.setups['server_metrics_datafeed'] + ''' - do: - xpack.ml.open_job: + 
ml.open_job: job_id: "total-requests" ''' buildRestTests.setups['server_metrics_startdf'] = buildRestTests.setups['server_metrics_openjob'] + ''' - do: - xpack.ml.start_datafeed: + ml.start_datafeed: datafeed_id: "datafeed-total-requests" ''' buildRestTests.setups['calendar_outages'] = ''' - do: - xpack.ml.put_calendar: + ml.put_calendar: calendar_id: "planned-outages" ''' buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages'] + ''' - do: - xpack.ml.post_calendar_events: + ml.post_calendar_events: calendar_id: "planned-outages" body: > { "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "planned-outages" } @@ -1034,12 +1034,12 @@ buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['cale ''' buildRestTests.setups['calendar_outages_openjob'] = buildRestTests.setups['server_metrics_openjob'] + ''' - do: - xpack.ml.put_calendar: + ml.put_calendar: calendar_id: "planned-outages" ''' buildRestTests.setups['calendar_outages_addjob'] = buildRestTests.setups['server_metrics_openjob'] + ''' - do: - xpack.ml.put_calendar: + ml.put_calendar: calendar_id: "planned-outages" body: > { @@ -1048,7 +1048,7 @@ buildRestTests.setups['calendar_outages_addjob'] = buildRestTests.setups['server ''' buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages_addjob'] + ''' - do: - xpack.ml.post_calendar_events: + ml.post_calendar_events: calendar_id: "planned-outages" body: > { "events" : [ diff --git a/docs/community-clients/index.asciidoc b/docs/community-clients/index.asciidoc index 58a6e625aa8b3..def32890a62ab 100644 --- a/docs/community-clients/index.asciidoc +++ b/docs/community-clients/index.asciidoc @@ -13,14 +13,12 @@ a number of clients that have been contributed by the community for various lang * <> * <> * <> -* <> * <> * <> * <> * <> * <> * <> -* <> * <> * <> * <> @@ -55,20 +53,12 @@ a number of clients that have been contributed by the community for various lang * https://www.forgebox.io/view/cbelasticsearch[cbElasticSearch] Native ColdFusion (CFML) support for the ColdBox MVC Platform which provides you with a fluent search interface for Elasticsearch, in addition to a CacheBox Cache provider and a Logbox Appender for logging. -The following project appears to be abandoned: - -* https://github.com/jasonfill/ColdFusion-ElasticSearch-Client[ColdFusion-Elasticsearch-Client] - ColdFusion client for Elasticsearch - [[erlang]] == Erlang * http://github.com/tsloughter/erlastic_search[erlastic_search]: Erlang client using HTTP. -* https://github.com/dieswaytoofast/erlasticsearch[erlasticsearch]: - Erlang client using Thrift. - * https://github.com/datahogs/tirexs[Tirexs]: An https://github.com/elixir-lang/elixir[Elixir] based API/DSL, inspired by http://github.com/karmi/tire[Tire]. Ready to use in pure Erlang @@ -78,12 +68,11 @@ The following project appears to be abandoned: [[go]] == Go +Also see the {client}/go-api/current/index.html[official Elasticsearch Go client]. + * https://github.com/mattbaird/elastigo[elastigo]: Go client. -* https://github.com/belogik/goes[goes]: - Go lib. - * https://github.com/olivere/elastic[elastic]: Elasticsearch client for Google Go. @@ -91,11 +80,6 @@ The following project appears to be abandoned: Golang lib for Elasticsearch client. -[[groovy]] -== Groovy - -See the {client}/groovy-api/current/index.html[official Elasticsearch Groovy client]. 
- [[haskell]] == Haskell * https://github.com/bitemyapp/bloodhound[bloodhound]: @@ -117,19 +101,6 @@ Also see the {client}/java-api/current/index.html[official Elasticsearch Java cl Also see the {client}/javascript-api/current/index.html[official Elasticsearch JavaScript client]. -* https://github.com/fullscale/elastic.js[Elastic.js]: - A JavaScript implementation of the Elasticsearch Query DSL and Core API. - -* https://github.com/printercu/elastics[elastics]: Simple tiny client that just works - -* https://github.com/roundscope/ember-data-elasticsearch-kit[ember-data-elasticsearch-kit]: - An ember-data kit for both pushing and querying objects to Elasticsearch cluster - -The following project appears to be abandoned: - -* https://github.com/ramv/node-elastical[node-elastical]: - Node.js client for the Elasticsearch REST API - [[kotlin]] == Kotlin @@ -150,17 +121,6 @@ The following project appears to be abandoned: Also see the {client}/net-api/current/index.html[official Elasticsearch .NET client]. -* https://github.com/Yegoroff/PlainElastic.Net[PlainElastic.Net]: - .NET client. - -[[ocaml]] -== OCaml - -The following project appears to be abandoned: - -* https://github.com/tovbinm/ocaml-elasticsearch[ocaml-elasticsearch]: - OCaml client for Elasticsearch - [[perl]] == Perl @@ -186,26 +146,6 @@ Also see the {client}/php-api/current/index.html[official Elasticsearch PHP clie Also see the {client}/python-api/current/index.html[official Elasticsearch Python client]. -* http://github.com/rhec/pyelasticsearch[pyelasticsearch]: - Python client. - -* https://github.com/eriky/ESClient[ESClient]: - A lightweight and easy to use Python client for Elasticsearch. - -* https://github.com/mozilla/elasticutils/[elasticutils]: - A friendly chainable Elasticsearch interface for Python. - -* http://github.com/aparo/pyes[pyes]: - Python client. - -The following projects appear to be abandoned: - -* https://github.com/humangeo/rawes[rawes]: - Python low level client. - -* http://intridea.github.io/surfiki-refine-elasticsearch/[Surfiki Refine]: - Python Map-Reduce engine targeting Elasticsearch indices. - [[r]] == R @@ -218,19 +158,11 @@ The following projects appear to be abandoned: * https://github.com/UptakeOpenSource/uptasticsearch[uptasticsearch]: An R client tailored to data science workflows. -The following projects appear to be abandoned: - -* https://github.com/Tomesch/elasticsearch[elasticsearch] - R client for Elasticsearch - [[ruby]] == Ruby Also see the {client}/ruby-api/current/index.html[official Elasticsearch Ruby client]. -* https://github.com/PoseBiz/stretcher[stretcher]: - Ruby client. - * https://github.com/printercu/elastics-rb[elastics]: Tiny client with built-in zero-downtime migrations and ActiveRecord integration. @@ -243,14 +175,6 @@ Also see the {client}/ruby-api/current/index.html[official Elasticsearch Ruby cl * https://github.com/artsy/estella[Estella]: Make your Ruby models searchable -The following projects appear to be abandoned: - -* https://github.com/wireframe/elastic_searchable/[elastic_searchable]: - Ruby client + Rails integration. - -* https://github.com/ddnexus/flex[Flex]: - Ruby Client. - [[rust]] == Rust @@ -275,15 +199,6 @@ The following projects appear to be abandoned: * https://github.com/SumoLogic/elasticsearch-client[elasticsearch-client]: Scala DSL that uses the REST API. Akka and AWS helpers included. -The following projects appear to be abandoned: - -* https://github.com/scalastuff/esclient[esclient]: - Thin Scala client. 
- -* https://github.com/bsadeh/scalastic[scalastic]: - Scala client. - - [[smalltalk]] == Smalltalk @@ -293,9 +208,8 @@ The following projects appear to be abandoned: * http://ss3.gemstone.com/ss/Elasticsearch.html[Elasticsearch] - Smalltalk client for Elasticsearch - [[vertx]] == Vert.x -* https://github.com/goodow/realtime-search[realtime-search]: - Elasticsearch module for Vert.x +* https://github.com/reactiverse/elasticsearch-client[elasticsearch-client]: + An Elasticsearch client for Eclipse Vert.x diff --git a/docs/java-api/docs/update.asciidoc b/docs/java-api/docs/update.asciidoc index 1c2211be9ba13..0935c9f11eca4 100644 --- a/docs/java-api/docs/update.asciidoc +++ b/docs/java-api/docs/update.asciidoc @@ -22,7 +22,9 @@ Or you can use `prepareUpdate()` method: [source,java] -------------------------------------------------- client.prepareUpdate("ttl", "doc", "1") - .setScript(new Script("ctx._source.gender = \"male\"" <1> , ScriptService.ScriptType.INLINE, null, null)) + .setScript(new Script( + "ctx._source.gender = \"male\"", <1> + ScriptService.ScriptType.INLINE, null, null)) .get(); client.prepareUpdate("ttl", "doc", "1") diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc index e5eb2a6b02062..4a7fd7482d26e 100644 --- a/docs/java-api/index.asciidoc +++ b/docs/java-api/index.asciidoc @@ -1,8 +1,8 @@ -[[java-api]] = Java API include::../Versions.asciidoc[] +[[java-api]] [preface] == Preface @@ -57,7 +57,7 @@ For Maven: elastic-lucene-snapshots Elastic Lucene Snapshots - http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/00142c9 + https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/00142c9 true false @@ -68,7 +68,8 @@ For Gradle: ["source","groovy",subs="attributes"] -------------------------------------------------- maven { - url 'http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/00142c9' + name "lucene-snapshots" + url 'https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/00142c9' } -------------------------------------------------- diff --git a/docs/java-api/query-dsl/type-query.asciidoc b/docs/java-api/query-dsl/type-query.asciidoc index 93c7bd76dfe34..160deedb9eaca 100644 --- a/docs/java-api/query-dsl/type-query.asciidoc +++ b/docs/java-api/query-dsl/type-query.asciidoc @@ -1,7 +1,10 @@ [[java-query-dsl-type-query]] ==== Type Query -deprecated[7.0.0, Types are being removed, prefer filtering on a field instead. For more information, please see {ref}/removal-of-types.html[Removal of mapping types].] +deprecated[7.0.0] + +Types are being removed, prefer filtering on a field instead. For +more information, see {ref}/removal-of-types.html[Removal of mapping types]. 
See {ref}/query-dsl-type-query.html[Type Query] diff --git a/docs/java-rest/high-level/ccr/delete_auto_follow_pattern.asciidoc b/docs/java-rest/high-level/ccr/delete_auto_follow_pattern.asciidoc index f79dbd5d39de3..49aee815b89bc 100644 --- a/docs/java-rest/high-level/ccr/delete_auto_follow_pattern.asciidoc +++ b/docs/java-rest/high-level/ccr/delete_auto_follow_pattern.asciidoc @@ -3,7 +3,7 @@ :request: DeleteAutoFollowPatternRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete Auto Follow Pattern API diff --git a/docs/java-rest/high-level/ccr/forget_follower.asciidoc b/docs/java-rest/high-level/ccr/forget_follower.asciidoc new file mode 100644 index 0000000000000..b889993a4e9b9 --- /dev/null +++ b/docs/java-rest/high-level/ccr/forget_follower.asciidoc @@ -0,0 +1,45 @@ +-- +:api: ccr-forget-follower +:request: ForgetFollowerRequest +:response: BroadcastResponse +-- +[role="xpack"] +[id="{upid}-{api}"] +=== Forget Follower API + +[id="{upid}-{api}-request"] +==== Request + +The Forget Follower API allows you to manually remove the follower retention +leases from the leader. Note that these retention leases are automatically +managed by the following index. This API exists only for cases when invoking +the unfollow API on the follower index is unable to remove the follower +retention leases. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The name of the cluster containing the follower index. +<2> The name of the follower index. +<3> The UUID of the follower index (can be obtained from index stats). +<4> The alias of the remote cluster containing the leader index. +<5> The name of the leader index. + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the response was successful. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The high-level shards summary. +<2> The total number of shards the request was executed on. +<3> The total number of shards the request was successful on. +<4> The total number of shards the request was skipped on (should always be zero). +<5> The total number of shards the request failed on. +<6> The shard-level failures. 
+ +include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/ccr/get_auto_follow_pattern.asciidoc b/docs/java-rest/high-level/ccr/get_auto_follow_pattern.asciidoc index 61ab8d58e9cc3..98c9e5410193b 100644 --- a/docs/java-rest/high-level/ccr/get_auto_follow_pattern.asciidoc +++ b/docs/java-rest/high-level/ccr/get_auto_follow_pattern.asciidoc @@ -3,7 +3,7 @@ :request: GetAutoFollowPatternRequest :response: GetAutoFollowPatternResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Auto Follow Pattern API diff --git a/docs/java-rest/high-level/ccr/get_follow_info.asciidoc b/docs/java-rest/high-level/ccr/get_follow_info.asciidoc new file mode 100644 index 0000000000000..70a71c1c90b76 --- /dev/null +++ b/docs/java-rest/high-level/ccr/get_follow_info.asciidoc @@ -0,0 +1,35 @@ +-- +:api: ccr-get-follow-info +:request: FollowInfoRequest +:response: FollowInfoResponse +-- +[role="xpack"] +[id="{upid}-{api}"] +=== Get Follow Info API + + +[id="{upid}-{api}-request"] +==== Request + +The Get Follow Info API allows you to get follow information (parameters and status) for specific follower indices. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The follower index to get follow information for. + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ includes follow information for the specified follower indices + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The follow information for specified follower indices. 
+ +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/ccr/get_follow_stats.asciidoc b/docs/java-rest/high-level/ccr/get_follow_stats.asciidoc index 15b98abc6862e..a510a53b70cc2 100644 --- a/docs/java-rest/high-level/ccr/get_follow_stats.asciidoc +++ b/docs/java-rest/high-level/ccr/get_follow_stats.asciidoc @@ -3,7 +3,7 @@ :request: FollowStatsRequest :response: FollowStatsResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Follow Stats API diff --git a/docs/java-rest/high-level/ccr/get_stats.asciidoc b/docs/java-rest/high-level/ccr/get_stats.asciidoc index 28c9e107a09f2..6c8502302fcc5 100644 --- a/docs/java-rest/high-level/ccr/get_stats.asciidoc +++ b/docs/java-rest/high-level/ccr/get_stats.asciidoc @@ -3,7 +3,7 @@ :request: CcrStatsRequest :response: CcrStatsResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get CCR Stats API diff --git a/docs/java-rest/high-level/ccr/pause_follow.asciidoc b/docs/java-rest/high-level/ccr/pause_follow.asciidoc index de81afa1e83b3..70694da0e815a 100644 --- a/docs/java-rest/high-level/ccr/pause_follow.asciidoc +++ b/docs/java-rest/high-level/ccr/pause_follow.asciidoc @@ -3,7 +3,7 @@ :request: PauseFollowRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Pause Follow API diff --git a/docs/java-rest/high-level/ccr/put_auto_follow_pattern.asciidoc b/docs/java-rest/high-level/ccr/put_auto_follow_pattern.asciidoc index e6cc6b89ee8ba..7ee9ccbe9d692 100644 --- a/docs/java-rest/high-level/ccr/put_auto_follow_pattern.asciidoc +++ b/docs/java-rest/high-level/ccr/put_auto_follow_pattern.asciidoc @@ -3,7 +3,7 @@ :request: PutAutoFollowPatternRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Put Auto Follow Pattern API diff --git a/docs/java-rest/high-level/ccr/put_follow.asciidoc b/docs/java-rest/high-level/ccr/put_follow.asciidoc index 2f40bbd5d2b2d..c1991dcf492fd 100644 --- a/docs/java-rest/high-level/ccr/put_follow.asciidoc +++ b/docs/java-rest/high-level/ccr/put_follow.asciidoc @@ -3,7 +3,7 @@ :request: PutFollowRequest :response: PutFollowResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Put Follow API diff --git a/docs/java-rest/high-level/ccr/resume_follow.asciidoc b/docs/java-rest/high-level/ccr/resume_follow.asciidoc index 18d69b69d4979..e30f83115fa9c 100644 --- a/docs/java-rest/high-level/ccr/resume_follow.asciidoc +++ b/docs/java-rest/high-level/ccr/resume_follow.asciidoc @@ -3,7 +3,7 @@ :request: ResumeFollowRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Resume Follow API diff --git a/docs/java-rest/high-level/ccr/unfollow.asciidoc b/docs/java-rest/high-level/ccr/unfollow.asciidoc index 779b8c3f586c4..946a2c6e61812 100644 --- a/docs/java-rest/high-level/ccr/unfollow.asciidoc +++ b/docs/java-rest/high-level/ccr/unfollow.asciidoc @@ -3,7 +3,7 @@ :request: UnfollowRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Unfollow API diff --git a/docs/java-rest/high-level/getting-started.asciidoc b/docs/java-rest/high-level/getting-started.asciidoc index 685122771392c..89912cc2a4593 100644 --- a/docs/java-rest/high-level/getting-started.asciidoc +++ b/docs/java-rest/high-level/getting-started.asciidoc @@ -83,7 +83,7 @@ dependencies { The very first releases of any major version (like a beta), might have been built on top of a Lucene Snapshot version. In such a case you will be unable to resolve the Lucene dependencies of the client. 
-For example, if you want to use the `7.0.0-alpha2` version which depends on Lucene `8.0.0-snapshot-774e9aefbc`, you must +For example, if you want to use the `7.0.0-beta1` version which depends on Lucene `8.0.0-snapshot-83f9835`, you must define the following repository. For Maven: @@ -93,7 +93,7 @@ For Maven: elastic-lucene-snapshots Elastic Lucene Snapshots - http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/774e9aefbc + https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/83f9835 true false @@ -104,7 +104,8 @@ For Gradle: ["source","groovy",subs="attributes"] -------------------------------------------------- maven { - url 'http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/774e9aefbc' + name 'lucene-snapshots' + url 'https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/83f9835' } -------------------------------------------------- diff --git a/docs/java-rest/high-level/graph/explore.asciidoc b/docs/java-rest/high-level/graph/explore.asciidoc index f2718209f4b90..a178dfbc3a42a 100644 --- a/docs/java-rest/high-level/graph/explore.asciidoc +++ b/docs/java-rest/high-level/graph/explore.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-x-pack-graph-explore]] === X-Pack Graph explore API diff --git a/docs/java-rest/high-level/ilm/delete_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/delete_lifecycle_policy.asciidoc index e6f100294aec4..a68a2d9de5baf 100644 --- a/docs/java-rest/high-level/ilm/delete_lifecycle_policy.asciidoc +++ b/docs/java-rest/high-level/ilm/delete_lifecycle_policy.asciidoc @@ -3,7 +3,7 @@ :request: DeleteLifecyclePolicyRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete Lifecycle Policy API diff --git a/docs/java-rest/high-level/ilm/explain_lifecycle.asciidoc b/docs/java-rest/high-level/ilm/explain_lifecycle.asciidoc index 028f34793fef4..b85d482299adb 100644 --- a/docs/java-rest/high-level/ilm/explain_lifecycle.asciidoc +++ b/docs/java-rest/high-level/ilm/explain_lifecycle.asciidoc @@ -3,7 +3,7 @@ :request: ExplainLifecycleRequest :response: ExplainLifecycleResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Explain Lifecycle API diff --git a/docs/java-rest/high-level/ilm/get_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/get_lifecycle_policy.asciidoc index b86fad5880f67..506c2c736e545 100644 --- a/docs/java-rest/high-level/ilm/get_lifecycle_policy.asciidoc +++ b/docs/java-rest/high-level/ilm/get_lifecycle_policy.asciidoc @@ -3,7 +3,7 @@ :request: GetLifecyclePolicyRequest :response: GetLifecyclePolicyResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Lifecycle Policy API diff --git a/docs/java-rest/high-level/ilm/lifecycle_management_status.asciidoc b/docs/java-rest/high-level/ilm/lifecycle_management_status.asciidoc index 713c5480cae04..6bf4344477eac 100644 --- a/docs/java-rest/high-level/ilm/lifecycle_management_status.asciidoc +++ b/docs/java-rest/high-level/ilm/lifecycle_management_status.asciidoc @@ -3,7 +3,7 @@ :request: LifecycleManagementStatusRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Index Lifecycle Management Status API diff --git a/docs/java-rest/high-level/ilm/put_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/put_lifecycle_policy.asciidoc index 75103fa5bdfd9..7947f54ffbc7a 100644 --- a/docs/java-rest/high-level/ilm/put_lifecycle_policy.asciidoc +++ b/docs/java-rest/high-level/ilm/put_lifecycle_policy.asciidoc @@ -3,7 +3,7 @@ :request: PutLifecyclePolicyRequest 
:response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Put Lifecycle Policy API diff --git a/docs/java-rest/high-level/ilm/remove_lifecycle_policy_from_index.asciidoc b/docs/java-rest/high-level/ilm/remove_lifecycle_policy_from_index.asciidoc index 96c6b0be1b131..4b12e89d6aa49 100644 --- a/docs/java-rest/high-level/ilm/remove_lifecycle_policy_from_index.asciidoc +++ b/docs/java-rest/high-level/ilm/remove_lifecycle_policy_from_index.asciidoc @@ -3,7 +3,7 @@ :request: RemoveIndexLifecyclePolicyRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Remove Policy from Index API diff --git a/docs/java-rest/high-level/ilm/retry_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/retry_lifecycle_policy.asciidoc index 89dd4ea1cfa6b..2798b1fecfd35 100644 --- a/docs/java-rest/high-level/ilm/retry_lifecycle_policy.asciidoc +++ b/docs/java-rest/high-level/ilm/retry_lifecycle_policy.asciidoc @@ -3,7 +3,7 @@ :request: RetryLifecyclePolicyRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Retry Lifecycle Policy API diff --git a/docs/java-rest/high-level/ilm/start_lifecycle_management.asciidoc b/docs/java-rest/high-level/ilm/start_lifecycle_management.asciidoc index d65e7dd5009fb..20a7725966339 100644 --- a/docs/java-rest/high-level/ilm/start_lifecycle_management.asciidoc +++ b/docs/java-rest/high-level/ilm/start_lifecycle_management.asciidoc @@ -3,7 +3,7 @@ :request: StartILMRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Start Index Lifecycle Management API diff --git a/docs/java-rest/high-level/ilm/stop_lifecycle_management.asciidoc b/docs/java-rest/high-level/ilm/stop_lifecycle_management.asciidoc index 85117fe311a3a..04c30e1012f88 100644 --- a/docs/java-rest/high-level/ilm/stop_lifecycle_management.asciidoc +++ b/docs/java-rest/high-level/ilm/stop_lifecycle_management.asciidoc @@ -3,7 +3,7 @@ :request: StopILMRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Stop Index Lifecycle Management API diff --git a/docs/java-rest/high-level/migration.asciidoc b/docs/java-rest/high-level/migration.asciidoc index a42a9352faace..c8db57f52598d 100644 --- a/docs/java-rest/high-level/migration.asciidoc +++ b/docs/java-rest/high-level/migration.asciidoc @@ -45,6 +45,8 @@ The <> page shows <> brought by the high-level client. +// This ID is bad but it is the one we've had forever. +[[_changing_the_client_8217_s_initialization_code]] === Changing the client's initialization code The `TransportClient` is typically initialized as follows: @@ -94,6 +96,8 @@ must be replaced with: include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[rest-high-level-client-close] -------------------------------------------------- +// This ID is bad but it is the one we've had forever. 
+[[_changing_the_application_8217_s_code]] === Changing the application's code The `RestHighLevelClient` supports the same request and response objects diff --git a/docs/java-rest/high-level/migration/get-assistance.asciidoc b/docs/java-rest/high-level/migration/get-assistance.asciidoc deleted file mode 100644 index 723eb7d09053d..0000000000000 --- a/docs/java-rest/high-level/migration/get-assistance.asciidoc +++ /dev/null @@ -1,49 +0,0 @@ -[[java-rest-high-migration-get-assistance]] -=== Migration Get Assistance - -[[java-rest-high-migration-get-assistance-request]] -==== Index Upgrade Info Request - -An `IndexUpgradeInfoRequest` does not require any argument: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[get-assistance-request] --------------------------------------------------- -<1> Create a new request instance - -==== Optional arguments -The following arguments can optionally be provided: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[get-assistance-request-indices] --------------------------------------------------- -<1> Set the indices to the request - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[get-assistance-request-indices-options] --------------------------------------------------- -<1> Set the `IndicesOptions` to control how unavailable indices are resolved and -how wildcard expressions are expanded - -[[java-rest-high-migration-get-assistance-execution]] -==== Execution - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[get-assistance-execute] --------------------------------------------------- - -[[java-rest-high-migration-get-assistance-response]] -==== Response - -The returned `IndexUpgradeInfoResponse` contains the actions required for each index. - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[get-assistance-response] --------------------------------------------------- -<1> Retrieve the index -<2> Retrieve the action required for the migration of the current index diff --git a/docs/java-rest/high-level/migration/upgrade.asciidoc b/docs/java-rest/high-level/migration/upgrade.asciidoc deleted file mode 100644 index feabfa4ee48e5..0000000000000 --- a/docs/java-rest/high-level/migration/upgrade.asciidoc +++ /dev/null @@ -1,74 +0,0 @@ --- -:api: upgrade -:request: IndexUpgradeRequest -:response: BulkByScrollResponse -:submit_response: IndexUpgradeSubmissionResponse --- - -[[java-rest-high-migration-upgrade]] -=== Migration Upgrade - -[[java-rest-high-migration-upgrade-request]] -==== Index Upgrade Request - -An +{request}+ requires an index argument. 
Only one index at the time should be upgraded: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- -<1> Create a new request instance - -[[java-rest-high-migration-upgrade-execution]] -==== Execution - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-execute] --------------------------------------------------- - -[[java-rest-high-migration-upgrade-response]] -==== Response -The returned +{response}+ contains information about the executed operation - - -[[java-rest-high-migration-async-upgrade-request]] -==== Asynchronous Execution - -The asynchronous execution of an upgrade request requires both the +{request}+ -instance and an `ActionListener` instance to be passed to the asynchronous -method: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-async-listener] --------------------------------------------------- -<1> Called when the execution is successfully completed. The response is -provided as an argument and contains a list of individual results for each -operation that was executed. Note that one or more operations might have -failed while the others have been successfully executed. -<2> Called when the whole +{request}+ fails. In this case the raised -exception is provided as an argument and no operation has been executed. - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-async-execute] --------------------------------------------------- -<1> The +{request}+ to execute and the `ActionListener` to use when -the execution completes - -The asynchronous method does not block and returns immediately. Once it is -completed the `ActionListener` is called back using the `onResponse` method -if the execution successfully completed or using the `onFailure` method if -it failed. - - -=== Migration Upgrade with Task API -Submission of upgrade request task will requires the +{request}+ and will return -+{submit_response}+. The +{submit_response}+ can later be use to fetch -TaskId and query the Task API for results. - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-task-api] --------------------------------------------------- diff --git a/docs/java-rest/high-level/ml/close-job.asciidoc b/docs/java-rest/high-level/ml/close-job.asciidoc index 8a38b498629cf..7dfac598f3604 100644 --- a/docs/java-rest/high-level/ml/close-job.asciidoc +++ b/docs/java-rest/high-level/ml/close-job.asciidoc @@ -3,6 +3,7 @@ :request: CloseJobRequest :response: CloseJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Close Job API diff --git a/docs/java-rest/high-level/ml/delete-calendar-event.asciidoc b/docs/java-rest/high-level/ml/delete-calendar-event.asciidoc index dcd09a0581ddf..cf595bf59a960 100644 --- a/docs/java-rest/high-level/ml/delete-calendar-event.asciidoc +++ b/docs/java-rest/high-level/ml/delete-calendar-event.asciidoc @@ -3,6 +3,7 @@ :request: DeleteCalendarEventRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete Calendar Event API Removes a scheduled event from an existing {ml} calendar. 
diff --git a/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc b/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc index 4e55a221b85ab..0d1e18334a924 100644 --- a/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc +++ b/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc @@ -3,6 +3,7 @@ :request: DeleteCalendarJobRequest :response: PutCalendarResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete Calendar Job API Removes {ml} jobs from an existing {ml} calendar. diff --git a/docs/java-rest/high-level/ml/delete-calendar.asciidoc b/docs/java-rest/high-level/ml/delete-calendar.asciidoc index e7d5318a465d0..ca13e214909c9 100644 --- a/docs/java-rest/high-level/ml/delete-calendar.asciidoc +++ b/docs/java-rest/high-level/ml/delete-calendar.asciidoc @@ -3,6 +3,7 @@ :request: DeleteCalendarRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete Calendar API Delete a {ml} calendar. diff --git a/docs/java-rest/high-level/ml/delete-datafeed.asciidoc b/docs/java-rest/high-level/ml/delete-datafeed.asciidoc index 02bfafd795187..85d098269e027 100644 --- a/docs/java-rest/high-level/ml/delete-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/delete-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: DeleteDatafeedRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-delete-datafeed"] === Delete Datafeed API diff --git a/docs/java-rest/high-level/ml/delete-expired-data.asciidoc b/docs/java-rest/high-level/ml/delete-expired-data.asciidoc index 03bd013b2abde..8dc47750cbefe 100644 --- a/docs/java-rest/high-level/ml/delete-expired-data.asciidoc +++ b/docs/java-rest/high-level/ml/delete-expired-data.asciidoc @@ -4,6 +4,7 @@ :request: DeleteExpiredRequest :response: DeleteExpiredResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete Expired Data API Delete expired {ml} data. diff --git a/docs/java-rest/high-level/ml/delete-filter.asciidoc b/docs/java-rest/high-level/ml/delete-filter.asciidoc index abdcdcb53920a..7c68414d67c0b 100644 --- a/docs/java-rest/high-level/ml/delete-filter.asciidoc +++ b/docs/java-rest/high-level/ml/delete-filter.asciidoc @@ -3,6 +3,7 @@ :request: DeleteFilterRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete Filter API Delete a {ml} filter. 
diff --git a/docs/java-rest/high-level/ml/delete-forecast.asciidoc b/docs/java-rest/high-level/ml/delete-forecast.asciidoc index 961254b481552..2c654c9a23083 100644 --- a/docs/java-rest/high-level/ml/delete-forecast.asciidoc +++ b/docs/java-rest/high-level/ml/delete-forecast.asciidoc @@ -3,6 +3,7 @@ :request: DeleteForecastRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete Forecast API diff --git a/docs/java-rest/high-level/ml/delete-job.asciidoc b/docs/java-rest/high-level/ml/delete-job.asciidoc index a8c6b276dd484..49ce2ac79b043 100644 --- a/docs/java-rest/high-level/ml/delete-job.asciidoc +++ b/docs/java-rest/high-level/ml/delete-job.asciidoc @@ -3,6 +3,7 @@ :request: DeleteJobRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete Job API diff --git a/docs/java-rest/high-level/ml/delete-model-snapshot.asciidoc b/docs/java-rest/high-level/ml/delete-model-snapshot.asciidoc index 6ede01901dabe..b9a4f5f37367b 100644 --- a/docs/java-rest/high-level/ml/delete-model-snapshot.asciidoc +++ b/docs/java-rest/high-level/ml/delete-model-snapshot.asciidoc @@ -3,6 +3,7 @@ :request: DeleteModelSnapshotRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete Model Snapshot API diff --git a/docs/java-rest/high-level/ml/find-file-structure.asciidoc b/docs/java-rest/high-level/ml/find-file-structure.asciidoc index 5882fc0fce2e2..bb0fbea91b32e 100644 --- a/docs/java-rest/high-level/ml/find-file-structure.asciidoc +++ b/docs/java-rest/high-level/ml/find-file-structure.asciidoc @@ -3,6 +3,7 @@ :request: FindFileStructureRequest :response: FindFileStructureResponse -- +[role="xpack"] [id="{upid}-{api}"] === Find File Structure API diff --git a/docs/java-rest/high-level/ml/flush-job.asciidoc b/docs/java-rest/high-level/ml/flush-job.asciidoc index e721d48d4d1de..2300377801ebb 100644 --- a/docs/java-rest/high-level/ml/flush-job.asciidoc +++ b/docs/java-rest/high-level/ml/flush-job.asciidoc @@ -3,6 +3,7 @@ :request: FlushJobRequest :response: FlushJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Flush Job API diff --git a/docs/java-rest/high-level/ml/forecast-job.asciidoc b/docs/java-rest/high-level/ml/forecast-job.asciidoc index 48d899d681449..d9a1b615cacd2 100644 --- a/docs/java-rest/high-level/ml/forecast-job.asciidoc +++ b/docs/java-rest/high-level/ml/forecast-job.asciidoc @@ -3,6 +3,7 @@ :request: ForecastJobRequest :response: ForecastJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Forecast Job API diff --git a/docs/java-rest/high-level/ml/get-buckets.asciidoc b/docs/java-rest/high-level/ml/get-buckets.asciidoc index f150695befe06..b24e8533019f8 100644 --- a/docs/java-rest/high-level/ml/get-buckets.asciidoc +++ b/docs/java-rest/high-level/ml/get-buckets.asciidoc @@ -3,6 +3,7 @@ :request: GetBucketsRequest :response: GetBucketsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Buckets API diff --git a/docs/java-rest/high-level/ml/get-calendar-events.asciidoc b/docs/java-rest/high-level/ml/get-calendar-events.asciidoc index 486921fdcc9b2..1aedbcf3ac296 100644 --- a/docs/java-rest/high-level/ml/get-calendar-events.asciidoc +++ b/docs/java-rest/high-level/ml/get-calendar-events.asciidoc @@ -3,6 +3,7 @@ :request: GetCalendarEventsRequest :response: GetCalendarEventsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Calendar Events API Retrieves a calendars events. 
diff --git a/docs/java-rest/high-level/ml/get-calendars.asciidoc b/docs/java-rest/high-level/ml/get-calendars.asciidoc index 7c78612e064e8..cd23ac4dfe9f5 100644 --- a/docs/java-rest/high-level/ml/get-calendars.asciidoc +++ b/docs/java-rest/high-level/ml/get-calendars.asciidoc @@ -3,6 +3,7 @@ :request: GetCalendarsRequest :response: GetCalendarsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Calendars API Retrieves one or more calendar objects. diff --git a/docs/java-rest/high-level/ml/get-categories.asciidoc b/docs/java-rest/high-level/ml/get-categories.asciidoc index 0aa0c7696cc55..56ed0bdc1dbf6 100644 --- a/docs/java-rest/high-level/ml/get-categories.asciidoc +++ b/docs/java-rest/high-level/ml/get-categories.asciidoc @@ -3,6 +3,7 @@ :request: GetCategoriesRequest :response: GetCategoriesResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Categories API diff --git a/docs/java-rest/high-level/ml/get-datafeed-stats.asciidoc b/docs/java-rest/high-level/ml/get-datafeed-stats.asciidoc index 47486669dfc3a..04b4fd943e6d6 100644 --- a/docs/java-rest/high-level/ml/get-datafeed-stats.asciidoc +++ b/docs/java-rest/high-level/ml/get-datafeed-stats.asciidoc @@ -3,6 +3,7 @@ :request: GetDatafeedStatsRequest :response: GetDatafeedStatsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Datafeed Stats API diff --git a/docs/java-rest/high-level/ml/get-datafeed.asciidoc b/docs/java-rest/high-level/ml/get-datafeed.asciidoc index b624a84c86c4f..d5c01c3e9f64b 100644 --- a/docs/java-rest/high-level/ml/get-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/get-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: GetDatafeedRequest :response: GetDatafeedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Datafeed API diff --git a/docs/java-rest/high-level/ml/get-filters.asciidoc b/docs/java-rest/high-level/ml/get-filters.asciidoc index 5c0dc5bc2c6d7..914fda7b4b60e 100644 --- a/docs/java-rest/high-level/ml/get-filters.asciidoc +++ b/docs/java-rest/high-level/ml/get-filters.asciidoc @@ -3,6 +3,7 @@ :request: GetFiltersRequest :response: GetFiltersResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Filters API diff --git a/docs/java-rest/high-level/ml/get-influencers.asciidoc b/docs/java-rest/high-level/ml/get-influencers.asciidoc index 6167a62e5a34f..242baff378ff3 100644 --- a/docs/java-rest/high-level/ml/get-influencers.asciidoc +++ b/docs/java-rest/high-level/ml/get-influencers.asciidoc @@ -3,6 +3,7 @@ :request: GetInfluencersRequest :response: GetInfluencersResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Influencers API diff --git a/docs/java-rest/high-level/ml/get-info.asciidoc b/docs/java-rest/high-level/ml/get-info.asciidoc index 42da753329415..7c6a66d03d87e 100644 --- a/docs/java-rest/high-level/ml/get-info.asciidoc +++ b/docs/java-rest/high-level/ml/get-info.asciidoc @@ -3,6 +3,7 @@ :request: MlInfoRequest :response: MlInfoResponse -- +[role="xpack"] [id="{upid}-{api}"] === ML Get Info API diff --git a/docs/java-rest/high-level/ml/get-job-stats.asciidoc b/docs/java-rest/high-level/ml/get-job-stats.asciidoc index afea6f1a104c7..9e07c9e89f42e 100644 --- a/docs/java-rest/high-level/ml/get-job-stats.asciidoc +++ b/docs/java-rest/high-level/ml/get-job-stats.asciidoc @@ -3,6 +3,7 @@ :request: GetJobStatsRequest :response: GetJobStatsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Job Stats API diff --git a/docs/java-rest/high-level/ml/get-job.asciidoc b/docs/java-rest/high-level/ml/get-job.asciidoc index f2e740897defd..b5322228b9d02 100644 --- 
a/docs/java-rest/high-level/ml/get-job.asciidoc +++ b/docs/java-rest/high-level/ml/get-job.asciidoc @@ -3,6 +3,7 @@ :request: GetJobRequest :response: GetJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Job API diff --git a/docs/java-rest/high-level/ml/get-model-snapshots.asciidoc b/docs/java-rest/high-level/ml/get-model-snapshots.asciidoc index c23cdcabdf4aa..b0a4f022de484 100644 --- a/docs/java-rest/high-level/ml/get-model-snapshots.asciidoc +++ b/docs/java-rest/high-level/ml/get-model-snapshots.asciidoc @@ -3,6 +3,7 @@ :request: GetModelSnapshotsRequest :response: GetModelSnapshotsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Model Snapshots API diff --git a/docs/java-rest/high-level/ml/get-overall-buckets.asciidoc b/docs/java-rest/high-level/ml/get-overall-buckets.asciidoc index c3a2f4f250eba..b0d6791b7051d 100644 --- a/docs/java-rest/high-level/ml/get-overall-buckets.asciidoc +++ b/docs/java-rest/high-level/ml/get-overall-buckets.asciidoc @@ -3,6 +3,7 @@ :request: GetOverallBucketsRequest :response: GetOverallBucketsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Overall Buckets API diff --git a/docs/java-rest/high-level/ml/get-records.asciidoc b/docs/java-rest/high-level/ml/get-records.asciidoc index 7543463c30bfd..50c307f8dabed 100644 --- a/docs/java-rest/high-level/ml/get-records.asciidoc +++ b/docs/java-rest/high-level/ml/get-records.asciidoc @@ -3,6 +3,7 @@ :request: GetRecordsRequest :response: GetRecordsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Records API diff --git a/docs/java-rest/high-level/ml/open-job.asciidoc b/docs/java-rest/high-level/ml/open-job.asciidoc index 9b3ec11a7cc13..d0b2b4ccb8246 100644 --- a/docs/java-rest/high-level/ml/open-job.asciidoc +++ b/docs/java-rest/high-level/ml/open-job.asciidoc @@ -3,6 +3,7 @@ :request: OpenJobRequest :response: OpenJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Open Job API diff --git a/docs/java-rest/high-level/ml/post-calendar-event.asciidoc b/docs/java-rest/high-level/ml/post-calendar-event.asciidoc index ba7c69acf03d9..689df8067f861 100644 --- a/docs/java-rest/high-level/ml/post-calendar-event.asciidoc +++ b/docs/java-rest/high-level/ml/post-calendar-event.asciidoc @@ -3,6 +3,7 @@ :request: PostCalendarEventRequest :response: PostCalendarEventResponse -- +[role="xpack"] [id="{upid}-{api}"] === Post Calendar Event API Adds new ScheduledEvents to an existing {ml} calendar. 
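A hedged sketch for the Post Calendar Event API above (illustrative only); the `PostCalendarEventRequest` signature, the pre-built `events` list, and the existing `client` are all assumptions.

[source,java]
----
// Sketch only: add already-constructed ScheduledEvent objects to a calendar.
// "holidays" is a placeholder calendar id; `events` is an assumed List<ScheduledEvent>.
PostCalendarEventRequest request = new PostCalendarEventRequest("holidays", events);
PostCalendarEventResponse response = client.machineLearning()
    .postCalendarEvent(request, RequestOptions.DEFAULT);
----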
diff --git a/docs/java-rest/high-level/ml/post-data.asciidoc b/docs/java-rest/high-level/ml/post-data.asciidoc index fd51dc806966d..eff48f505b0a5 100644 --- a/docs/java-rest/high-level/ml/post-data.asciidoc +++ b/docs/java-rest/high-level/ml/post-data.asciidoc @@ -3,6 +3,7 @@ :request: PostDataRequest :response: PostDataResponse -- +[role="xpack"] [id="{upid}-{api}"] === Post Data API diff --git a/docs/java-rest/high-level/ml/preview-datafeed.asciidoc b/docs/java-rest/high-level/ml/preview-datafeed.asciidoc index 5b812af8344d6..2c5c48001473c 100644 --- a/docs/java-rest/high-level/ml/preview-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/preview-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: PreviewDatafeedRequest :response: PreviewDatafeedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Preview Datafeed API diff --git a/docs/java-rest/high-level/ml/put-calendar-job.asciidoc b/docs/java-rest/high-level/ml/put-calendar-job.asciidoc index 17fc3a93063ca..5b2196526fe9a 100644 --- a/docs/java-rest/high-level/ml/put-calendar-job.asciidoc +++ b/docs/java-rest/high-level/ml/put-calendar-job.asciidoc @@ -3,6 +3,7 @@ :request: PutCalendarJobRequest :response: PutCalendarResponse -- +[role="xpack"] [id="{upid}-{api}"] === Put Calendar Job API Adds {ml} jobs to an existing {ml} calendar. diff --git a/docs/java-rest/high-level/ml/put-calendar.asciidoc b/docs/java-rest/high-level/ml/put-calendar.asciidoc index defd72e35a056..c4577e11058f1 100644 --- a/docs/java-rest/high-level/ml/put-calendar.asciidoc +++ b/docs/java-rest/high-level/ml/put-calendar.asciidoc @@ -3,6 +3,7 @@ :request: PutCalendarRequest :response: PutCalendarResponse -- +[role="xpack"] [id="{upid}-{api}"] === Put Calendar API Creates a new {ml} calendar. diff --git a/docs/java-rest/high-level/ml/put-datafeed.asciidoc b/docs/java-rest/high-level/ml/put-datafeed.asciidoc index 8b2b4dd27f1bf..40a23ed7948dc 100644 --- a/docs/java-rest/high-level/ml/put-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/put-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: PutDatafeedRequest :response: PutDatafeedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Put Datafeed API diff --git a/docs/java-rest/high-level/ml/put-filter.asciidoc b/docs/java-rest/high-level/ml/put-filter.asciidoc index 2582e7715ab59..41aedfc821337 100644 --- a/docs/java-rest/high-level/ml/put-filter.asciidoc +++ b/docs/java-rest/high-level/ml/put-filter.asciidoc @@ -3,6 +3,7 @@ :request: PutFilterRequest :response: PutFilterResponse -- +[role="xpack"] [id="{upid}-{api}"] === Put Filter API diff --git a/docs/java-rest/high-level/ml/put-job.asciidoc b/docs/java-rest/high-level/ml/put-job.asciidoc index 9934fc6b94ab0..1ced4a031cc72 100644 --- a/docs/java-rest/high-level/ml/put-job.asciidoc +++ b/docs/java-rest/high-level/ml/put-job.asciidoc @@ -3,6 +3,7 @@ :request: PutJobRequest :response: PutJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Put Job API diff --git a/docs/java-rest/high-level/ml/revert-model-snapshot.asciidoc b/docs/java-rest/high-level/ml/revert-model-snapshot.asciidoc index 7c45ce8ebf0a0..f347efa734094 100644 --- a/docs/java-rest/high-level/ml/revert-model-snapshot.asciidoc +++ b/docs/java-rest/high-level/ml/revert-model-snapshot.asciidoc @@ -3,6 +3,8 @@ :request: RevertModelSnapshotRequest :response: RevertModelSnapshotResponse -- +[role="xpack"] + [id="{upid}-{api}"] === Revert Model Snapshot API diff --git a/docs/java-rest/high-level/ml/set-upgrade-mode.asciidoc b/docs/java-rest/high-level/ml/set-upgrade-mode.asciidoc index 80bb1874e4a63..a869d64afed0c 
100644 --- a/docs/java-rest/high-level/ml/set-upgrade-mode.asciidoc +++ b/docs/java-rest/high-level/ml/set-upgrade-mode.asciidoc @@ -3,6 +3,7 @@ :request: SetUpgradeModeRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Set Upgrade Mode API diff --git a/docs/java-rest/high-level/ml/start-datafeed.asciidoc b/docs/java-rest/high-level/ml/start-datafeed.asciidoc index 9c3b096634d81..611c1d994786c 100644 --- a/docs/java-rest/high-level/ml/start-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/start-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: StartDatafeedRequest :response: StartDatafeedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Start Datafeed API diff --git a/docs/java-rest/high-level/ml/stop-datafeed.asciidoc b/docs/java-rest/high-level/ml/stop-datafeed.asciidoc index 211d1c5ad7aa3..08958273b18ea 100644 --- a/docs/java-rest/high-level/ml/stop-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/stop-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: StopDatafeedRequest :response: StopDatafeedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Stop Datafeed API diff --git a/docs/java-rest/high-level/ml/update-datafeed.asciidoc b/docs/java-rest/high-level/ml/update-datafeed.asciidoc index 86e3a4de336ec..4fc9f66c13a22 100644 --- a/docs/java-rest/high-level/ml/update-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/update-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: UpdateDatafeedRequest :response: PutDatafeedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Update Datafeed API diff --git a/docs/java-rest/high-level/ml/update-filter.asciidoc b/docs/java-rest/high-level/ml/update-filter.asciidoc index b100000ddc1e8..b2950f30dbf7d 100644 --- a/docs/java-rest/high-level/ml/update-filter.asciidoc +++ b/docs/java-rest/high-level/ml/update-filter.asciidoc @@ -3,6 +3,7 @@ :request: UpdateFilterRequest :response: PutFilterResponse -- +[role="xpack"] [id="{upid}-{api}"] === Update Filter API diff --git a/docs/java-rest/high-level/ml/update-job.asciidoc b/docs/java-rest/high-level/ml/update-job.asciidoc index 90f9cf85c4887..f1aee3531fdf7 100644 --- a/docs/java-rest/high-level/ml/update-job.asciidoc +++ b/docs/java-rest/high-level/ml/update-job.asciidoc @@ -3,6 +3,7 @@ :request: UpdateJobRequest :response: PutJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Update Job API diff --git a/docs/java-rest/high-level/ml/update-model-snapshot.asciidoc b/docs/java-rest/high-level/ml/update-model-snapshot.asciidoc index b38539b062224..c4d0e763ef87f 100644 --- a/docs/java-rest/high-level/ml/update-model-snapshot.asciidoc +++ b/docs/java-rest/high-level/ml/update-model-snapshot.asciidoc @@ -3,6 +3,7 @@ :request: UpdateModelSnapshotRequest :response: UpdateModelSnapshotResponse -- +[role="xpack"] [id="{upid}-{api}"] === Update Model Snapshot API diff --git a/docs/java-rest/high-level/query-builders.asciidoc b/docs/java-rest/high-level/query-builders.asciidoc index 32a3b06505b1d..eb401618f367b 100644 --- a/docs/java-rest/high-level/query-builders.asciidoc +++ b/docs/java-rest/high-level/query-builders.asciidoc @@ -28,7 +28,7 @@ This page lists all the available search queries with their corresponding `Query | {ref}/query-dsl-simple-query-string-query.html[Simple Query String] | {query-ref}/SimpleQueryStringBuilder.html[SimpleQueryStringBuilder] | {query-ref}/QueryBuilders.html#simpleQueryStringQuery-java.lang.String-[QueryBuilders.simpleQueryStringQuery()] |====== -==== Term level queries +==== Term-level queries [options="header"] |====== | Search Query | QueryBuilder 
Class | Method in QueryBuilders diff --git a/docs/java-rest/high-level/rollup/delete_job.asciidoc b/docs/java-rest/high-level/rollup/delete_job.asciidoc index c98a6fb732659..930713331ae22 100644 --- a/docs/java-rest/high-level/rollup/delete_job.asciidoc +++ b/docs/java-rest/high-level/rollup/delete_job.asciidoc @@ -3,7 +3,7 @@ :request: DeleteRollupJobRequest :response: DeleteRollupJobResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete Rollup Job API diff --git a/docs/java-rest/high-level/rollup/get_job.asciidoc b/docs/java-rest/high-level/rollup/get_job.asciidoc index 68733113e53c8..ce6ca9feb3043 100644 --- a/docs/java-rest/high-level/rollup/get_job.asciidoc +++ b/docs/java-rest/high-level/rollup/get_job.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-x-pack-rollup-get-job]] === Get Rollup Job API diff --git a/docs/java-rest/high-level/rollup/get_rollup_caps.asciidoc b/docs/java-rest/high-level/rollup/get_rollup_caps.asciidoc index c11f5d231b09d..cc320558d4f1c 100644 --- a/docs/java-rest/high-level/rollup/get_rollup_caps.asciidoc +++ b/docs/java-rest/high-level/rollup/get_rollup_caps.asciidoc @@ -3,7 +3,7 @@ :request: GetRollupCapsRequest :response: GetRollupCapsResponse -- - +[role="xpack"] [id="{upid}-x-pack-{api}"] === Get Rollup Capabilities API diff --git a/docs/java-rest/high-level/rollup/get_rollup_index_caps.asciidoc b/docs/java-rest/high-level/rollup/get_rollup_index_caps.asciidoc index 52cb7ff952472..bd69a5cd55fd5 100644 --- a/docs/java-rest/high-level/rollup/get_rollup_index_caps.asciidoc +++ b/docs/java-rest/high-level/rollup/get_rollup_index_caps.asciidoc @@ -3,7 +3,7 @@ :request: GetRollupIndexCapsRequest :response: GetRollupIndexCapsResponse -- - +[role="xpack"] [id="{upid}-x-pack-{api}"] === Get Rollup Index Capabilities API diff --git a/docs/java-rest/high-level/rollup/put_job.asciidoc b/docs/java-rest/high-level/rollup/put_job.asciidoc index 0b7ece05ca89b..0ef2e0d3c41c4 100644 --- a/docs/java-rest/high-level/rollup/put_job.asciidoc +++ b/docs/java-rest/high-level/rollup/put_job.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-x-pack-rollup-put-job]] === Put Rollup Job API diff --git a/docs/java-rest/high-level/rollup/search.asciidoc b/docs/java-rest/high-level/rollup/search.asciidoc index 49bf983edd429..6139cd7238d86 100644 --- a/docs/java-rest/high-level/rollup/search.asciidoc +++ b/docs/java-rest/high-level/rollup/search.asciidoc @@ -3,7 +3,7 @@ :request: SearchRequest :response: SearchResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Rollup Search API diff --git a/docs/java-rest/high-level/rollup/start_job.asciidoc b/docs/java-rest/high-level/rollup/start_job.asciidoc index 6d760dc0b33e6..8cc94a234bdc1 100644 --- a/docs/java-rest/high-level/rollup/start_job.asciidoc +++ b/docs/java-rest/high-level/rollup/start_job.asciidoc @@ -3,7 +3,7 @@ :request: StartRollupJobRequest :response: StartRollupJobResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Start Rollup Job API diff --git a/docs/java-rest/high-level/rollup/stop_job.asciidoc b/docs/java-rest/high-level/rollup/stop_job.asciidoc index cba1dcdd2d374..56de078d45dd2 100644 --- a/docs/java-rest/high-level/rollup/stop_job.asciidoc +++ b/docs/java-rest/high-level/rollup/stop_job.asciidoc @@ -3,7 +3,7 @@ :request: StopRollupJobRequest :response: StopRollupJobResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Stop Rollup Job API diff --git a/docs/java-rest/high-level/security/authenticate.asciidoc b/docs/java-rest/high-level/security/authenticate.asciidoc index 
4d4467a03b4d2..8f2a91a9ca5c6 100644 --- a/docs/java-rest/high-level/security/authenticate.asciidoc +++ b/docs/java-rest/high-level/security/authenticate.asciidoc @@ -3,7 +3,7 @@ :api: authenticate :response: AuthenticateResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Authenticate API diff --git a/docs/java-rest/high-level/security/change-password.asciidoc b/docs/java-rest/high-level/security/change-password.asciidoc index 36d66b194cfea..6593e8105981a 100644 --- a/docs/java-rest/high-level/security/change-password.asciidoc +++ b/docs/java-rest/high-level/security/change-password.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-change-password]] === Change Password API diff --git a/docs/java-rest/high-level/security/clear-realm-cache.asciidoc b/docs/java-rest/high-level/security/clear-realm-cache.asciidoc index 5427db148d65e..41c100e1ec887 100644 --- a/docs/java-rest/high-level/security/clear-realm-cache.asciidoc +++ b/docs/java-rest/high-level/security/clear-realm-cache.asciidoc @@ -4,7 +4,7 @@ :request: ClearRealmCacheRequest :response: ClearRealmCacheResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Clear Realm Cache API diff --git a/docs/java-rest/high-level/security/clear-roles-cache.asciidoc b/docs/java-rest/high-level/security/clear-roles-cache.asciidoc index 851824bab5f9b..39e344f6ce985 100644 --- a/docs/java-rest/high-level/security/clear-roles-cache.asciidoc +++ b/docs/java-rest/high-level/security/clear-roles-cache.asciidoc @@ -4,7 +4,7 @@ :request: ClearRolesCacheRequest :response: ClearRolesCacheResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Clear Roles Cache API diff --git a/docs/java-rest/high-level/security/create-api-key.asciidoc b/docs/java-rest/high-level/security/create-api-key.asciidoc index 93c3fa16de1da..8a77f11484de0 100644 --- a/docs/java-rest/high-level/security/create-api-key.asciidoc +++ b/docs/java-rest/high-level/security/create-api-key.asciidoc @@ -3,7 +3,7 @@ :request: CreateApiKeyRequest :response: CreateApiKeyResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Create API Key API diff --git a/docs/java-rest/high-level/security/create-token.asciidoc b/docs/java-rest/high-level/security/create-token.asciidoc index 33e55d4ed582b..d911c747a13f2 100644 --- a/docs/java-rest/high-level/security/create-token.asciidoc +++ b/docs/java-rest/high-level/security/create-token.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-create-token]] === Create Token API diff --git a/docs/java-rest/high-level/security/delete-privileges.asciidoc b/docs/java-rest/high-level/security/delete-privileges.asciidoc index 7f32d75107b97..827ccf5b1e52b 100644 --- a/docs/java-rest/high-level/security/delete-privileges.asciidoc +++ b/docs/java-rest/high-level/security/delete-privileges.asciidoc @@ -3,7 +3,7 @@ :request: DeletePrivilegesRequest :response: DeletePrivilegesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete Privileges API diff --git a/docs/java-rest/high-level/security/delete-role-mapping.asciidoc b/docs/java-rest/high-level/security/delete-role-mapping.asciidoc index 63025e9d68181..5279d95368851 100644 --- a/docs/java-rest/high-level/security/delete-role-mapping.asciidoc +++ b/docs/java-rest/high-level/security/delete-role-mapping.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-delete-role-mapping]] === Delete Role Mapping API diff --git a/docs/java-rest/high-level/security/delete-role.asciidoc b/docs/java-rest/high-level/security/delete-role.asciidoc index 0086b89bb6897..d2f4ef6f88ad3 100644 --- 
a/docs/java-rest/high-level/security/delete-role.asciidoc +++ b/docs/java-rest/high-level/security/delete-role.asciidoc @@ -3,7 +3,7 @@ :request: DeleteRoleRequest :response: DeleteRoleResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete Role API diff --git a/docs/java-rest/high-level/security/delete-user.asciidoc b/docs/java-rest/high-level/security/delete-user.asciidoc index 52573bb29c74e..43d65fc4e976d 100644 --- a/docs/java-rest/high-level/security/delete-user.asciidoc +++ b/docs/java-rest/high-level/security/delete-user.asciidoc @@ -3,7 +3,7 @@ :request: DeleteUserRequest :response: DeleteUserResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete User API diff --git a/docs/java-rest/high-level/security/disable-user.asciidoc b/docs/java-rest/high-level/security/disable-user.asciidoc index 564b8699ebb8d..90b89c2779fbe 100644 --- a/docs/java-rest/high-level/security/disable-user.asciidoc +++ b/docs/java-rest/high-level/security/disable-user.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-disable-user]] === Disable User API diff --git a/docs/java-rest/high-level/security/enable-user.asciidoc b/docs/java-rest/high-level/security/enable-user.asciidoc index 4be0f38e39fa6..7e8bac12e270a 100644 --- a/docs/java-rest/high-level/security/enable-user.asciidoc +++ b/docs/java-rest/high-level/security/enable-user.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-enable-user]] === Enable User API diff --git a/docs/java-rest/high-level/security/get-api-key.asciidoc b/docs/java-rest/high-level/security/get-api-key.asciidoc index bb98b527d22ba..88aa360384178 100644 --- a/docs/java-rest/high-level/security/get-api-key.asciidoc +++ b/docs/java-rest/high-level/security/get-api-key.asciidoc @@ -3,7 +3,7 @@ :request: GetApiKeyRequest :response: GetApiKeyResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get API Key information API diff --git a/docs/java-rest/high-level/security/get-certificates.asciidoc b/docs/java-rest/high-level/security/get-certificates.asciidoc index 2f46cfc927a81..5ada3c8a712db 100644 --- a/docs/java-rest/high-level/security/get-certificates.asciidoc +++ b/docs/java-rest/high-level/security/get-certificates.asciidoc @@ -4,7 +4,7 @@ :response: GetSslCertificatesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === SSL Certificate API diff --git a/docs/java-rest/high-level/security/get-privileges.asciidoc b/docs/java-rest/high-level/security/get-privileges.asciidoc index 6eee8bbc3c1f5..d63f4774d07e5 100644 --- a/docs/java-rest/high-level/security/get-privileges.asciidoc +++ b/docs/java-rest/high-level/security/get-privileges.asciidoc @@ -4,7 +4,7 @@ :request: GetPrivilegesRequest :response: GetPrivilegesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Privileges API diff --git a/docs/java-rest/high-level/security/get-role-mappings.asciidoc b/docs/java-rest/high-level/security/get-role-mappings.asciidoc index cc58d0980c3e7..b279702a4e123 100644 --- a/docs/java-rest/high-level/security/get-role-mappings.asciidoc +++ b/docs/java-rest/high-level/security/get-role-mappings.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-get-role-mappings]] === Get Role Mappings API diff --git a/docs/java-rest/high-level/security/get-roles.asciidoc b/docs/java-rest/high-level/security/get-roles.asciidoc index 777349222992e..2c698222c7a44 100644 --- a/docs/java-rest/high-level/security/get-roles.asciidoc +++ b/docs/java-rest/high-level/security/get-roles.asciidoc @@ -4,7 +4,7 @@ :request: GetRolesRequest :response: GetRolesResponse 
-- - +[role="xpack"] [id="{upid}-{api}"] === Get Roles API diff --git a/docs/java-rest/high-level/security/get-user-privileges.asciidoc b/docs/java-rest/high-level/security/get-user-privileges.asciidoc index 641d238df6434..b8051cbfae67a 100644 --- a/docs/java-rest/high-level/security/get-user-privileges.asciidoc +++ b/docs/java-rest/high-level/security/get-user-privileges.asciidoc @@ -3,7 +3,7 @@ :request: GetUserPrivilegesRequest :response: GetUserPrivilegesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get User Privileges API diff --git a/docs/java-rest/high-level/security/get-users.asciidoc b/docs/java-rest/high-level/security/get-users.asciidoc index 1d41bd76166b4..cbd45801fe99b 100644 --- a/docs/java-rest/high-level/security/get-users.asciidoc +++ b/docs/java-rest/high-level/security/get-users.asciidoc @@ -4,7 +4,7 @@ :request: GetUsersRequest :response: GetUsersResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Users API diff --git a/docs/java-rest/high-level/security/has-privileges.asciidoc b/docs/java-rest/high-level/security/has-privileges.asciidoc index 181b1b7f48167..7c5f09a171ced 100644 --- a/docs/java-rest/high-level/security/has-privileges.asciidoc +++ b/docs/java-rest/high-level/security/has-privileges.asciidoc @@ -3,7 +3,7 @@ :request: HasPrivilegesRequest :response: HasPrivilegesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Has Privileges API diff --git a/docs/java-rest/high-level/security/invalidate-api-key.asciidoc b/docs/java-rest/high-level/security/invalidate-api-key.asciidoc index 7f9c43b3165a8..e5e38a17fc981 100644 --- a/docs/java-rest/high-level/security/invalidate-api-key.asciidoc +++ b/docs/java-rest/high-level/security/invalidate-api-key.asciidoc @@ -3,7 +3,7 @@ :request: InvalidateApiKeyRequest :response: InvalidateApiKeyResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Invalidate API Key API diff --git a/docs/java-rest/high-level/security/invalidate-token.asciidoc b/docs/java-rest/high-level/security/invalidate-token.asciidoc index 76d4909ff049a..34969523c7ba8 100644 --- a/docs/java-rest/high-level/security/invalidate-token.asciidoc +++ b/docs/java-rest/high-level/security/invalidate-token.asciidoc @@ -3,7 +3,7 @@ :request: InvalidateTokenRequest :response: InvalidateTokenResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Invalidate Token API diff --git a/docs/java-rest/high-level/security/put-privileges.asciidoc b/docs/java-rest/high-level/security/put-privileges.asciidoc index 1c0a97d2a94c3..ba8d8878e1576 100644 --- a/docs/java-rest/high-level/security/put-privileges.asciidoc +++ b/docs/java-rest/high-level/security/put-privileges.asciidoc @@ -3,7 +3,7 @@ :request: PutPrivilegesRequest :response: PutPrivilegesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Put Privileges API diff --git a/docs/java-rest/high-level/security/put-role-mapping.asciidoc b/docs/java-rest/high-level/security/put-role-mapping.asciidoc index f71c7648803dc..819aa776b68ba 100644 --- a/docs/java-rest/high-level/security/put-role-mapping.asciidoc +++ b/docs/java-rest/high-level/security/put-role-mapping.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-put-role-mapping]] === Put Role Mapping API diff --git a/docs/java-rest/high-level/security/put-role.asciidoc b/docs/java-rest/high-level/security/put-role.asciidoc index 68c1f5d69d470..d418375237d47 100644 --- a/docs/java-rest/high-level/security/put-role.asciidoc +++ b/docs/java-rest/high-level/security/put-role.asciidoc @@ -4,7 +4,7 @@ :request: PutRoleRequest :response: PutRoleResponse 
-- - +[role="xpack"] [id="{upid}-{api}"] === Put Role API diff --git a/docs/java-rest/high-level/security/put-user.asciidoc b/docs/java-rest/high-level/security/put-user.asciidoc index 714dd61e1193d..bca93244175d7 100644 --- a/docs/java-rest/high-level/security/put-user.asciidoc +++ b/docs/java-rest/high-level/security/put-user.asciidoc @@ -3,7 +3,7 @@ :request: PutUserRequest :response: PutUserResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Put User API diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 1df10985e7e3b..6417bbd710dd2 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -348,14 +348,11 @@ include::ml/set-upgrade-mode.asciidoc[] The Java High Level REST Client supports the following Migration APIs: -* <> -* <<{upid}-upgrade>> * <<{upid}-get-deprecation-info>> -include::migration/get-assistance.asciidoc[] -include::migration/upgrade.asciidoc[] include::migration/get-deprecation-info.asciidoc[] +[role="xpack"] == Rollup APIs :upid: {mainid}-rollup @@ -381,6 +378,7 @@ include::rollup/search.asciidoc[] include::rollup/get_rollup_caps.asciidoc[] include::rollup/get_rollup_index_caps.asciidoc[] +[role="xpack"] == Security APIs :upid: {mainid}-security @@ -442,6 +440,7 @@ include::security/create-api-key.asciidoc[] include::security/get-api-key.asciidoc[] include::security/invalidate-api-key.asciidoc[] +[role="xpack"] == Watcher APIs :upid: {mainid}-watcher @@ -471,6 +470,7 @@ include::watcher/activate-watch.asciidoc[] include::watcher/execute-watch.asciidoc[] include::watcher/watcher-stats.asciidoc[] +[role="xpack"] == Graph APIs The Java High Level REST Client supports the following Graph APIs: @@ -491,6 +491,7 @@ don't leak into the rest of the documentation. :upid!: -- +[role="xpack"] == CCR APIs :upid: {mainid}-ccr @@ -502,22 +503,27 @@ The Java High Level REST Client supports the following CCR APIs: * <<{upid}-ccr-pause-follow>> * <<{upid}-ccr-resume-follow>> * <<{upid}-ccr-unfollow>> +* <<{upid}-ccr-forget-follower>> * <<{upid}-ccr-put-auto-follow-pattern>> * <<{upid}-ccr-delete-auto-follow-pattern>> * <<{upid}-ccr-get-auto-follow-pattern>> * <<{upid}-ccr-get-stats>> * <<{upid}-ccr-get-follow-stats>> +* <<{upid}-ccr-get-follow-info>> include::ccr/put_follow.asciidoc[] include::ccr/pause_follow.asciidoc[] include::ccr/resume_follow.asciidoc[] include::ccr/unfollow.asciidoc[] +include::ccr/forget_follower.asciidoc[] include::ccr/put_auto_follow_pattern.asciidoc[] include::ccr/delete_auto_follow_pattern.asciidoc[] include::ccr/get_auto_follow_pattern.asciidoc[] include::ccr/get_stats.asciidoc[] include::ccr/get_follow_stats.asciidoc[] +include::ccr/get_follow_info.asciidoc[] +[role="xpack"] == Index Lifecycle Management APIs :upid: {mainid}-ilm diff --git a/docs/java-rest/high-level/tasks/cancel_tasks.asciidoc b/docs/java-rest/high-level/tasks/cancel_tasks.asciidoc index 089f87c00a2ef..42f31322896e8 100644 --- a/docs/java-rest/high-level/tasks/cancel_tasks.asciidoc +++ b/docs/java-rest/high-level/tasks/cancel_tasks.asciidoc @@ -18,7 +18,7 @@ task selection parameters as the list tasks command. 
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-request-filter] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-request-filter] -------------------------------------------------- <1> Cancel a task <2> Cancel only cluster-related tasks @@ -28,7 +28,7 @@ include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-request-f ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-execute] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-execute] -------------------------------------------------- ==== Asynchronous Execution @@ -62,20 +62,20 @@ provided as an argument ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-response-tasks] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-response-tasks] -------------------------------------------------- <1> List of cancelled tasks ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-response-calc] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-response-calc] -------------------------------------------------- <1> List of cancelled tasks grouped by a node <2> List of cancelled tasks grouped by a parent task ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-response-failures] +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-response-failures] -------------------------------------------------- <1> List of node failures <2> List of task cancellation failures diff --git a/docs/java-rest/high-level/watcher/ack-watch.asciidoc b/docs/java-rest/high-level/watcher/ack-watch.asciidoc index 46a516798594b..fdfc4d8240c46 100644 --- a/docs/java-rest/high-level/watcher/ack-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/ack-watch.asciidoc @@ -4,13 +4,14 @@ :response: AckWatchResponse -- +[role="xpack"] [id="{upid}-{api}"] === Ack Watch API [id="{upid}-{api}-request"] ==== Execution -{xpack-ref}/actions.html#actions-ack-throttle[Acknowledging a watch] enables you +{ref}/actions.html#actions-ack-throttle[Acknowledging a watch] enables you to manually throttle execution of a watch's actions. 
A watch can be acknowledged through the following request: diff --git a/docs/java-rest/high-level/watcher/activate-watch.asciidoc b/docs/java-rest/high-level/watcher/activate-watch.asciidoc index 52124ccb6eddb..21fc5e179ea6f 100644 --- a/docs/java-rest/high-level/watcher/activate-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/activate-watch.asciidoc @@ -3,7 +3,7 @@ :request: ActivateWatchRequest :response: ActivateWatchResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Activate Watch API diff --git a/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc b/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc index ca2178e5c05e2..e11a713ed065b 100644 --- a/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc @@ -4,6 +4,7 @@ :response: deactivateWatchResponse :doc-tests-file: {doc-tests}/WatcherDocumentationIT.java -- +[role="xpack"] [[java-rest-high-watcher-deactivate-watch]] === Deactivate Watch API diff --git a/docs/java-rest/high-level/watcher/delete-watch.asciidoc b/docs/java-rest/high-level/watcher/delete-watch.asciidoc index 615337ba317bf..19ba7c4373ae8 100644 --- a/docs/java-rest/high-level/watcher/delete-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/delete-watch.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-x-pack-watcher-delete-watch]] === Delete Watch API diff --git a/docs/java-rest/high-level/watcher/execute-watch.asciidoc b/docs/java-rest/high-level/watcher/execute-watch.asciidoc index ed5b4b1659dc5..05f1d9cdbfa31 100644 --- a/docs/java-rest/high-level/watcher/execute-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/execute-watch.asciidoc @@ -3,6 +3,7 @@ :request: ExecuteWatchRequest :response: ExecuteWatchResponse -- +[role="xpack"] [id="{upid}-{api}"] === Execute Watch API diff --git a/docs/java-rest/high-level/watcher/get-watch.asciidoc b/docs/java-rest/high-level/watcher/get-watch.asciidoc index 7321a66eeaaf4..544e894abae0c 100644 --- a/docs/java-rest/high-level/watcher/get-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/get-watch.asciidoc @@ -3,7 +3,7 @@ :request: GetWatchRequest :response: GetWatchResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Watch API diff --git a/docs/java-rest/high-level/watcher/put-watch.asciidoc b/docs/java-rest/high-level/watcher/put-watch.asciidoc index e5ee87bea34a6..7d259446d3b27 100644 --- a/docs/java-rest/high-level/watcher/put-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/put-watch.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-x-pack-watcher-put-watch]] === Put Watch API diff --git a/docs/java-rest/high-level/watcher/start-watch-service.asciidoc b/docs/java-rest/high-level/watcher/start-watch-service.asciidoc index 9e3eaf1359f51..bcc4e6bb7c3dc 100644 --- a/docs/java-rest/high-level/watcher/start-watch-service.asciidoc +++ b/docs/java-rest/high-level/watcher/start-watch-service.asciidoc @@ -3,6 +3,7 @@ :request: StartWatchServiceRequest :response: StartWatchServiceResponse -- +[role="xpack"] [id="{upid}-{api}"] === Start Watch Service API diff --git a/docs/java-rest/high-level/watcher/stop-watch-service.asciidoc b/docs/java-rest/high-level/watcher/stop-watch-service.asciidoc index 173edf3cc7b23..7809f9a9092fd 100644 --- a/docs/java-rest/high-level/watcher/stop-watch-service.asciidoc +++ b/docs/java-rest/high-level/watcher/stop-watch-service.asciidoc @@ -3,6 +3,7 @@ :request: StopWatchServiceRequest :response: StopWatchServiceResponse -- +[role="xpack"] [id="{upid}-{api}"] === Stop 
Watch Service API diff --git a/docs/java-rest/high-level/watcher/watcher-stats.asciidoc b/docs/java-rest/high-level/watcher/watcher-stats.asciidoc index 7fd27053fcbaa..b0ef26782b675 100644 --- a/docs/java-rest/high-level/watcher/watcher-stats.asciidoc +++ b/docs/java-rest/high-level/watcher/watcher-stats.asciidoc @@ -3,6 +3,7 @@ :request: WatcherStatsRequest :response: WatcherStatsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Watcher Stats API diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index ee1555019dbe1..1f2bafa9891bd 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -224,7 +224,7 @@ Once the `RestClient` has been created, requests can be sent by calling either will block the calling thread and return the `Response` when the request is successful or throw an exception if it fails. `performRequestAsync` is asynchronous and accepts a `ResponseListener` argument that it calls with a -`Response` when the request is successful or with an `Exception` if it4 fails. +`Response` when the request is successful or with an `Exception` if it fails. This is synchronous: diff --git a/docs/painless/painless-casting.asciidoc b/docs/painless/painless-casting.asciidoc index 4bcd14cbfc6a1..c131ec5b6e015 100644 --- a/docs/painless/painless-casting.asciidoc +++ b/docs/painless/painless-casting.asciidoc @@ -28,9 +28,9 @@ cast: '(' TYPE ')' expression + [source,Painless] ---- -<1> int i = (int)5L; -<2> Map m = new HashMap(); -<3> HashMap hm = (HashMap)m; +int i = (int)5L; <1> +Map m = new HashMap(); <2> +HashMap hm = (HashMap)m; <3> ---- + <1> declare `int i`; @@ -75,10 +75,10 @@ following table: + [source,Painless] ---- -<1> int a = 1; -<2> long b = a; -<3> short c = (short)b; -<4> double e = (double)a; +int a = 1; <1> +long b = a; <2> +short c = (short)b; <3> +double e = (double)a; <4> ---- + <1> declare `int a`; @@ -101,9 +101,9 @@ following table: + [source,Painless] ---- -<1> int a = 1.0; // error -<2> int b = 2; -<3> byte c = b; // error +int a = 1.0; // error <1> +int b = 2; <2> +byte c = b; // error <3> ---- + <1> declare `int i`; @@ -132,11 +132,11 @@ or the target type is a descendant of the original type. + [source,Painless] ---- -<1> List x; -<2> ArrayList y = new ArrayList(); -<3> x = y; -<4> y = (ArrayList)x; -<5> x = (List)y; +List x; <1> +ArrayList y = new ArrayList(); <2> +x = y; <3> +y = (ArrayList)x; <4> +x = (List)y; <5> ---- + <1> declare `List x`; @@ -161,9 +161,9 @@ or the target type is a descendant of the original type. + [source,Painless] ---- -<1> List x = new ArrayList(); -<2> ArrayList y = x; // error -<3> Map m = (Map)x; // error +List x = new ArrayList(); <1> +ArrayList y = x; // error <2> +Map m = (Map)x; // error <3> ---- + <1> declare `List x`; @@ -201,11 +201,11 @@ based on the current type value the `def` type value represents. + [source,Painless] ---- -<1> def d0 = 3; -<2> d0 = new ArrayList(); -<3> Object o = new HashMap(); -<4> def d1 = o; -<5> int i = d1.size(); +def d0 = 3; <1> +d0 = new ArrayList(); <2> +Object o = new HashMap(); <3> +def d1 = o; <4> +int i = d1.size(); <5> ---- + <1> declare `def d0`; @@ -236,12 +236,12 @@ based on the current type value the `def` type value represents. 
+ [source,Painless] ---- -<1> def d = 1.0; -<2> int i = (int)d; -<3> d = 1; -<4> float f = d; -<5> d = new ArrayList(); -<6> List l = d; +def d = 1.0; <1> +int i = (int)d; <2> +d = 1; <3> +float f = d; <4> +d = new ArrayList(); <5> +List l = d; <6> ---- + <1> declare `def d`; @@ -274,10 +274,10 @@ based on the current type value the `def` type value represents. + [source,Painless] ---- -<1> def d = 1; -<2> short s = d; // error -<3> d = new HashMap(); -<4> List l = d; // error +def d = 1; <1> +short s = d; // error <2> +d = new HashMap(); <3> +List l = d; // error <4> ---- <1> declare `def d`; implicit cast `int 1` to `def` -> `def`; @@ -314,8 +314,8 @@ Use the cast operator to convert a <> value into a + [source,Painless] ---- -<1> char c = (char)"C"; -<2> c = (char)'c'; +char c = (char)"C"; <1> +c = (char)'c'; <2> ---- + <1> declare `char c`; @@ -328,8 +328,8 @@ Use the cast operator to convert a <> value into a + [source,Painless] ---- -<1> String s = "s"; -<2> char c = (char)s; +String s = "s"; <1> +char c = (char)s; <2> ---- <1> declare `String s`; store `String "s"` to `s`; @@ -368,10 +368,10 @@ value and vice versa. + [source,Painless] ---- -<1> List l = new ArrayList(); -<2> l.add(1); -<3> Integer I = Integer.valueOf(0); -<4> int i = l.get(i); +List l = new ArrayList(); <1> +l.add(1); <2> +Integer I = Integer.valueOf(0); <3> +int i = l.get(i); <4> ---- + <1> declare `List l`; @@ -399,10 +399,10 @@ value and vice versa. + [source,Painless] ---- -<1> Integer x = 1; // error -<2> Integer y = (Integer)1; // error -<3> int a = Integer.valueOf(1); // error -<4> int b = (int)Integer.valueOf(1); // error +Integer x = 1; // error <1> +Integer y = (Integer)1; // error <2> +int a = Integer.valueOf(1); // error <3> +int b = (int)Integer.valueOf(1); // error <4> ---- + <1> declare `Integer x`; @@ -437,9 +437,9 @@ based on the type the `def` value represents. + [source,Painless] ---- -<1> double d = 2 + 2.0; -<2> def x = 1; -<3> float f = x + 2.0F; +double d = 2 + 2.0; <1> +def x = 1; <2> +float f = x + 2.0F; <3> ---- <1> declare `double d`; promote `int 2` and `double 2.0 @0`: result `double`; diff --git a/docs/painless/painless-contexts.asciidoc b/docs/painless/painless-contexts.asciidoc index 7c342a3da7a5a..75b1a450c0dcd 100644 --- a/docs/painless/painless-contexts.asciidoc +++ b/docs/painless/painless-contexts.asciidoc @@ -49,9 +49,9 @@ specialized code may define new ways to use a Painless script. | Bucket selector aggregation | <> | {ref}/search-aggregations-pipeline-bucket-selector-aggregation.html[Elasticsearch Documentation] | Watcher condition | <> - | {xpack-ref}/condition-script.html[Elasticsearch Documentation] + | {ref}/condition-script.html[Elasticsearch Documentation] | Watcher transform | <> - | {xpack-ref}/transform-script.html[Elasticsearch Documentation] + | {ref}/transform-script.html[Elasticsearch Documentation] |==== include::painless-contexts/painless-context-examples.asciidoc[] diff --git a/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc index 8e20cf77c353d..69fbce1d0828f 100644 --- a/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc +++ b/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc @@ -15,7 +15,7 @@ to determine if a bucket should be retained or filtered out. ==== Return boolean:: - True if the the bucket should be retained, false if the bucket should be filtered out. 
+ True if the bucket should be retained, false if the bucket should be filtered out. ==== API @@ -78,4 +78,4 @@ GET /seats/_search // TEST[setup:seats] <1> The `buckets_path` points to the max aggregations (`max_cost`) and adds `max` variables to the `params` map -<2> The user-specified `base_cost` is also added to the `params` map \ No newline at end of file +<2> The user-specified `base_cost` is also added to the `params` map diff --git a/docs/painless/painless-contexts/painless-context-examples.asciidoc b/docs/painless/painless-contexts/painless-context-examples.asciidoc index 8a0691459960f..80224c2575eac 100644 --- a/docs/painless/painless-contexts/painless-context-examples.asciidoc +++ b/docs/painless/painless-contexts/painless-context-examples.asciidoc @@ -5,7 +5,7 @@ To run the examples, index the sample seat data into Elasticsearch. The examples must be run sequentially to work correctly. . Download the -https://download.elastic.co/demos/painless/contexts/seats.json[seat data]. This +https://download.elastic.co/demos/painless/contexts/seats-init.json[seat data]. This data set contains booking information for a collection of plays. Each document represents a single seat for a play at a particular theater on a specific date and time. @@ -72,7 +72,7 @@ seat data is indexed. + [source,js] ---- -curl -XPOST localhost:9200/seats/seat/_bulk?pipeline=seats -H "Content-Type: application/x-ndjson" --data-binary "@//seats.json" +curl -XPOST localhost:9200/seats/seat/_bulk?pipeline=seats -H "Content-Type: application/x-ndjson" --data-binary "@//seats-init.json" ---- // NOTCONSOLE diff --git a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc index 91ab51561ef88..bac9bf7a4353a 100644 --- a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc @@ -1,7 +1,7 @@ [[painless-watcher-condition-context]] === Watcher condition context -Use a Painless script as a {xpack-ref}/condition-script.html[watch condition] +Use a Painless script as a {ref}/condition-script.html[watch condition] that determines whether to execute a watch or a particular action within a watch. Condition scripts return a Boolean value to indicate the status of the condition. diff --git a/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc b/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc index 2d2e3993e3233..fa78b4855f210 100644 --- a/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc @@ -104,7 +104,7 @@ The following example shows the use of metadata and transforming dates into a re [source,Painless] ---- -POST _xpack/watcher/watch/_execute +POST _watcher/watch/_execute { "watch" : { "metadata" : { "min_hits": 10000 }, diff --git a/docs/painless/painless-contexts/painless-watcher-context-variables.asciidoc b/docs/painless/painless-contexts/painless-watcher-context-variables.asciidoc index addfd11cab92e..71a00711091da 100644 --- a/docs/painless/painless-contexts/painless-watcher-context-variables.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-context-variables.asciidoc @@ -29,7 +29,7 @@ The following variables are available in all watcher contexts. `ctx['payload']` (`Map`, read-only):: The accessible watch data based upon the - {xpack-ref}/input.html[watch input]. 
+ {ref}/input.html[watch input]. *API* diff --git a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc index 92012720aa69e..a188585ad5b5a 100644 --- a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc @@ -1,7 +1,7 @@ [[painless-watcher-transform-context]] === Watcher transform context -Use a Painless script as a {xpack-ref}/transform-script.html[watch transform] +Use a Painless script as a {ref}/transform-script.html[watch transform] to transform a payload into a new payload for further use in the watch. Transform scripts return an Object value of the new payload. diff --git a/docs/painless/painless-literals.asciidoc b/docs/painless/painless-literals.asciidoc index 621fc152be956..f2e5849638048 100644 --- a/docs/painless/painless-literals.asciidoc +++ b/docs/painless/painless-literals.asciidoc @@ -30,12 +30,12 @@ HEX: '-'? '0' [xX] [0-9a-fA-F]+ [lL]?; + [source,Painless] ---- -<1> 0 -<2> 0D -<3> 1234L -<4> -90f -<5> -022 -<6> 0xF2A +0 <1> +0D <2> +1234L <3> +-90f <4> +-022 <5> +0xF2A <6> ---- + <1> `int 0` @@ -67,11 +67,11 @@ EXPONENT: ( [eE] [+\-]? [0-9]+ ); + [source,Painless] ---- -<1> 0.0 -<2> 1E6 -<3> 0.977777 -<4> -126.34 -<5> 89.9F +0.0 <1> +1E6 <2> +0.977777 <3> +-126.34 <4> +89.9F <5> ---- + <1> `double 0.0` diff --git a/docs/painless/painless-operators-array.asciidoc b/docs/painless/painless-operators-array.asciidoc index e80a863df2747..ad23a980cb4b8 100644 --- a/docs/painless/painless-operators-array.asciidoc +++ b/docs/painless/painless-operators-array.asciidoc @@ -29,7 +29,7 @@ expression_list: expression (',' expression); + [source,Painless] ---- -<1> int[] x = new int[] {1, 2, 3}; +int[] x = new int[] {1, 2, 3}; <1> ---- + <1> declare `int[] x`; @@ -44,12 +44,12 @@ expression_list: expression (',' expression); + [source,Painless] ---- -<1> int i = 1; -<2> long l = 2L; -<3> float f = 3.0F; -<4> double d = 4.0; -<5> String s = "5"; -<6> def array = new def[] {i, l, f*d, s}; +int i = 1; <1> +long l = 2L; <2> +float f = 3.0F; <3> +double d = 4.0; <4> +String s = "5"; <5> +def array = new def[] {i, l, f*d, s}; <6> ---- + <1> declare `int i`; @@ -114,12 +114,12 @@ brace_access: '[' expression ']' + [source,Painless] ---- -<1> int[] x = new int[2]; -<2> x[0] = 2; -<3> x[1] = 5; -<4> int y = x[0] + x[1]; -<5> int z = 1; -<6> int i = x[z]; +int[] x = new int[2]; <1> +x[0] = 2; <2> +x[1] = 5; <3> +int y = x[0] + x[1]; <4> +int z = 1; <5> +int i = x[z]; <6> ---- + <1> declare `int[] x`; @@ -149,12 +149,12 @@ brace_access: '[' expression ']' + [source,Painless] ---- -<1> def d = new int[2]; -<2> d[0] = 2; -<3> d[1] = 5; -<4> def x = d[0] + d[1]; -<5> def y = 1; -<6> def z = d[y]; +def d = new int[2]; <1> +d[0] = 2; <2> +d[1] = 5; <3> +def x = d[0] + d[1]; <4> +def y = 1; <5> +def z = d[y]; <6> ---- + <1> declare `def d`; @@ -199,9 +199,9 @@ brace_access: '[' expression ']' + [source,Painless] ---- -<1> int[][][] ia3 = new int[2][3][4]; -<2> ia3[1][2][3] = 99; -<3> int i = ia3[1][2][3]; +int[][][] ia3 = new int[2][3][4]; <1> +ia3[1][2][3] = 99; <2> +int i = ia3[1][2][3]; <3> ---- + <1> declare `int[][][] ia`; @@ -230,8 +230,8 @@ from an array type value. 
+ [source,Painless] ---- -<1> int[] x = new int[10]; -<2> int l = x.length; +int[] x = new int[10]; <1> +int l = x.length; <2> ---- <1> declare `int[] x`; allocate `1-d int array` instance with `length [2]` @@ -269,10 +269,10 @@ new_array: 'new' TYPE ('[' expression ']')+; + [source,Painless] ---- -<1> int[] x = new int[5]; -<2> x = new int[10]; -<3> int y = 2; -<4> def z = new def[y][y*2]; +int[] x = new int[5]; <1> +x = new int[10]; <2> +int y = 2; <3> +def z = new def[y][y*2]; <4> ---- + <1> declare `int[] x`; diff --git a/docs/painless/painless-operators-boolean.asciidoc b/docs/painless/painless-operators-boolean.asciidoc index 1223a8d56e78d..6f9481aa4ecd3 100644 --- a/docs/painless/painless-operators-boolean.asciidoc +++ b/docs/painless/painless-operators-boolean.asciidoc @@ -34,8 +34,8 @@ boolean_not: '!' expression; + [source,Painless] ---- -<1> boolean x = !false; -<2> boolean y = !x; +boolean x = !false; <1> +boolean y = !x; <2> ---- <1> declare `boolean x`; boolean not `boolean false` -> `boolean true`; @@ -49,8 +49,8 @@ boolean_not: '!' expression; + [source,Painless] ---- -<1> def y = true; -<2> def z = !y; +def y = true; <1> +def z = !y; <2> ---- + <1> declare `def y`; @@ -103,9 +103,9 @@ greater_than: expression '>' expression; + [source,Painless] ---- -<1> boolean x = 5 > 4; -<2> double y = 6.0; -<3> x = 6 > y; +boolean x = 5 > 4; <1> +double y = 6.0; <2> +x = 6 > y; <3> ---- + <1> declare `boolean x`; @@ -123,10 +123,10 @@ greater_than: expression '>' expression; + [source,Painless] ---- -<1> int x = 5; -<2> def y = 7.0; -<3> def z = y > 6.5; -<4> def a = x > y; +int x = 5; <1> +def y = 7.0; <2> +def z = y > 6.5; <3> +def a = x > y; <4> ---- + <1> declare `int x`; @@ -190,9 +190,9 @@ greater_than_or_equal: expression '>=' expression; + [source,Painless] ---- -<1> boolean x = 5 >= 4; -<2> double y = 6.0; -<3> x = 6 >= y; +boolean x = 5 >= 4; <1> +double y = 6.0; <2> +x = 6 >= y; <3> ---- + <1> declare `boolean x`; @@ -210,10 +210,10 @@ greater_than_or_equal: expression '>=' expression; + [source,Painless] ---- -<1> int x = 5; -<2> def y = 7.0; -<3> def z = y >= 7.0; -<4> def a = x >= y; +int x = 5; <1> +def y = 7.0; <2> +def z = y >= 7.0; <3> +def a = x >= y; <4> ---- + <1> declare `int x`; @@ -277,9 +277,9 @@ less_than: expression '<' expression; + [source,Painless] ---- -<1> boolean x = 5 < 4; -<2> double y = 6.0; -<3> x = 6 < y; +boolean x = 5 < 4; <1> +double y = 6.0; <2> +x = 6 < y; <3> ---- + <1> declare `boolean x`; @@ -297,10 +297,10 @@ less_than: expression '<' expression; + [source,Painless] ---- -<1> int x = 5; -<2> def y = 7.0; -<3> def z = y < 6.5; -<4> def a = x < y; +int x = 5; <1> +def y = 7.0; <2> +def z = y < 6.5; <3> +def a = x < y; <4> ---- + <1> declare `int x`; @@ -364,9 +364,9 @@ greater_than_or_equal: expression '<=' expression; + [source,Painless] ---- -<1> boolean x = 5 <= 4; -<2> double y = 6.0; -<3> x = 6 <= y; +boolean x = 5 <= 4; <1> +double y = 6.0; <2> +x = 6 <= y; <3> ---- + <1> declare `boolean x`; @@ -384,10 +384,10 @@ greater_than_or_equal: expression '<=' expression; + [source,Painless] ---- -<1> int x = 5; -<2> def y = 7.0; -<3> def z = y <= 7.0; -<4> def a = x <= y; +int x = 5; <1> +def y = 7.0; <2> +def z = y <= 7.0; <3> +def a = x <= y; <4> ---- + <1> declare `int x`; @@ -436,9 +436,9 @@ instance_of: ID 'instanceof' TYPE; + [source,Painless] ---- -<1> Map m = new HashMap(); -<2> boolean a = m instanceof HashMap; -<3> boolean b = m instanceof Map; +Map m = new HashMap(); <1> +boolean a = m instanceof HashMap; <2> +boolean b = m 
instanceof Map; <3> ---- + <1> declare `Map m`; @@ -461,9 +461,9 @@ instance_of: ID 'instanceof' TYPE; + [source,Painless] ---- -<1> def d = new ArrayList(); -<2> boolean a = d instanceof List; -<3> boolean b = d instanceof Map; +def d = new ArrayList(); <1> +boolean a = d instanceof List; <2> +boolean b = d instanceof Map; <3> ---- + <1> declare `def d`; @@ -531,10 +531,10 @@ equality_equals: expression '==' expression; + [source,Painless] ---- -<1> boolean a = true; -<2> boolean b = false; -<3> a = a == false; -<4> b = a == b; +boolean a = true; <1> +boolean b = false; <2> +a = a == false; <3> +b = a == b; <4> ---- + <1> declare `boolean a`; @@ -554,10 +554,10 @@ equality_equals: expression '==' expression; + [source,Painless] ---- -<1> int a = 1; -<2> double b = 2.0; -<3> boolean c = a == b; -<4> c = 1 == a; +int a = 1; <1> +double b = 2.0; <2> +boolean c = a == b; <3> +c = 1 == a; <4> ---- + <1> declare `int a`; @@ -579,12 +579,12 @@ equality_equals: expression '==' expression; + [source,Painless] ---- -<1> List a = new ArrayList(); -<2> List b = new ArrayList(); -<3> a.add(1); -<4> boolean c = a == b; -<5> b.add(1); -<6> c = a == b; +List a = new ArrayList(); <1> +List b = new ArrayList(); <2> +a.add(1); <3> +boolean c = a == b; <4> +b.add(1); <5> +c = a == b; <6> ---- + <1> declare `List a`; @@ -615,12 +615,12 @@ equality_equals: expression '==' expression; + [source,Painless] ---- -<1> Object a = null; -<2> Object b = null; -<3> boolean c = a == null; -<4> c = a == b; -<5> b = new Object(); -<6> c = a == b; +Object a = null; <1> +Object b = null; <2> +boolean c = a == null; <3> +c = a == b; <4> +b = new Object(); <5> +c = a == b; <6> ---- + <1> declare `Object a`; @@ -647,12 +647,12 @@ equality_equals: expression '==' expression; + [source, Painless] ---- -<1> def a = 0; -<2> def b = 1; -<3> boolean c = a == b; -<4> def d = new HashMap(); -<5> def e = new ArrayList(); -<6> c = d == e; +def a = 0; <1> +def b = 1; <2> +boolean c = a == b; <3> +def d = new HashMap(); <4> +def e = new ArrayList(); <5> +c = d == e; <6> ---- + <1> declare `def a`; @@ -733,10 +733,10 @@ equality_not_equals: expression '!=' expression; + [source,Painless] ---- -<1> boolean a = true; -<2> boolean b = false; -<3> a = a != false; -<4> b = a != b; +boolean a = true; <1> +boolean b = false; <2> +a = a != false; <3> +b = a != b; <4> ---- + <1> declare `boolean a`; @@ -755,10 +755,10 @@ equality_not_equals: expression '!=' expression; + [source,Painless] ---- -<1> int a = 1; -<2> double b = 2.0; -<3> boolean c = a != b; -<4> c = 1 != a; +int a = 1; <1> +double b = 2.0; <2> +boolean c = a != b; <3> +c = 1 != a; <4> ---- + <1> declare `int a`; @@ -780,12 +780,12 @@ equality_not_equals: expression '!=' expression; + [source,Painless] ---- -<1> List a = new ArrayList(); -<2> List b = new ArrayList(); -<3> a.add(1); -<4> boolean c = a == b; -<5> b.add(1); -<6> c = a == b; +List a = new ArrayList(); <1> +List b = new ArrayList(); <2> +a.add(1); <3> +boolean c = a == b; <4> +b.add(1); <5> +c = a == b; <6> ---- + <1> declare `List a`; @@ -818,12 +818,12 @@ equality_not_equals: expression '!=' expression; + [source,Painless] ---- -<1> Object a = null; -<2> Object b = null; -<3> boolean c = a == null; -<4> c = a == b; -<5> b = new Object(); -<6> c = a == b; +Object a = null; <1> +Object b = null; <2> +boolean c = a == null; <3> +c = a == b; <4> +b = new Object(); <5> +c = a == b; <6> ---- + <1> declare `Object a`; @@ -851,12 +851,12 @@ equality_not_equals: expression '!=' expression; + [source, Painless] ---- -<1> def a = 
0; -<2> def b = 1; -<3> boolean c = a == b; -<4> def d = new HashMap(); -<5> def e = new ArrayList(); -<6> c = d == e; +def a = 0; <1> +def b = 1; <2> +boolean c = a == b; <3> +def d = new HashMap(); <4> +def e = new ArrayList(); <5> +c = d == e; <6> ---- + <1> declare `def a`; @@ -934,11 +934,11 @@ identity_equals: expression '===' expression; + [source,Painless] ---- -<1> List a = new ArrayList(); -<2> List b = new ArrayList(); -<3> List c = a; -<4> boolean c = a === b; -<5> c = a === c; +List a = new ArrayList(); <1> +List b = new ArrayList(); <2> +List c = a; <3> +boolean c = a === b; <4> +c = a === c; <5> ---- + <1> declare `List a`; @@ -969,12 +969,12 @@ identity_equals: expression '===' expression; + [source,Painless] ---- -<1> Object a = null; -<2> Object b = null; -<3> boolean c = a === null; -<4> c = a === b; -<5> b = new Object(); -<6> c = a === b; +Object a = null; <1> +Object b = null; <2> +boolean c = a === null; <3> +c = a === b; <4> +b = new Object(); <5> +c = a === b; <6> ---- + <1> declare `Object a`; @@ -1000,11 +1000,11 @@ identity_equals: expression '===' expression; + [source, Painless] ---- -<1> def a = new HashMap(); -<2> def b = new ArrayList(); -<3> boolean c = a === b; -<4> b = a; -<5> c = a === b; +def a = new HashMap(); <1> +def b = new ArrayList(); <2> +boolean c = a === b; <3> +b = a; <4> +c = a === b; <5> ---- + <1> declare `def d`; @@ -1081,11 +1081,11 @@ identity_not_equals: expression '!==' expression; + [source,Painless] ---- -<1> List a = new ArrayList(); -<2> List b = new ArrayList(); -<3> List c = a; -<4> boolean c = a !== b; -<5> c = a !== c; +List a = new ArrayList(); <1> +List b = new ArrayList(); <2> +List c = a; <3> +boolean c = a !== b; <4> +c = a !== c; <5> ---- + <1> declare `List a`; @@ -1116,12 +1116,12 @@ identity_not_equals: expression '!==' expression; + [source,Painless] ---- -<1> Object a = null; -<2> Object b = null; -<3> boolean c = a !== null; -<4> c = a !== b; -<5> b = new Object(); -<6> c = a !== b; +Object a = null; <1> +Object b = null; <2> +boolean c = a !== null; <3> +c = a !== b; <4> +b = new Object(); <5> +c = a !== b; <6> ---- + <1> declare `Object a`; @@ -1147,11 +1147,11 @@ identity_not_equals: expression '!==' expression; + [source, Painless] ---- -<1> def a = new HashMap(); -<2> def b = new ArrayList(); -<3> boolean c = a !== b; -<4> b = a; -<5> c = a !== b; +def a = new HashMap(); <1> +def b = new ArrayList(); <2> +boolean c = a !== b; <3> +b = a; <4> +c = a !== b; <5> ---- + <1> declare `def d`; @@ -1216,9 +1216,9 @@ boolean_xor: expression '^' expression; + [source,Painless] ---- -<1> boolean x = false; -<2> boolean y = x ^ true; -<3> y = y ^ x; +boolean x = false; <1> +boolean y = x ^ true; <2> +y = y ^ x; <3> ---- + <1> declare `boolean x`; @@ -1236,9 +1236,9 @@ boolean_xor: expression '^' expression; + [source,Painless] ---- -<1> def x = false; -<2> def y = x ^ true; -<3> y = y ^ x; +def x = false; <1> +def y = x ^ true; <2> +y = y ^ x; <3> ---- + <1> declare `def x`; @@ -1292,10 +1292,10 @@ boolean_and: expression '&&' expression; + [source,Painless] ---- -<1> boolean x = true; -<2> boolean y = x && true; -<3> x = false; -<4> y = y && x; +boolean x = true; <1> +boolean y = x && true; <2> +x = false; <3> +y = y && x; <4> ---- + <1> declare `boolean x`; @@ -1314,10 +1314,10 @@ boolean_and: expression '&&' expression; + [source,Painless] ---- -<1> def x = true; -<2> def y = x && true; -<3> x = false; -<4> y = y && x; +def x = true; <1> +def y = x && true; <2> +x = false; <3> +y = y && x; <4> ---- + <1> declare `def 
x`; @@ -1372,10 +1372,10 @@ boolean_and: expression '||' expression; + [source,Painless] ---- -<1> boolean x = false; -<2> boolean y = x || true; -<3> y = false; -<4> y = y || x; +boolean x = false; <1> +boolean y = x || true; <2> +y = false; <3> +y = y || x; <4> ---- + <1> declare `boolean x`; @@ -1394,10 +1394,10 @@ boolean_and: expression '||' expression; + [source,Painless] ---- -<1> def x = false; -<2> def y = x || true; -<3> y = false; -<4> y = y || x; +def x = false; <1> +def y = x || true; <2> +y = false; <3> +y = y || x; <4> ---- + <1> declare `def x`; diff --git a/docs/painless/painless-operators-general.asciidoc b/docs/painless/painless-operators-general.asciidoc index 9bd057432fb51..6db9b3d5d1eae 100644 --- a/docs/painless/painless-operators-general.asciidoc +++ b/docs/painless/painless-operators-general.asciidoc @@ -22,8 +22,8 @@ precedence: '(' expression ')'; + [source,Painless] ---- -<1> int x = (5+4)*6; -<2> int y = 12/(x-50); +int x = (5+4)*6; <1> +int y = 12/(x-50); <2> ---- + <1> declare `int x`; @@ -59,11 +59,11 @@ function_call: ID '(' ( expression (',' expression)* )? ')''; + [source,Painless] ---- -<1> int add(int x, int y) { +int add(int x, int y) { <1> return x + y; } -<2> int z = add(1, 2); +int z = add(1, 2); <2> ---- + <1> define function `add` that returns `int` and has parameters (`int x`, @@ -128,10 +128,10 @@ occur. + [source,Painless] ---- -<1> boolean b = true; -<2> int x = b ? 1 : 2; -<3> List y = x > 1 ? new ArrayList() : null; -<4> def z = x < 2 ? x : 2.0; +boolean b = true; <1> +int x = b ? 1 : 2; <2> +List y = x > 1 ? new ArrayList() : null; <3> +def z = x < 2 ? x : 2.0; <4> ---- + <1> declare `boolean b`; @@ -195,10 +195,10 @@ non-static member fields: + [source,Painless] ---- -<1> Example example = new Example(); -<2> example.x = 1; -<3> example.y = 2.0; -<4> example.z = new ArrayList(); +Example example = new Example(); <1> +example.x = 1; <2> +example.y = 2.0; <3> +example.z = new ArrayList(); <4> ---- + <1> declare `Example example`; @@ -218,9 +218,9 @@ non-static member fields: + [source,Painless] ---- -<1> Example example = new Example(); -<2> example.x = 1; -<3> example.y = example.x; +Example example = new Example(); <1> +example.x = 1; <2> +example.y = example.x; <3> ---- + <1> declare `Example example`; @@ -297,18 +297,18 @@ operators. + [source,Painless] ---- -<1> int i = 10; -<2> i *= 2; -<3> i /= 5; -<4> i %= 3; -<5> i += 5; -<6> i -= 5; -<7> i <<= 2; -<8> i >>= 1; -<9> i >>>= 1; -<10> i &= 15; -<11> i ^= 12; -<12> i |= 2; +int i = 10; <1> +i *= 2; <2> +i /= 5; <3> +i %= 3; <4> +i += 5; <5> +i -= 5; <6> +i <<= 2; <7> +i >>= 1; <8> +i >>>= 1; <9> +i &= 15; <10> +i ^= 12; <11> +i |= 2; <12> ---- + <1> declare `int i`; @@ -362,10 +362,10 @@ operators. + [source,Painless] ---- -<1> boolean b = true; -<2> b &= false; -<3> b ^= false; -<4> b |= true; +boolean b = true; <1> +b &= false; <2> +b ^= false; <3> +b |= true; <4> ---- + <1> declare `boolean b`; @@ -387,8 +387,8 @@ operators. + [source,Painless] ---- -<1> String s = 'compound'; -<2> s += ' assignment'; +String s = 'compound'; <1> +s += ' assignment'; <2> ---- <1> declare `String s`; store `String 'compound'` to `s`; @@ -402,8 +402,8 @@ operators. + [source,Painless] ---- -<1> def x = 1; -<2> x += 2; +def x = 1; <1> +x += 2; <2> ---- <1> declare `def x`; implicit cast `int 1` to `def`; @@ -419,8 +419,8 @@ operators. 
+ [source,Painless] ---- -<1> byte b = 1; -<2> b += 2; +byte b = 1; <1> +b += 2; <2> ---- <1> declare `byte b`; store `byte 1` to `x`; diff --git a/docs/painless/painless-operators-numeric.asciidoc b/docs/painless/painless-operators-numeric.asciidoc index d39b895908f44..22c2f04d50677 100644 --- a/docs/painless/painless-operators-numeric.asciidoc +++ b/docs/painless/painless-operators-numeric.asciidoc @@ -43,11 +43,11 @@ post_increment: ( variable | field ) '++'; + [source,Painless] ---- -<1> short i = 0; -<2> i++; -<3> long j = 1; -<4> long k; -<5> k = j++; +short i = 0; <1> +i++; <2> +long j = 1; <3> +long k; <4> +k = j++; <5> ---- + <1> declare `short i`; @@ -71,8 +71,8 @@ post_increment: ( variable | field ) '++'; + [source,Painless] ---- -<1> def x = 1; -<2> x++; +def x = 1; <1> +x++; <2> ---- + <1> declare `def x`; @@ -126,11 +126,11 @@ post_decrement: ( variable | field ) '--'; + [source,Painless] ---- -<1> short i = 0; -<2> i--; -<3> long j = 1; -<4> long k; -<5> k = j--; +short i = 0; <1> +i--; <2> +long j = 1; <3> +long k; <4> +k = j--; <5> ---- + <1> declare `short i`; @@ -154,8 +154,8 @@ post_decrement: ( variable | field ) '--'; + [source,Painless] ---- -<1> def x = 1; -<2> x--; +def x = 1; <1> +x--; <2> ---- + <1> declare `def x`; @@ -209,11 +209,11 @@ pre_increment: '++' ( variable | field ); + [source,Painless] ---- -<1> short i = 0; -<2> ++i; -<3> long j = 1; -<4> long k; -<5> k = ++j; +short i = 0; <1> +++i; <2> +long j = 1; <3> +long k; <4> +k = ++j; <5> ---- + <1> declare `short i`; @@ -237,8 +237,8 @@ pre_increment: '++' ( variable | field ); + [source,Painless] ---- -<1> def x = 1; -<2> ++x; +def x = 1; <1> +++x; <2> ---- + <1> declare `def x`; @@ -292,11 +292,11 @@ pre_increment: '--' ( variable | field ); + [source,Painless] ---- -<1> short i = 0; -<2> --i; -<3> long j = 1; -<4> long k; -<5> k = --j; +short i = 0; <1> +--i; <2> +long j = 1; <3> +long k; <4> +k = --j; <5> ---- + <1> declare `short i`; @@ -320,8 +320,8 @@ pre_increment: '--' ( variable | field ); + [source,Painless] ---- -<1> def x = 1; -<2> --x; +def x = 1; <1> +--x; <2> ---- + <1> declare `def x`; @@ -356,8 +356,8 @@ unary_positive: '+' expression; + [source,Painless] ---- -<1> int x = +1; -<2> long y = +x; +int x = +1; <1> +long y = +x; <2> ---- + <1> declare `int x`; @@ -373,8 +373,8 @@ unary_positive: '+' expression; + [source,Painless] ---- -<1> def z = +1; -<2> int i = +z; +def z = +1; <1> +int i = +z; <2> ---- <1> declare `def z`; identity `int 1` -> `int 1`; @@ -408,8 +408,8 @@ unary_negative: '-' expression; + [source,Painless] ---- -<1> int x = -1; -<2> long y = -x; +int x = -1; <1> +long y = -x; <2> ---- + <1> declare `int x`; @@ -425,8 +425,8 @@ unary_negative: '-' expression; + [source,Painless] ---- -<1> def z = -1; -<2> int i = -z; +def z = -1; <1> +int i = -z; <2> ---- <1> declare `def z`; negate `int 1` -> `int -1`; @@ -484,9 +484,9 @@ bitwise_not: '~' expression; + [source,Painless] ---- -<1> byte b = 1; -<2> int i = ~b; -<3> long l = ~i; +byte b = 1; <1> +int i = ~b; <2> +long l = ~i; <3> ---- + <1> declare `byte x`; @@ -506,8 +506,8 @@ bitwise_not: '~' expression; + [source,Painless] ---- -<1> def d = 1; -<2> def e = ~d; +def d = 1; <1> +def e = ~d; <2> ---- + <1> declare `def d`; @@ -559,8 +559,8 @@ multiplication: expression '*' expression; + [source,Painless] ---- -<1> int i = 5*4; -<2> double d = i*7.0; +int i = 5*4; <1> +double d = i*7.0; <2> ---- + <1> declare `int i`; @@ -577,8 +577,8 @@ multiplication: expression '*' expression; + [source,Painless] ---- -<1> def x = 5*4; 
-<2> def y = x*2; +def x = 5*4; <1> +def y = x*2; <2> ---- <1> declare `def x`; multiply `int 5` by `int 4` -> `int 20`; @@ -632,8 +632,8 @@ division: expression '/' expression; + [source,Painless] ---- -<1> int i = 29/4; -<2> double d = i/7.0; +int i = 29/4; <1> +double d = i/7.0; <2> ---- + <1> declare `int i`; @@ -650,8 +650,8 @@ division: expression '/' expression; + [source,Painless] ---- -<1> def x = 5/4; -<2> def y = x/2; +def x = 5/4; <1> +def y = x/2; <2> ---- <1> declare `def x`; divide `int 5` by `int 4` -> `int 1`; @@ -703,8 +703,8 @@ remainder: expression '%' expression; + [source,Painless] ---- -<1> int i = 29%4; -<2> double d = i%7.0; +int i = 29%4; <1> +double d = i%7.0; <2> ---- + <1> declare `int i`; @@ -721,8 +721,8 @@ remainder: expression '%' expression; + [source,Painless] ---- -<1> def x = 5%4; -<2> def y = x%2; +def x = 5%4; <1> +def y = x%2; <2> ---- <1> declare `def x`; remainder `int 5` by `int 4` -> `int 1`; @@ -773,8 +773,8 @@ addition: expression '+' expression; + [source,Painless] ---- -<1> int i = 29+4; -<2> double d = i+7.0; +int i = 29+4; <1> +double d = i+7.0; <2> ---- + <1> declare `int i`; @@ -791,8 +791,8 @@ addition: expression '+' expression; + [source,Painless] ---- -<1> def x = 5+4; -<2> def y = x+2; +def x = 5+4; <1> +def y = x+2; <2> ---- <1> declare `def x`; add `int 5` and `int 4` -> `int 9`; @@ -844,8 +844,8 @@ subtraction: expression '-' expression; + [source,Painless] ---- -<1> int i = 29-4; -<2> double d = i-7.5; +int i = 29-4; <1> +double d = i-7.5; <2> ---- + <1> declare `int i`; @@ -862,8 +862,8 @@ subtraction: expression '-' expression; + [source,Painless] ---- -<1> def x = 5-4; -<2> def y = x-2; +def x = 5-4; <1> +def y = x-2; <2> ---- <1> declare `def x`; subtract `int 4` and `int 5` -> `int 1`; @@ -918,8 +918,8 @@ below. The right-hand side integer type value is always implicitly cast to an + [source,Painless] ---- -<1> int i = 4 << 1; -<2> long l = i << 2L; +int i = 4 << 1; <1> +long l = i << 2L; <2> ---- + <1> declare `int i`; @@ -936,8 +936,8 @@ below. The right-hand side integer type value is always implicitly cast to an + [source,Painless] ---- -<1> def x = 4 << 2; -<2> def y = x << 1; +def x = 4 << 2; <1> +def y = x << 1; <2> ---- <1> declare `def x`; left shift `int 4` by `int 2` -> `int 16`; @@ -993,8 +993,8 @@ below. The right-hand side integer type value is always implicitly cast to an + [source,Painless] ---- -<1> int i = 32 >> 1; -<2> long l = i >> 2L; +int i = 32 >> 1; <1> +long l = i >> 2L; <2> ---- + <1> declare `int i`; @@ -1011,8 +1011,8 @@ below. The right-hand side integer type value is always implicitly cast to an + [source,Painless] ---- -<1> def x = 16 >> 2; -<2> def y = x >> 1; +def x = 16 >> 2; <1> +def y = x >> 1; <2> ---- <1> declare `def x`; right shift `int 16` by `int 2` -> `int 4`; @@ -1068,8 +1068,8 @@ below. The right-hand side integer type value is always implicitly cast to an + [source,Painless] ---- -<1> int i = -1 >>> 29; -<2> long l = i >>> 2L; +int i = -1 >>> 29; <1> +long l = i >>> 2L; <2> ---- + <1> declare `int i`; @@ -1086,8 +1086,8 @@ below. 
The right-hand side integer type value is always implicitly cast to an + [source,Painless] ---- -<1> def x = 16 >>> 2; -<2> def y = x >>> 1; +def x = 16 >>> 2; <1> +def y = x >>> 1; <2> ---- <1> declare `def x`; unsigned right shift `int 16` by `int 2` -> `int 4`; @@ -1146,8 +1146,8 @@ bitwise_and: expression '&' expression; + [source,Painless] ---- -<1> int i = 5 & 6; -<2> long l = i & 5L; +int i = 5 & 6; <1> +long l = i & 5L; <2> ---- + <1> declare `int i`; @@ -1164,8 +1164,8 @@ bitwise_and: expression '&' expression; + [source,Painless] ---- -<1> def x = 15 & 6; -<2> def y = x & 5; +def x = 15 & 6; <1> +def y = x & 5; <2> ---- <1> declare `def x`; bitwise and `int 15` and `int 6` -> `int 6`; @@ -1226,8 +1226,8 @@ bitwise_and: expression '^' expression; + [source,Painless] ---- -<1> int i = 5 ^ 6; -<2> long l = i ^ 5L; +int i = 5 ^ 6; <1> +long l = i ^ 5L; <2> ---- + <1> declare `int i`; @@ -1244,8 +1244,8 @@ bitwise_and: expression '^' expression; + [source,Painless] ---- -<1> def x = 15 ^ 6; -<2> def y = x ^ 5; +def x = 15 ^ 6; <1> +def y = x ^ 5; <2> ---- <1> declare `def x`; bitwise xor `int 15` and `int 6` -> `int 9`; @@ -1306,8 +1306,8 @@ bitwise_and: expression '|' expression; + [source,Painless] ---- -<1> int i = 5 | 6; -<2> long l = i | 8L; +int i = 5 | 6; <1> +long l = i | 8L; <2> ---- + <1> declare `int i`; @@ -1324,8 +1324,8 @@ bitwise_and: expression '|' expression; + [source,Painless] ---- -<1> def x = 5 ^ 6; -<2> def y = x ^ 8; +def x = 5 ^ 6; <1> +def y = x ^ 8; <2> ---- <1> declare `def x`; bitwise or `int 5` and `int 6` -> `int 7`; diff --git a/docs/painless/painless-operators-reference.asciidoc b/docs/painless/painless-operators-reference.asciidoc index 487fcce15f31b..dbdae92b270ad 100644 --- a/docs/painless/painless-operators-reference.asciidoc +++ b/docs/painless/painless-operators-reference.asciidoc @@ -38,12 +38,12 @@ arguments: '(' (expression (',' expression)*)? 
')'; + [source,Painless] ---- -<1> Map m = new HashMap(); -<2> m.put(1, 2); -<3> int z = m.get(1); -<4> def d = new ArrayList(); -<5> d.add(1); -<6> int i = Integer.parseInt(d.get(0).toString()); +Map m = new HashMap(); <1> +m.put(1, 2); <2> +int z = m.get(1); <3> +def d = new ArrayList(); <4> +d.add(1); <5> +int i = Integer.parseInt(d.get(0).toString()); <6> ---- + <1> declare `Map m`; @@ -111,12 +111,12 @@ non-static member fields: + [source,Painless] ---- -<1> Example example = new Example(); -<2> example.x = 1; -<3> example.y = example.x; -<4> example.z = new ArrayList(); -<5> example.z.add(1); -<6> example.x = example.z.get(0); +Example example = new Example(); <1> +example.x = 1; <2> +example.y = example.x; <3> +example.z = new ArrayList(); <4> +example.z.add(1); <5> +example.x = example.z.get(0); <6> ---- + <1> declare `Example example`; @@ -192,8 +192,8 @@ non-static member fields: + [source,Painless] ---- -<1> Example example = new Example(); -<2> List x = example?.factory(); +Example example = new Example(); <1> +List x = example?.factory(); <2> ---- + <1> declare `Example example`; @@ -208,8 +208,8 @@ non-static member fields: + [source,Painless] ---- -<1> Example example = null; -<2> List x = example?.x; +Example example = null; <1> +List x = example?.x; <2> ---- <1> declare `Example example`; store `null` to `example` @@ -242,7 +242,7 @@ list_initialization: '[' expression (',' expression)* ']' + [source,Painless] ---- -<1> List empty = []; +List empty = []; <1> ---- + <1> declare `List empty`; @@ -254,7 +254,7 @@ list_initialization: '[' expression (',' expression)* ']' + [source,Painless] ---- -<1> List list = [1, 2, 3]; +List list = [1, 2, 3]; <1> ---- + <1> declare `List list`; @@ -269,12 +269,12 @@ list_initialization: '[' expression (',' expression)* ']' + [source,Painless] ---- -<1> int i = 1; -<2> long l = 2L; -<3> float f = 3.0F; -<4> double d = 4.0; -<5> String s = "5"; -<6> List list = [i, l, f*d, s]; +int i = 1; <1> +long l = 2L; <2> +float f = 3.0F; <3> +double d = 4.0; <4> +String s = "5"; <5> +List list = [i, l, f*d, s]; <6> ---- + <1> declare `int i`; @@ -329,15 +329,15 @@ list_access: '[' expression ']' + [source,Painless] ---- -<1> List list = new ArrayList(); -<2> list.add(1); -<3> list.add(2); -<4> list.add(3); -<5> list[0] = 2; -<6> list[1] = 5; -<7> int x = list[0] + list[1]; -<8> int y = 1; -<9> int z = list[y]; +List list = new ArrayList(); <1> +list.add(1); <2> +list.add(2); <3> +list.add(3); <4> +list[0] = 2; <5> +list[1] = 5; <6> +int x = list[0] + list[1]; <7> +int y = 1; <8> +int z = list[y]; <9> ---- + <1> declare `List list`; @@ -376,15 +376,15 @@ list_access: '[' expression ']' + [source,Painless] ---- -<1> def d = new ArrayList(); -<2> d.add(1); -<3> d.add(2); -<4> d.add(3); -<5> d[0] = 2; -<6> d[1] = 5; -<7> def x = d[0] + d[1]; -<8> def y = 1; -<9> def z = d[y]; +def d = new ArrayList(); <1> +d.add(1); <2> +d.add(2); <3> +d.add(3); <4> +d[0] = 2; <5> +d[1] = 5; <6> +def x = d[0] + d[1]; <7> +def y = 1; <8> +def z = d[y]; <9> ---- + <1> declare `List d`; @@ -449,7 +449,7 @@ key_pair: expression ':' expression + [source,Painless] ---- -<1> Map empty = [:]; +Map empty = [:]; <1> ---- + <1> declare `Map empty`; @@ -461,7 +461,7 @@ key_pair: expression ':' expression + [source,Painless] ---- -<1> Map map = [1:2, 3:4, 5:6]; +Map map = [1:2, 3:4, 5:6]; <1> ---- + <1> declare `Map map`; @@ -476,13 +476,13 @@ key_pair: expression ':' expression + [source,Painless] ---- -<1> byte b = 0; -<2> int i = 1; -<3> long l = 2L; -<4> float f = 3.0F; -<5> double 
d = 4.0; -<6> String s = "5"; -<7> Map map = [b:i, l:f*d, d:s]; +byte b = 0; <1> +int i = 1; <2> +long l = 2L; <3> +float f = 3.0F; <4> +double d = 4.0; <5> +String s = "5"; <6> +Map map = [b:i, l:f*d, d:s]; <7> ---- + <1> declare `byte b`; @@ -538,12 +538,12 @@ map_access: '[' expression ']' + [source,Painless] ---- -<1> Map map = new HashMap(); -<2> map['value2'] = 2; -<3> map['value5'] = 5; -<4> int x = map['value2'] + map['value5']; -<5> String y = 'value5'; -<6> int z = x[z]; +Map map = new HashMap(); <1> +map['value2'] = 2; <2> +map['value5'] = 5; <3> +int x = map['value2'] + map['value5']; <4> +String y = 'value5'; <5> +int z = x[z]; <6> ---- + <1> declare `Map map`; @@ -576,12 +576,12 @@ map_access: '[' expression ']' + [source,Painless] ---- -<1> def d = new HashMap(); -<2> d['value2'] = 2; -<3> d['value5'] = 5; -<4> int x = d['value2'] + d['value5']; -<5> String y = 'value5'; -<6> def z = d[y]; +def d = new HashMap(); <1> +d['value2'] = 2; <2> +d['value5'] = 5; <3> +int x = d['value2'] + d['value5']; <4> +String y = 'value5'; <5> +def z = d[y]; <6> ---- + <1> declare `def d`; @@ -649,9 +649,9 @@ new_instance: 'new' TYPE '(' (expression (',' expression)*)? ')'; [source,Painless] ---- -<1> Map m = new HashMap(); -<2> def d = new ArrayList(); -<3> def e = new HashMap(m); +Map m = new HashMap(); <1> +def d = new ArrayList(); <2> +def e = new HashMap(m); <3> ---- <1> declare `Map m`; allocate `HashMap` instance -> `HashMap reference`; @@ -687,9 +687,9 @@ concatenate: expression '+' expression; + [source,Painless] ---- -<1> String x = "con"; -<2> String y = x + "cat"; -<3> String z = 4 + 5 + x; +String x = "con"; <1> +String y = x + "cat"; <2> +String z = 4 + 5 + x; <3> ---- + <1> declare `String x`; @@ -709,8 +709,8 @@ concatenate: expression '+' expression; + [source,Painless] ---- -<1> def d = 2; -<2> d = "con" + d + "cat"; +def d = 2; <1> +d = "con" + d + "cat"; <2> ---- + <1> declare `def`; @@ -749,10 +749,10 @@ elvis: expression '?:' expression; + [source,Painless] ---- -<1> List x = new ArrayList(); -<2> List y = x ?: new ArrayList(); -<3> y = null; -<4> List z = y ?: new ArrayList(); +List x = new ArrayList(); <1> +List y = x ?: new ArrayList(); <2> +y = null; <3> +List z = y ?: new ArrayList(); <4> ---- + <1> declare `List x`; diff --git a/docs/painless/painless-statements.asciidoc b/docs/painless/painless-statements.asciidoc index 3bc4513baa733..b9aceed9cf097 100644 --- a/docs/painless/painless-statements.asciidoc +++ b/docs/painless/painless-statements.asciidoc @@ -4,6 +4,23 @@ Painless supports all of Java's https://docs.oracle.com/javase/tutorial/java/nutsandbolts/flow.html[ control flow statements] except the `switch` statement. +==== Conditional statements + +===== If / Else + +[source,painless] +--------------------------------------------------------- +if (doc[item].size() == 0) { + // do something if "item" is missing +} else { + // do something else +} +--------------------------------------------------------- + +==== Loop statements + +===== For + Painless also supports the `for in` syntax from Groovy: [source,painless] @@ -11,4 +28,4 @@ Painless also supports the `for in` syntax from Groovy: for (item : list) { ... 
} ---------------------------------------------------------- \ No newline at end of file +--------------------------------------------------------- diff --git a/docs/painless/painless-types.asciidoc b/docs/painless/painless-types.asciidoc index 65ae9b3f70360..7588d6a273a90 100644 --- a/docs/painless/painless-types.asciidoc +++ b/docs/painless/painless-types.asciidoc @@ -77,9 +77,9 @@ logical quantity with two possible values of `true` and `false` + [source,Painless] ---- -<1> int i = 1; -<2> double d; -<3> boolean b = true; +int i = 1; <1> +double d; <2> +boolean b = true; <3> ---- + <1> declare `int i`; @@ -93,8 +93,8 @@ logical quantity with two possible values of `true` and `false` + [source,Painless] ---- -<1> int i = 1; -<2> i.toString(); +int i = 1; <1> +i.toString(); <2> ---- + <1> declare `int i`; @@ -193,9 +193,9 @@ relationships. + [source,Painless] ---- -<1> List l = new ArrayList(); -<2> l.add(1); -<3> int i = l.get(0) + 2; +List l = new ArrayList(); <1> +l.add(1); <2> +int i = l.get(0) + 2; <3> ---- + <1> declare `List l`; @@ -216,11 +216,11 @@ relationships. + [source,Painless] ---- -<1> List l0 = new ArrayList(); -<2> List l1 = l0; -<3> l0.add(1); -<4> l1.add(2); -<5> int i = l1.get(0) + l0.get(1); +List l0 = new ArrayList(); <1> +List l1 = l0; <2> +l0.add(1); <3> +l1.add(2); <4> +int i = l1.get(0) + l0.get(1); <5> ---- + <1> declare `List l0`; @@ -251,8 +251,8 @@ relationships. + [source,Painless] ---- -<1> int i = Integer.MAX_VALUE; -<2> long l = Long.parseLong("123L"); +int i = Integer.MAX_VALUE; <1> +long l = Long.parseLong("123L"); <2> ---- + <1> declare `int i`; @@ -292,9 +292,9 @@ types and reference types directly when performance is critical. + [source,Painless] ---- -<1> def dp = 1; -<2> def dr = new ArrayList(); -<3> dr = dp; +def dp = 1; <1> +def dr = new ArrayList(); <2> +dr = dp; <3> ---- + <1> declare `def dp`; @@ -312,9 +312,9 @@ types and reference types directly when performance is critical. + [source,Painless] ---- -<1> Object l = new ArrayList(); -<2> def d = l; -<3> d.ensureCapacity(10); +Object l = new ArrayList(); <1> +def d = l; <2> +d.ensureCapacity(10); <3> ---- + <1> declare `Object l`; @@ -348,10 +348,10 @@ instances. + [source,Painless] ---- -<1> String r = "some text"; -<2> String s = 'some text'; -<3> String t = new String("some text"); -<4> String u; +String r = "some text"; <1> +String s = 'some text'; <2> +String t = new String("some text"); <3> +String u; <4> ---- + <1> declare `String r`; @@ -425,11 +425,11 @@ type `int[]`. And each element in the 1st dimension, `d-1` is the array type + [source,Painless] ---- -<1> int[] x; -<2> float[] y = new float[10]; -<3> def z = new float[5]; -<4> y[9] = 1.0F; -<5> z[0] = y[9]; +int[] x; <1> +float[] y = new float[10]; <2> +def z = new float[5]; <3> +y[9] = 1.0F; <4> +z[0] = y[9]; <5> ---- + <1> declare `int[] x`; @@ -456,9 +456,9 @@ type `int[]`. 
And each element in the 1st dimension, `d-1` is the array type + [source,Painless] ---- -<1> int[][][] ia3 = new int[2][3][4]; -<2> ia3[1][2][3] = 99; -<3> int i = ia3[1][2][3]; +int[][][] ia3 = new int[2][3][4]; <1> +ia3[1][2][3] = 99; <2> +int i = ia3[1][2][3]; <3> ---- + <1> declare `int[][][] ia`; diff --git a/docs/painless/painless-variables.asciidoc b/docs/painless/painless-variables.asciidoc index 8f83b9e2b573b..d86b8ba17214a 100644 --- a/docs/painless/painless-variables.asciidoc +++ b/docs/painless/painless-variables.asciidoc @@ -36,13 +36,13 @@ assignment: '=' expression; + [source,Painless] ---- -<1> int x; -<2> List y; -<3> int x, y = 5, z; -<4> def d; -<5> int i = 10; -<6> float[] f; -<7> Map[][] m; +int x; <1> +List y; <2> +int x, y = 5, z; <3> +def d; <4> +int i = 10; <5> +float[] f; <6> +Map[][] m; <7> ---- + <1> declare `int x`; @@ -90,8 +90,8 @@ assignment: ID '=' expression + [source,Painless] ---- -<1> int i; -<2> i = 10; +int i; <1> +i = 10; <2> ---- + <1> declare `int i`; @@ -102,8 +102,8 @@ assignment: ID '=' expression + [source,Painless] ---- -<1> int i = 10; -<2> double j = 2.0; +int i = 10; <1> +double j = 2.0; <2> ---- + <1> declare `int i`; @@ -115,8 +115,8 @@ assignment: ID '=' expression + [source,Painless] ---- -<1> int i = 10; -<2> int j = i; +int i = 10; <1> +int j = i; <2> ---- + <1> declare `int i`; @@ -130,8 +130,8 @@ assignment: ID '=' expression + [source,Painless] ---- -<1> ArrayList l = new ArrayList(); -<2> Map m = new HashMap(); +ArrayList l = new ArrayList(); <1> +Map m = new HashMap(); <2> ---- + <1> declare `ArrayList l`; @@ -146,10 +146,10 @@ assignment: ID '=' expression + [source,Painless] ---- -<1> List l = new ArrayList(); -<2> List k = l; -<3> List m; -<4> m = k; +List l = new ArrayList(); <1> +List k = l; <2> +List m; <3> +m = k; <4> ---- + <1> declare `List l`; @@ -171,13 +171,13 @@ assignment: ID '=' expression + [source,Painless] ---- -<1> int[] ia1; -<2> ia1 = new int[2]; -<3> ia1[0] = 1; -<4> int[] ib1 = ia1; -<5> int[][] ic2 = new int[2][5]; -<6> ic2[1][3] = 2; -<7> ic2[0] = ia1; +int[] ia1; <1> +ia1 = new int[2]; <2> +ia1[0] = 1; <3> +int[] ib1 = ia1; <4> +int[][] ic2 = new int[2][5]; <5> +ic2[1][3] = 2; <6> +ic2[0] = ia1; <7> ---- + <1> declare `int[] ia1`; diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc index 51be1907c9869..7f3cb3c491bc0 100644 --- a/docs/plugins/analysis-icu.asciidoc +++ b/docs/plugins/analysis-icu.asciidoc @@ -1,7 +1,7 @@ [[analysis-icu]] === ICU Analysis Plugin -The ICU Analysis plugin integrates the Lucene ICU module into elasticsearch, +The ICU Analysis plugin integrates the Lucene ICU module into {es}, adding extended Unicode support using the http://site.icu-project.org/[ICU] libraries, including better analysis of Asian languages, Unicode normalization, Unicode-aware case folding, collation support, and @@ -29,7 +29,7 @@ include::install_remove.asciidoc[] [[analysis-icu-analyzer]] ==== ICU Analyzer -Performs basic normalization, tokenization and character folding, using the +The `icu_analyzer` analyzer performs basic normalization, tokenization and character folding, using the `icu_normalizer` char filter, `icu_tokenizer` and `icu_normalizer` token filter The following parameters are accepted: diff --git a/docs/plugins/analysis.asciidoc b/docs/plugins/analysis.asciidoc index 0a0bbf090495f..68ba99f7a4233 100644 --- a/docs/plugins/analysis.asciidoc +++ b/docs/plugins/analysis.asciidoc @@ -49,14 +49,10 @@ Provides stemming for Ukrainian. 
A number of analysis plugins have been contributed by our community: -* https://github.com/synhershko/elasticsearch-analysis-hebrew[Hebrew Analysis Plugin] (by Itamar Syn-Hershko) * https://github.com/medcl/elasticsearch-analysis-ik[IK Analysis Plugin] (by Medcl) -* https://github.com/medcl/elasticsearch-analysis-mmseg[Mmseg Analysis Plugin] (by Medcl) -* https://github.com/imotov/elasticsearch-analysis-morphology[Russian and English Morphological Analysis Plugin] (by Igor Motov) * https://github.com/medcl/elasticsearch-analysis-pinyin[Pinyin Analysis Plugin] (by Medcl) * https://github.com/duydo/elasticsearch-analysis-vietnamese[Vietnamese Analysis Plugin] (by Duy Do) * https://github.com/ofir123/elasticsearch-network-analysis[Network Addresses Analysis Plugin] (by Ofir123) -* https://github.com/medcl/elasticsearch-analysis-string2int[String2Integer Analysis Plugin] (by Medcl) * https://github.com/ZarHenry96/elasticsearch-dandelion-plugin[Dandelion Analysis Plugin] (by ZarHenry96) * https://github.com/medcl/elasticsearch-analysis-stconvert[STConvert Analysis Plugin] (by Medcl) diff --git a/docs/plugins/api.asciidoc b/docs/plugins/api.asciidoc index 74fbba25810d8..7eeba28b22266 100644 --- a/docs/plugins/api.asciidoc +++ b/docs/plugins/api.asciidoc @@ -23,9 +23,6 @@ A number of plugins have been contributed by our community: * https://github.com/zentity-io/zentity[Entity Resolution Plugin] (https://zentity.io[zentity]): Real-time entity resolution with pure Elasticsearch (by Dave Moore) -* https://github.com/NLPchina/elasticsearch-sql/[SQL language Plugin]: - Allows Elasticsearch to be queried with SQL (by nlpcn) - * https://github.com/ritesh-kapoor/elasticsearch-pql[PQL language Plugin]: Allows Elasticsearch to be queried with simple pipeline query syntax. diff --git a/docs/plugins/discovery-azure-classic.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc index 61161836e1a3a..949426513c533 100644 --- a/docs/plugins/discovery-azure-classic.asciidoc +++ b/docs/plugins/discovery-azure-classic.asciidoc @@ -4,9 +4,7 @@ The Azure Classic Discovery plugin uses the Azure Classic API to identify the addresses of seed hosts. -// TODO: Link to ARM plugin when ready -// See issue https://github.com/elastic/elasticsearch/issues/19146 -deprecated[5.0.0, Use coming Azure ARM Discovery plugin instead] +deprecated[5.0.0, This plugin will be removed in the future] :plugin_name: discovery-azure-classic include::install_remove.asciidoc[] @@ -374,7 +372,7 @@ This command should give you a JSON result: "version" : { "number" : "{version_qualified}", "build_flavor" : "{build_flavor}", - "build_type" : "zip", + "build_type" : "{build_type}", "build_hash" : "f27399d", "build_date" : "2016-03-30T09:51:41.449Z", "build_snapshot" : false, diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc index 9ec6b7bab054f..27233d8e4b7ab 100644 --- a/docs/plugins/discovery-ec2.asciidoc +++ b/docs/plugins/discovery-ec2.asciidoc @@ -1,34 +1,52 @@ [[discovery-ec2]] === EC2 Discovery Plugin -The EC2 discovery plugin uses the https://github.com/aws/aws-sdk-java[AWS API] -to identify the addresses of seed hosts. +The EC2 discovery plugin provides a list of seed addresses to the +{ref}/modules-discovery-hosts-providers.html[discovery process] by querying the +https://github.com/aws/aws-sdk-java[AWS API] for a list of EC2 instances +matching certain criteria determined by the <>. 
-*If you are looking for a hosted solution of Elasticsearch on AWS, please visit http://www.elastic.co/cloud.* +*If you are looking for a hosted solution of {es} on AWS, please visit +http://www.elastic.co/cloud.* :plugin_name: discovery-ec2 include::install_remove.asciidoc[] [[discovery-ec2-usage]] -==== Getting started with AWS +==== Using the EC2 discovery plugin -The plugin adds a seed hosts provider named `ec2`. This seed hosts provider -finds other Elasticsearch instances in EC2 by querying the AWS metadata -service. Authentication is done using -http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[IAM -Role] credentials by default. To enable the plugin, configure {es} to use the -`ec2` seed hosts provider: +The `discovery-ec2` plugin allows {es} to find the master-eligible nodes in a +cluster running on AWS EC2 by querying the +https://github.com/aws/aws-sdk-java[AWS API] for the addresses of the EC2 +instances running these nodes. + +It is normally a good idea to restrict the discovery process just to the +master-eligible nodes in the cluster. This plugin allows you to identify these +nodes by certain criteria including their tags, their membership of security +groups, and their placement within availability zones. The discovery process +will work correctly even if it finds master-ineligible nodes, but master +elections will be more efficient if this can be avoided. + +The interaction with the AWS API can be authenticated using the +http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[instance +role], or else custom credentials can be supplied. + +===== Enabling EC2 discovery + +To enable EC2 discovery, configure {es} to use the `ec2` seed hosts provider: [source,yaml] ---- discovery.seed_providers: ec2 ---- -==== Settings +===== Configuring EC2 discovery -EC2 discovery supports a number of settings. Some settings are sensitive and -must be stored in the {ref}/secure-settings.html[elasticsearch keystore]. For -example, to use explicit AWS access keys: +EC2 discovery supports a number of settings. Some settings are sensitive and +must be stored in the {ref}/secure-settings.html[{es} keystore]. For example, +to authenticate using a particular access key and secret key, add these keys to +the keystore by running the following commands: [source,sh] ---- @@ -36,132 +54,161 @@ bin/elasticsearch-keystore add discovery.ec2.access_key bin/elasticsearch-keystore add discovery.ec2.secret_key ---- -The following are the available discovery settings. All should be prefixed with `discovery.ec2.`. -Those that must be stored in the keystore are marked as `Secure`. +The available settings for the EC2 discovery plugin are as follows. + +`discovery.ec2.access_key` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: -`access_key`:: + An EC2 access key. If set, you must also set `discovery.ec2.secret_key`. + If unset, `discovery-ec2` will instead use the instance role. This setting + is sensitive and must be stored in the {es} keystore. - An ec2 access key. The `secret_key` setting must also be specified. (Secure) +`discovery.ec2.secret_key` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: -`secret_key`:: + An EC2 secret key. If set, you must also set `discovery.ec2.access_key`. + This setting is sensitive and must be stored in the {es} keystore. - An ec2 secret key. The `access_key` setting must also be specified. 
(Secure) +`discovery.ec2.session_token` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: -`session_token`:: - An ec2 session token. The `access_key` and `secret_key` settings must also - be specified. (Secure) + An EC2 session token. If set, you must also set `discovery.ec2.access_key` + and `discovery.ec2.secret_key`. This setting is sensitive and must be + stored in the {es} keystore. -`endpoint`:: +`discovery.ec2.endpoint`:: - The ec2 service endpoint to connect to. See - http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region. This - defaults to `ec2.us-east-1.amazonaws.com`. + The EC2 service endpoint to which to connect. See + http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region to find + the appropriate endpoint for the region. This setting defaults to + `ec2.us-east-1.amazonaws.com` which is appropriate for clusters running in + the `us-east-1` region. -`protocol`:: +`discovery.ec2.protocol`:: - The protocol to use to connect to ec2. Valid values are either `http` - or `https`. Defaults to `https`. + The protocol to use to connect to the EC2 service endpoint, which may be + either `http` or `https`. Defaults to `https`. -`proxy.host`:: +`discovery.ec2.proxy.host`:: - The host name of a proxy to connect to ec2 through. + The address or host name of an HTTP proxy through which to connect to EC2. + If not set, no proxy is used. -`proxy.port`:: +`discovery.ec2.proxy.port`:: - The port of a proxy to connect to ec2 through. + When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`, + this setting determines the port to use to connect to the proxy. Defaults to + `80`. -`proxy.username`:: +`discovery.ec2.proxy.username` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: - The username to connect to the `proxy.host` with. (Secure) + When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`, + this setting determines the username to use to connect to the proxy. When + not set, no username is used. This setting is sensitive and must be stored + in the {es} keystore. -`proxy.password`:: +`discovery.ec2.proxy.password` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: - The password to connect to the `proxy.host` with. (Secure) + When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`, + this setting determines the password to use to connect to the proxy. When + not set, no password is used. This setting is sensitive and must be stored + in the {es} keystore. -`read_timeout`:: +`discovery.ec2.read_timeout`:: - The socket timeout for connecting to ec2. The value should specify the unit. For example, - a value of `5s` specifies a 5 second timeout. The default value is 50 seconds. + The socket timeout for connections to EC2, + {ref}/common-options.html#time-units[including the units]. For example, a + value of `60s` specifies a 60-second timeout. Defaults to 50 seconds. -`groups`:: +`discovery.ec2.groups`:: - Either a comma separated list or array based list of (security) groups. - Only instances with the provided security groups will be used in the - cluster discovery. (NOTE: You could provide either group NAME or group - ID.) + A list of the names or IDs of the security groups to use for discovery. The + `discovery.ec2.any_group` setting determines the behaviour of this setting. 
+ Defaults to an empty list, meaning that security group membership is + ignored by EC2 discovery. -`host_type`:: +`discovery.ec2.any_group`:: + + Defaults to `true`, meaning that instances belonging to _any_ of the + security groups specified in `discovery.ec2.groups` will be used for + discovery. If set to `false`, only instances that belong to _all_ of the + security groups specified in `discovery.ec2.groups` will be used for + discovery. + +`discovery.ec2.host_type`:: + -- -The type of host type to use to communicate with other instances. Can be -one of `private_ip`, `public_ip`, `private_dns`, `public_dns` or `tag:TAGNAME` where -`TAGNAME` refers to a name of a tag configured for all EC2 instances. Instances which don't -have this tag set will be ignored by the discovery process. -For example if you defined a tag `my-elasticsearch-host` in ec2 and set it to `myhostname1.mydomain.com`, then -setting `host_type: tag:my-elasticsearch-host` will tell Discovery Ec2 plugin to read the host name from the -`my-elasticsearch-host` tag. In this case, it will be resolved to `myhostname1.mydomain.com`. -http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[Read more about EC2 Tags]. +Each EC2 instance has a number of different addresses that might be suitable +for discovery. This setting allows you to select which of these addresses is +used by the discovery process. It can be set to one of `private_ip`, +`public_ip`, `private_dns`, `public_dns` or `tag:TAGNAME` where `TAGNAME` +refers to a name of a tag. This setting defaults to `private_ip`. -Defaults to `private_ip`. --- +If you set `discovery.ec2.host_type` to a value of the form `tag:TAGNAME` then +the value of the tag `TAGNAME` attached to each instance will be used as that +instance's address for discovery. Instances which do not have this tag set will +be ignored by the discovery process. -`availability_zones`:: +For example if you tag some EC2 instances with a tag named +`elasticsearch-host-name` and set `host_type: tag:elasticsearch-host-name` then +the `discovery-ec2` plugin will read each instance's host name from the value +of the `elasticsearch-host-name` tag. +http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[Read more +about EC2 Tags]. - Either a comma separated list or array based list of availability zones. - Only instances within the provided availability zones will be used in the - cluster discovery. +-- -`any_group`:: +`discovery.ec2.availability_zones`:: - If set to `false`, will require all security groups to be present for the - instance to be used for the discovery. Defaults to `true`. + A list of the names of the availability zones to use for discovery. The + name of an availability zone is the + https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[region + code followed by a letter], such as `us-east-1a`. Only instances placed in + one of the given availability zones will be used for discovery. -`node_cache_time`:: +[[discovery-ec2-filtering]] +`discovery.ec2.tag.TAGNAME`:: - How long the list of hosts is cached to prevent further requests to the AWS API. - Defaults to `10s`. ++ +-- -*All* secure settings of this plugin are {ref}/secure-settings.html#reloadable-secure-settings[reloadable]. -After you reload the settings, an aws sdk client with the latest settings -from the keystore will be used. +A list of the values of a tag called `TAGNAME` to use for discovery. 
If set, +only instances that are tagged with one of the given values will be used for +discovery. For instance, the following settings will only use nodes with a +`role` tag set to `master` and an `environment` tag set to either `dev` or +`staging`. -[IMPORTANT] -.Binding the network host -============================================== +[source,yaml] +---- +discovery.ec2.tag.role: master +discovery.ec2.tag.environment: dev,staging +---- -It's important to define `network.host` as by default it's bound to `localhost`. +NOTE: The names of tags used for discovery may only contain ASCII letters, +numbers, hyphens and underscores. In particular you cannot use tags whose name +includes a colon. -You can use {ref}/modules-network.html[core network host settings] or -<>: +-- -============================================== +`discovery.ec2.node_cache_time`:: -[[discovery-ec2-network-host]] -===== EC2 Network Host + Sets the length of time for which the collection of discovered instances is + cached. {es} waits at least this long between requests for discovery + information from the EC2 API. AWS may reject discovery requests if they are + made too often, and this would cause discovery to fail. Defaults to `10s`. -When the `discovery-ec2` plugin is installed, the following are also allowed -as valid network host settings: +All **secure** settings of this plugin are +{ref}/secure-settings.html#reloadable-secure-settings[reloadable], allowing you +to update the secure settings for this plugin without needing to restart each +node. -[cols="<,<",options="header",] -|================================================================== -|EC2 Host Value |Description -|`_ec2:privateIpv4_` |The private IP address (ipv4) of the machine. -|`_ec2:privateDns_` |The private host of the machine. -|`_ec2:publicIpv4_` |The public IP address (ipv4) of the machine. -|`_ec2:publicDns_` |The public host of the machine. -|`_ec2:privateIp_` |equivalent to `_ec2:privateIpv4_`. -|`_ec2:publicIp_` |equivalent to `_ec2:publicIpv4_`. -|`_ec2_` |equivalent to `_ec2:privateIpv4_`. -|================================================================== [[discovery-ec2-permissions]] -===== Recommended EC2 Permissions +===== Recommended EC2 permissions -EC2 discovery requires making a call to the EC2 service. You'll want to setup -an IAM policy to allow this. You can create a custom policy via the IAM -Management Console. It should look similar to this. +The `discovery-ec2` plugin works by making a `DescribeInstances` call to the AWS +EC2 API. You must configure your AWS account to allow this, which is normally +done using an IAM policy. You can create a custom policy via the IAM Management +Console. It should look similar to this. [source,js] ---- @@ -182,60 +229,138 @@ Management Console. It should look similar to this. ---- // NOTCONSOLE -[[discovery-ec2-filtering]] -===== Filtering by Tags - -The ec2 discovery plugin can also filter machines to include in the cluster -based on tags (and not just groups). The settings to use include the -`discovery.ec2.tag.` prefix. For example, if you defined a tag `stage` in EC2 -and set it to `dev`, setting `discovery.ec2.tag.stage` to `dev` will only -filter instances with a tag key set to `stage`, and a value of `dev`. Adding -multiple `discovery.ec2.tag` settings will require all of those tags to be set -for the instance to be included. - -One practical use for tag filtering is when an ec2 cluster contains many nodes -that are not master-eligible {es} nodes. 
In this case, tagging the ec2 -instances that _are_ running the master-eligible {es} nodes, and then filtering -by that tag, will help discovery to run more efficiently. - [[discovery-ec2-attributes]] -===== Automatic Node Attributes +===== Automatic node attributes -Though not dependent on actually using `ec2` as discovery (but still requires the `discovery-ec2` plugin installed), the -plugin can automatically add node attributes relating to ec2. In the future this may support other attributes, but this will -currently only add an `aws_availability_zone` node attribute, which is the availability zone of the current node. Attributes -can be used to isolate primary and replica shards across availability zones by using the +The `discovery-ec2` plugin can automatically set the `aws_availability_zone` +node attribute to the availability zone of each node. This node attribute +allows you to ensure that each shard has copies allocated redundantly across +multiple availability zones by using the {ref}/allocation-awareness.html[Allocation Awareness] feature. -In order to enable it, set `cloud.node.auto_attributes` to `true` in the settings. For example: +In order to enable the automatic definition of the `aws_availability_zone` +attribute, set `cloud.node.auto_attributes` to `true`. For example: [source,yaml] ---- cloud.node.auto_attributes: true - cluster.routing.allocation.awareness.attributes: aws_availability_zone ---- +The `aws_availability_zone` attribute can be automatically set like this when +using any discovery type. It is not necessary to set `discovery.seed_providers: +ec2`. However this feature does require that the `discovery-ec2` plugin is +installed. + +[[discovery-ec2-network-host]] +===== Binding to the correct address + +It is important to define `network.host` correctly when deploying a cluster on +EC2. By default each {es} node only binds to `localhost`, which will prevent it +from being discovered by nodes running on any other instances. + +You can use the {ref}/modules-network.html[core network host settings] to bind +each node to the desired address, or you can set `network.host` to one of the +following EC2-specific settings provided by the `discovery-ec2` plugin: + +[cols="<,<",options="header",] +|================================================================== +|EC2 Host Value |Description +|`_ec2:privateIpv4_` |The private IP address (ipv4) of the machine. +|`_ec2:privateDns_` |The private host of the machine. +|`_ec2:publicIpv4_` |The public IP address (ipv4) of the machine. +|`_ec2:publicDns_` |The public host of the machine. +|`_ec2:privateIp_` |Equivalent to `_ec2:privateIpv4_`. +|`_ec2:publicIp_` |Equivalent to `_ec2:publicIpv4_`. +|`_ec2_` |Equivalent to `_ec2:privateIpv4_`. +|================================================================== + +These values are acceptable when using any discovery type. They do not require +you to set `discovery.seed_providers: ec2`. However they do require that the +`discovery-ec2` plugin is installed. + [[cloud-aws-best-practices]] ==== Best Practices in AWS -Collection of best practices and other information around running Elasticsearch on AWS. +This section contains some other information about designing and managing an +{es} cluster on your own AWS infrastructure. If you would prefer to avoid these +operational details then you may be interested in a hosted {es} installation +available on AWS-based infrastructure from http://www.elastic.co/cloud. 
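+
+As a starting point, the discovery settings described above are commonly
+combined in `elasticsearch.yml`. The following sketch is illustrative only:
+the `role` tag name and `master` value are examples rather than required
+settings, and you may not need every line shown here.
+
+[source,yaml]
+----
+network.host: _ec2:privateIpv4_
+discovery.seed_providers: ec2
+# illustrative tag filter; use your own tag name and value, or omit it
+discovery.ec2.tag.role: master
+cloud.node.auto_attributes: true
+cluster.routing.allocation.awareness.attributes: aws_availability_zone
+----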
+ +===== Storage + +EC2 instances offer a number of different kinds of storage. Please be aware of +the following when selecting the storage for your cluster: + +* http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html[Instance +Store] is recommended for {es} clusters as it offers excellent performance and +is cheaper than EBS-based storage. {es} is designed to work well with this kind +of ephemeral storage because it replicates each shard across multiple nodes. If +a node fails and its Instance Store is lost then {es} will rebuild any lost +shards from other copies. -===== Instance/Disk -When selecting disk please be aware of the following order of preference: +* https://aws.amazon.com/ebs/[EBS-based storage] may be acceptable +for smaller clusters (1-2 nodes). Be sure to use provisioned IOPS to ensure +your cluster has satisfactory performance. -* https://aws.amazon.com/efs/[EFS] - Avoid as the sacrifices made to offer durability, shared storage, and grow/shrink come at performance cost, such file systems have been known to cause corruption of indices, and due to Elasticsearch being distributed and having built-in replication, the benefits that EFS offers are not needed. -* https://aws.amazon.com/ebs/[EBS] - Works well if running a small cluster (1-2 nodes) and cannot tolerate the loss all storage backing a node easily or if running indices with no replicas. If EBS is used, then leverage provisioned IOPS to ensure performance. -* http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html[Instance Store] - When running clusters of larger size and with replicas the ephemeral nature of Instance Store is ideal since Elasticsearch can tolerate the loss of shards. With Instance Store one gets the performance benefit of having disk physically attached to the host running the instance and also the cost benefit of avoiding paying extra for EBS. +* https://aws.amazon.com/efs/[EFS-based storage] is not +recommended or supported as it does not offer satisfactory performance. +Historically, shared network filesystems such as EFS have not always offered +precisely the behaviour that {es} requires of its filesystem, and this has been +known to lead to index corruption. Although EFS offers durability, shared +storage, and the ability to grow and shrink filesystems dynamically, you can +achieve the same benefits using {es} directly. +===== Choice of AMI -Prefer https://aws.amazon.com/amazon-linux-ami/[Amazon Linux AMIs] as since Elasticsearch runs on the JVM, OS dependencies are very minimal and one can benefit from the lightweight nature, support, and performance tweaks specific to EC2 that the Amazon Linux AMIs offer. +Prefer the https://aws.amazon.com/amazon-linux-ami/[Amazon Linux AMIs] as these +allow you to benefit from the lightweight nature, support, and EC2-specific +performance enhancements that these images offer. ===== Networking -* Networking throttling takes place on smaller instance types in both the form of https://lab.getbase.com/how-we-discovered-limitations-on-the-aws-tcp-stack/[bandwidth and number of connections]. Therefore if large number of connections are needed and networking is becoming a bottleneck, avoid https://aws.amazon.com/ec2/instance-types/[instance types] with networking labeled as `Moderate` or `Low`.
-* When running in multiple http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability zones] be sure to leverage {ref}/allocation-awareness.html[shard allocation awareness] so that not all copies of shard data reside in the same availability zone. -* Do not span a cluster across regions. If necessary, use a cross cluster search. -===== Misc -* If you have split your nodes into roles, consider https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[tagging the EC2 instances] by role to make it easier to filter and view your EC2 instances in the AWS console. -* Consider https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingDisableAPITermination[enabling termination protection] for all of your instances to avoid accidentally terminating a node in the cluster and causing a potentially disruptive reallocation. +* Smaller instance types have limited network performance, in terms of both +https://lab.getbase.com/how-we-discovered-limitations-on-the-aws-tcp-stack/[bandwidth +and number of connections]. If networking is a bottleneck, avoid +https://aws.amazon.com/ec2/instance-types/[instance types] with networking +labelled as `Moderate` or `Low`. + +* It is a good idea to distribute your nodes across multiple +http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability +zones] and use {ref}/allocation-awareness.html[shard allocation awareness] to +ensure that each shard has copies in more than one availability zone. + +* Do not span a cluster across regions. {es} expects that node-to-node +connections within a cluster are reasonably reliable and offer high bandwidth +and low latency, and these properties do not hold for connections between +regions. Although an {es} cluster will behave correctly when node-to-node +connections are unreliable or slow, it is not optimised for this case and its +performance may suffer. If you wish to geographically distribute your data, you +should provision multiple clusters and use features such as +{ref}/modules-cross-cluster-search.html[cross-cluster search] and +{stack-ov}/xpack-ccr.html[cross-cluster replication]. + +===== Other recommendations + +* If you have split your nodes into roles, consider +https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[tagging the +EC2 instances] by role to make it easier to filter and view your EC2 instances +in the AWS console. + +* Consider +https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingDisableAPITermination[enabling +termination protection] for all of your data and master-eligible nodes. This +will help to prevent accidental termination of these nodes which could +temporarily reduce the resilience of the cluster and which could cause a +potentially disruptive reallocation of shards. + +* If running your cluster using one or more +https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroup.html[auto-scaling +groups], consider protecting your data and master-eligible nodes +https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection-instance[against +termination during scale-in]. This will help to prevent automatic termination +of these nodes which could temporarily reduce the resilience of the cluster and +which could cause a potentially disruptive reallocation of shards. 
If these +instances are protected against termination during scale-in then you can use +{ref}/shard-allocation-filtering.html[shard allocation filtering] to gracefully +migrate any data off these nodes before terminating them manually. diff --git a/docs/plugins/integrations.asciidoc b/docs/plugins/integrations.asciidoc index 6d543408f679f..5bb4c3260ac9d 100644 --- a/docs/plugins/integrations.asciidoc +++ b/docs/plugins/integrations.asciidoc @@ -72,6 +72,9 @@ releases 2.0 and later do not support rivers. * https://github.com/dadoonet/fscrawler[FS Crawler]: The File System (FS) crawler allows to index documents (PDF, Open Office...) from your local file system and over SSH. (by David Pilato) +* https://github.com/senacor/elasticsearch-evolution[Elasticsearch Evolution]: + A library to migrate elasticsearch mappings. + [float] [[deployment]] === Deployment @@ -108,13 +111,9 @@ releases 2.0 and later do not support rivers. * https://camel.apache.org/elasticsearch.html[Apache Camel Integration]: An Apache camel component to integrate Elasticsearch -* https://metacpan.org/release/Catmandu-Store-ElasticSearch[Catmanadu]: +* https://metacpan.org/pod/Catmandu::Store::ElasticSearch[Catmandu]: An Elasticsearch backend for the Catmandu framework. -* https://github.com/tlrx/elasticsearch-test[elasticsearch-test]: - Elasticsearch Java annotations for unit testing with - http://www.junit.org/[JUnit] - * https://github.com/FriendsOfSymfony/FOSElasticaBundle[FOSElasticaBundle]: Symfony2 Bundle wrapping Elastica. @@ -127,9 +126,6 @@ releases 2.0 and later do not support rivers. * http://hibernate.org/search/[Hibernate Search] Integration with Hibernate ORM, from the Hibernate team. Automatic synchronization of write operations, yet exposes full Elasticsearch capabilities for queries. Can return either Elasticsearch native or re-map queries back into managed entities loaded within transaction from the reference database. -* https://github.com/cleverage/play2-elasticsearch[play2-elasticsearch]: - Elasticsearch module for Play Framework 2.x - * https://github.com/spring-projects/spring-data-elasticsearch[Spring Data Elasticsearch]: Spring Data implementation for Elasticsearch @@ -139,6 +135,27 @@ releases 2.0 and later do not support rivers. * https://github.com/twitter/storehaus[Twitter Storehaus]: Thin asynchronous Scala client for Storehaus. +* https://zeebe.io[Zeebe]: + An Elasticsearch exporter that acts as a bridge between Zeebe and Elasticsearch + +* https://pulsar.apache.org/docs/en/io-elasticsearch[Apache Pulsar]: + The Elasticsearch Sink Connector is used to pull messages from Pulsar topics + and persist the messages to an index. + +* https://micronaut-projects.github.io/micronaut-elasticsearch/latest/guide/index.html[Micronaut Elasticsearch Integration]: + Integration of Micronaut with Elasticsearch + +* https://docs.streampipes.org/docs/user-guide-introduction[StreamPipes]: + StreamPipes is a framework that enables users to work with data streams, allowing them to store data in Elasticsearch. + +* https://metamodel.apache.org/[Apache MetaModel]: + Providing a common interface for discovery, exploration of metadata and querying of different types of data sources. + +* https://jooby.org/doc/elasticsearch/[Jooby Framework]: + Scalable, fast and modular micro web framework for Java. + +* https://micrometer.io[Micrometer]: + Vendor-neutral application metrics facade. Think SLF4j, but for metrics. [float] [[hadoop-integrations]] @@ -151,6 +168,12 @@ releases 2.0 and later do not support rivers.
search and analytics natively integrated with Hadoop. Supports Map/Reduce, Cascading, Apache Hive, Apache Pig, Apache Spark and Apache Storm. +[float] +==== Supported by the community: + +* https://github.com/criteo/garmadon[Garmadon]: + Garmadon is a solution for Hadoop Cluster realtime introspection. + [float] [[monitoring-integrations]] @@ -159,25 +182,9 @@ releases 2.0 and later do not support rivers. [float] ==== Supported by the community: -* https://github.com/anchor/nagios-plugin-elasticsearch[check_elasticsearch]: - An Elasticsearch availability and performance monitoring plugin for - Nagios. - * https://github.com/radu-gheorghe/check-es[check-es]: Nagios/Shinken plugins for checking on Elasticsearch -* https://github.com/mattweber/es2graphite[es2graphite]: - Send cluster and indices stats and status to Graphite for monitoring and graphing. - - -* https://itunes.apple.com/us/app/elasticocean/id955278030?ls=1&mt=8[ElasticOcean]: - Elasticsearch & DigitalOcean iOS Real-Time Monitoring tool to keep an eye on DigitalOcean Droplets or Elasticsearch instances or both of them on-a-go. - -* https://github.com/rbramley/Opsview-elasticsearch[opsview-elasticsearch]: - Opsview plugin written in Perl for monitoring Elasticsearch - -* https://scoutapp.com[Scout]: Provides plugins for monitoring Elasticsearch https://scoutapp.com/plugin_urls/1331-elasticsearch-node-status[nodes], https://scoutapp.com/plugin_urls/1321-elasticsearch-cluster-status[clusters], and https://scoutapp.com/plugin_urls/1341-elasticsearch-index-status[indices]. - * http://sematext.com/spm/index.html[SPM for Elasticsearch]: Performance monitoring with live charts showing cluster and node stats, integrated alerts, email reports, etc. @@ -190,23 +197,9 @@ releases 2.0 and later do not support rivers. [float] ==== Supported by the community: -* https://github.com/kodcu/pes[Pes]: - A pluggable elastic JavaScript query DSL builder for Elasticsearch - * https://www.wireshark.org/[Wireshark]: - Protocol dissection for Zen discovery, HTTP and the binary protocol + Protocol dissection for HTTP and the transport protocol * https://www.itemsapi.com/[ItemsAPI]: Search backend for mobile and web - -These projects appear to have been abandoned: - -* http://www.github.com/neogenix/daikon[daikon]: - Daikon Elasticsearch CLI - -* https://github.com/fullscale/dangle[dangle]: - A set of AngularJS directives that provide common visualizations for Elasticsearch based on - D3. 
-* https://github.com/OlegKunitsyn/eslogd[eslogd]: - Linux daemon that replicates events to a central Elasticsearch server in realtime diff --git a/docs/plugins/mapper-annotated-text.asciidoc b/docs/plugins/mapper-annotated-text.asciidoc index a148cec76bac0..e07bee3c480a7 100644 --- a/docs/plugins/mapper-annotated-text.asciidoc +++ b/docs/plugins/mapper-annotated-text.asciidoc @@ -115,12 +115,12 @@ in this example where a search for `Beck` will not match `Jeff Beck` : # Example documents PUT my_index/_doc/1 { - "my_field": "[Beck](Beck) announced a new tour"<2> + "my_field": "[Beck](Beck) announced a new tour"<1> } PUT my_index/_doc/2 { - "my_field": "[Jeff Beck](Jeff+Beck&Guitarist) plays a strat"<1> + "my_field": "[Jeff Beck](Jeff+Beck&Guitarist) plays a strat"<2> } # Example search diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc index 0612d3992af17..950a0f86e4149 100644 --- a/docs/plugins/plugin-script.asciidoc +++ b/docs/plugins/plugin-script.asciidoc @@ -5,7 +5,8 @@ The `plugin` script is used to install, list, and remove plugins. It is located in the `$ES_HOME/bin` directory by default but it may be in a different location depending on which Elasticsearch package you installed: -* {ref}/zip-targz.html#zip-targz-layout[Directory layout of `.zip` and `.tar.gz` archives] +* {ref}/targz.html#targz-layout[Directory layout of `.tar.gz` archives] +* {ref}/zip-windows.html#windows-layout[Directory layout of Windows `.zip` archives] * {ref}/deb.html#deb-layout[Directory layout of Debian package] * {ref}/rpm.html#rpm-layout[Directory layout of RPM] @@ -224,7 +225,8 @@ bin\elasticsearch-plugin install analysis-icu The default location of the `plugins` directory depends on which package you install: -* {ref}/zip-targz.html#zip-targz-layout[Directory layout of `.zip` and `.tar.gz` archives] +* {ref}/targz.html#targz-layout[Directory layout of `.tar.gz` archives] +* {ref}/zip-windows.html#windows-layout[Directory layout of Windows `.zip` archives] * {ref}/deb.html#deb-layout[Directory layout of Debian package] * {ref}/rpm.html#rpm-layout[Directory layout of RPM] diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc index df09b28093c80..70df8d0b22557 100644 --- a/docs/plugins/repository-azure.asciidoc +++ b/docs/plugins/repository-azure.asciidoc @@ -19,9 +19,6 @@ bin/elasticsearch-keystore add azure.client.default.account bin/elasticsearch-keystore add azure.client.default.key ---------------------------------------------------------------- -Where `account` is the azure account name and `key` the azure secret key. -These settings are used by the repository's internal azure client. - Note that you can also define more than one account: [source,sh] @@ -32,42 +29,8 @@ bin/elasticsearch-keystore add azure.client.secondary.account bin/elasticsearch-keystore add azure.client.secondary.key ---------------------------------------------------------------- -`default` is the default account name which will be used by a repository, -unless you set an explicit one in the -<>. - -Both `account` and `key` storage settings are -{ref}/secure-settings.html#reloadable-secure-settings[reloadable]. After you -reload the settings, the internal azure clients, which are used to transfer the -snapshot, will utilize the latest settings from the keystore. - -NOTE: In progress snapshot/restore jobs will not be preempted by a *reload* -of the storage secure settings. They will complete using the client as it was built -when the operation started. 
- -You can set the client side timeout to use when making any single request. It can be defined globally, per account or both. -It's not set by default which means that Elasticsearch is using the -http://azure.github.io/azure-storage-java/com/microsoft/azure/storage/RequestOptions.html#setTimeoutIntervalInMs(java.lang.Integer)[default value] -set by the azure client (known as 5 minutes). - -`max_retries` can help to control the exponential backoff policy. It will fix the number of retries -in case of failures before considering the snapshot is failing. Defaults to `3` retries. -The initial backoff period is defined by Azure SDK as `30s`. Which means `30s` of wait time -before retrying after a first timeout or failure. The maximum backoff period is defined by Azure SDK as -`90s`. - -`endpoint_suffix` can be used to specify Azure endpoint suffix explicitly. Defaults to `core.windows.net`. - -[source,yaml] ----- -azure.client.default.timeout: 10s -azure.client.default.max_retries: 7 -azure.client.default.endpoint_suffix: core.chinacloudapi.cn -azure.client.secondary.timeout: 30s ----- - -In this example, timeout will be `10s` per try for `default` with `7` retries before failing -and endpoint suffix will be `core.chinacloudapi.cn` and `30s` per try for `secondary` with `3` retries. +For more information about these settings, see +<>. [IMPORTANT] .Supported Azure Storage Account types @@ -82,18 +45,96 @@ The Azure Repository plugin works with all Standard storage accounts https://azure.microsoft.com/en-gb/documentation/articles/storage-premium-storage[Premium Locally Redundant Storage] (`Premium_LRS`) is **not supported** as it is only usable as VM disk storage, not as general storage. =============================================== -You can register a proxy per client using the following settings: +[[repository-azure-client-settings]] +==== Client settings -[source,yaml] +The client that you use to connect to Azure has a number of settings available. +The settings have the form `azure.client.CLIENT_NAME.SETTING_NAME`. By default, +`azure` repositories use a client named `default`, but this can be modified using +the <> `client`. +For example: + +[source,js] ---- -azure.client.default.proxy.host: proxy.host -azure.client.default.proxy.port: 8888 -azure.client.default.proxy.type: http +PUT _snapshot/my_backup +{ + "type": "azure", + "settings": { + "client": "secondary" + } +} ---- +// CONSOLE +// TEST[skip:we don't have azure setup while testing this] -Supported values for `proxy.type` are `direct` (default), `http` or `socks`. -When `proxy.type` is set to `http` or `socks`, `proxy.host` and `proxy.port` must be provided. +Most client settings can be added to the `elasticsearch.yml` configuration file. +For example: +[source,yaml] +---- +azure.client.default.timeout: 10s +azure.client.default.max_retries: 7 +azure.client.default.endpoint_suffix: core.chinacloudapi.cn +azure.client.secondary.timeout: 30s +---- + +In this example, the client side timeout is `10s` per try for the `default` +account with `7` retries before failing. The endpoint suffix is +`core.chinacloudapi.cn` and `30s` per try for the `secondary` account with `3` +retries. + +The `account` and `key` storage settings are reloadable secure settings, which +you add to the {es} keystore. For more information about creating and updating +the {es} keystore, see +{ref}/secure-settings.html[Secure settings]. 
After you reload the settings, the +internal Azure clients, which are used to transfer the snapshot, utilize the +latest settings from the keystore. + +NOTE: In progress snapshot or restore jobs will not be preempted by a *reload* +of the storage secure settings. They will complete using the client as it was +built when the operation started. + +The following list contains the available client settings. Those that must be +stored in the keystore are marked as "secure"; the other settings belong in the +`elasticsearch.yml` file. + +`account` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: + The Azure account name, which is used by the repository's internal Azure client. + +`endpoint_suffix`:: + The Azure endpoint suffix to connect to. The default value is + `core.windows.net`. + +`key` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: + The Azure secret key, which is used by the repository's internal Azure client. + +`max_retries`:: + The number of retries to use when an Azure request fails. This setting helps + control the exponential backoff policy. It specifies the number of retries + that must occur before the snapshot fails. The default value is `3`. The + initial backoff period is defined by Azure SDK as `30s`. Thus there is `30s` + of wait time before retrying after a first timeout or failure. The maximum + backoff period is defined by Azure SDK as `90s`. + +`proxy.host`:: + The host name of a proxy to connect to Azure through. For example: `azure.client.default.proxy.host: proxy.host`. + +`proxy.port`:: + The port of a proxy to connect to Azure through. For example, `azure.client.default.proxy.port: 8888`. + +`proxy.type`:: + Register a proxy type for the client. Supported values are `direct`, `http`, + and `socks`. For example: `azure.client.default.proxy.type: http`. When + `proxy.type` is set to `http` or `socks`, `proxy.host` and `proxy.port` must + also be provided. The default value is `direct`. + +`timeout`:: + The client side timeout for any single request to Azure. The value should + specify the time unit. For example, a value of `5s` specifies a 5 second + timeout. There is no default value, which means that {es} uses the + http://azure.github.io/azure-storage-java/com/microsoft/azure/storage/RequestOptions.html#setTimeoutIntervalInMs(java.lang.Integer)[default value] + set by the Azure client (known as 5 minutes). This setting can be defined + globally, per account, or both. [[repository-azure-repository-settings]] ==== Repository settings @@ -117,8 +158,8 @@ The Azure repository supports following settings: `chunk_size`:: Big files can be broken down into chunks during snapshotting if needed. - The chunk size can be specified in bytes or by using size value notation, - i.e. `1g`, `10m`, `5k`. Defaults to `64m` (64m max) + Specify the chunk size as a value and unit, for example: + `10MB`, `5KB`, `500B`. Defaults to `64MB` (64MB max). `compress`:: @@ -126,9 +167,7 @@ The Azure repository supports following settings: setting doesn't affect index files that are already compressed by default. Defaults to `false`. -`readonly`:: - - Makes repository read-only. Defaults to `false`. 
+include::repository-shared-settings.asciidoc[] `location_mode`:: @@ -152,7 +191,7 @@ PUT _snapshot/my_backup2 "settings": { "container": "backup-container", "base_path": "backups", - "chunk_size": "32m", + "chunk_size": "32MB", "compress": true } } diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc index e3978e65f4476..bf8a1eb414007 100644 --- a/docs/plugins/repository-gcs.asciidoc +++ b/docs/plugins/repository-gcs.asciidoc @@ -11,8 +11,8 @@ include::install_remove.asciidoc[] ==== Getting started The plugin uses the https://github.com/GoogleCloudPlatform/google-cloud-java/tree/master/google-cloud-clients/google-cloud-storage[Google Cloud Java Client for Storage] -to connect to the Storage service. If you are using -https://cloud.google.com/storage/[Google Cloud Storage] for the first time, you +to connect to the Storage service. If you are using +https://cloud.google.com/storage/[Google Cloud Storage] for the first time, you must connect to the https://console.cloud.google.com/[Google Cloud Platform Console] and create a new project. After your project is created, you must enable the Cloud Storage Service for your project. @@ -20,10 +20,10 @@ Cloud Storage Service for your project. [[repository-gcs-creating-bucket]] ===== Creating a Bucket -The Google Cloud Storage service uses the concept of a -https://cloud.google.com/storage/docs/key-terms[bucket] as a container for all -the data. Buckets are usually created using the -https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin +The Google Cloud Storage service uses the concept of a +https://cloud.google.com/storage/docs/key-terms[bucket] as a container for all +the data. Buckets are usually created using the +https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin does not automatically create buckets. To create a new bucket: @@ -43,12 +43,12 @@ https://cloud.google.com/storage/docs/quickstart-console#create_a_bucket[Google [[repository-gcs-service-authentication]] ===== Service Authentication -The plugin must authenticate the requests it makes to the Google Cloud Storage +The plugin must authenticate the requests it makes to the Google Cloud Storage service. It is common for Google client libraries to employ a strategy named https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application[application default credentials]. -However, that strategy is **not** supported for use with Elasticsearch. The -plugin operates under the Elasticsearch process, which runs with the security +However, that strategy is **not** supported for use with Elasticsearch. The +plugin operates under the Elasticsearch process, which runs with the security manager enabled. The security manager obstructs the "automatic" credential discovery. -Therefore, you must configure <> +Therefore, you must configure <> credentials even if you are using an environment that does not normally require this configuration (such as Compute Engine, Kubernetes Engine or App Engine). @@ -64,10 +64,10 @@ Here is a summary of the steps: 1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]. 2. Select your project. -3. Got to the https://console.cloud.google.com/permissions[Permission] tab. +3. Go to the https://console.cloud.google.com/permissions[Permission] tab. 4. Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab. 5. Click *Create service account*. -6. 
After the account is created, select it and download a JSON key file. +6. After the account is created, select it and download a JSON key file. A JSON service account file looks like this: @@ -88,16 +88,17 @@ A JSON service account file looks like this: ---- // NOTCONSOLE -To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore]. You must add a setting name of the form `gcs.client.NAME.credentials_file`, where `NAME` -is the name of the client configuration for the repository. The implicit client +To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore]. You must +add a `file` setting with the name `gcs.client.NAME.credentials_file` using the `add-file` subcommand. + `NAME` is the name of the client configuration for the repository. The implicit client name is `default`, but a different client name can be specified in the -repository settings with the `client` key. +repository settings with the `client` key. -NOTE: Passing the file path via the GOOGLE_APPLICATION_CREDENTIALS environment +NOTE: Passing the file path via the GOOGLE_APPLICATION_CREDENTIALS environment variable is **not** supported. -For example, if you added a `gcs.client.my_alternate_client.credentials_file` -setting in the keystore, you can configure a repository to use those credentials +For example, if you added a `gcs.client.my_alternate_client.credentials_file` +setting in the keystore, you can configure a repository to use those credentials like this: [source,js] @@ -115,11 +116,11 @@ PUT _snapshot/my_gcs_repository // TEST[skip:we don't have gcs setup while testing this] The `credentials_file` settings are {ref}/secure-settings.html#reloadable-secure-settings[reloadable]. -After you reload the settings, the internal `gcs` clients, which are used to +After you reload the settings, the internal `gcs` clients, which are used to transfer the snapshot contents, utilize the latest settings from the keystore. NOTE: Snapshot or restore jobs that are in progress are not preempted by a *reload* -of the client's `credentials_file` settings. They complete using the client as +of the client's `credentials_file` settings. They complete using the client as it was built when the operation started. [[repository-gcs-client]] @@ -151,15 +152,15 @@ Some settings are sensitive and must be stored in the [source,sh] ---- -bin/elasticsearch-keystore add-file gcs.client.default.credentials_file +bin/elasticsearch-keystore add-file gcs.client.default.credentials_file /path/service-account.json ---- The following are the available client settings. Those that must be stored in the keystore are marked as `Secure`. -`credentials_file`:: +`credentials_file` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: - The service account file that is used to authenticate to the Google Cloud Storage service. (Secure) + The service account file that is used to authenticate to the Google Cloud Storage service. `endpoint`:: @@ -231,8 +232,8 @@ The following settings are supported: `chunk_size`:: Big files can be broken down into chunks during snapshotting if needed. - The chunk size can be specified in bytes or by using size value notation, - i.e. `1g`, `10m`, `5k`. Defaults to `100m`. + Specify the chunk size as a value and unit, for example: + `10MB` or `5KB`. Defaults to `100MB`, which is the maximum permitted. 
`compress`:: @@ -240,9 +241,12 @@ The following settings are supported: setting doesn't affect index files that are already compressed by default. Defaults to `false`. +include::repository-shared-settings.asciidoc[] + `application_name`:: - deprecated[7.0.0, This setting is now defined in the <>] + deprecated:[6.3.0, "This setting is now defined in the <>."] + Name used by the client when it uses the Google Cloud Storage service. [[repository-gcs-bucket-permission]] ===== Recommended Bucket Permission diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc index 005cc30895552..d903d90570b77 100644 --- a/docs/plugins/repository-hdfs.asciidoc +++ b/docs/plugins/repository-hdfs.asciidoc @@ -64,6 +64,8 @@ The following settings are supported: Whether to compress the metadata or not. (Disabled by default) +include::repository-shared-settings.asciidoc[] + `chunk_size`:: Override the chunk size. (Disabled by default) diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index e7ab83ca6e69b..0a753163080e3 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -1,21 +1,25 @@ [[repository-s3]] === S3 Repository Plugin -The S3 repository plugin adds support for using S3 as a repository for +The S3 repository plugin adds support for using AWS S3 as a repository for {ref}/modules-snapshots.html[Snapshot/Restore]. -*If you are looking for a hosted solution of Elasticsearch on AWS, please visit http://www.elastic.co/cloud.* +*If you are looking for a hosted solution of Elasticsearch on AWS, please visit +http://www.elastic.co/cloud.* :plugin_name: repository-s3 include::install_remove.asciidoc[] [[repository-s3-usage]] -==== Getting started with AWS +==== Getting Started -The plugin provides a repository type named `s3` which may be used when creating a repository. -The repository defaults to using https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html[ECS IAM Role] or -http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[EC2 IAM Role] -credentials for authentication. The only mandatory setting is the bucket name: +The plugin provides a repository type named `s3` which may be used when creating +a repository. The repository defaults to using +https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html[ECS +IAM Role] or +http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[EC2 +IAM Role] credentials for authentication. The only mandatory setting is the +bucket name: [source,js] ---- @@ -30,14 +34,13 @@ PUT _snapshot/my_s3_repository // CONSOLE // TEST[skip:we don't have s3 setup while testing this] - [[repository-s3-client]] ==== Client Settings -The client that you use to connect to S3 has a number of settings available. The -settings have the form `s3.client.CLIENT_NAME.SETTING_NAME`. The default client -name that is looked up by an `s3` repository is `default`. It can be modified -using the <> `client`. For example: +The client that you use to connect to S3 has a number of settings available. +The settings have the form `s3.client.CLIENT_NAME.SETTING_NAME`. By default, +`s3` repositories use a client named `default`, but this can be modified using +the <> `client`. 
For example: [source,js] ---- @@ -51,20 +54,35 @@ PUT _snapshot/my_s3_repository } ---- // CONSOLE -// TEST[skip:we don't have s3 setup while testing this] +// TEST[skip:we don't have S3 setup while testing this] Most client settings can be added to the `elasticsearch.yml` configuration file with the exception of the secure settings, which you add to the {es} keystore. For more information about creating and updating the {es} keystore, see {ref}/secure-settings.html[Secure settings]. -For example, before you start the node, run these commands to add AWS access key -settings to the keystore: +For example, if you want to use specific credentials to access S3 then run the +following commands to add these credentials to the keystore: [source,sh] ---- bin/elasticsearch-keystore add s3.client.default.access_key bin/elasticsearch-keystore add s3.client.default.secret_key +# a session token is optional so the following command may not be needed +bin/elasticsearch-keystore add s3.client.default.session_token +---- + +If instead you want to use the instance role or container role to access S3 +then you should leave these settings unset. You can switch from using specific +credentials back to the default of using the instance role or container role by +removing these settings from the keystore as follows: + +[source,sh] +---- +bin/elasticsearch-keystore remove s3.client.default.access_key +bin/elasticsearch-keystore remove s3.client.default.secret_key +# a session token is optional so the following command may not be needed +bin/elasticsearch-keystore remove s3.client.default.session_token ---- *All* client secure settings of this plugin are @@ -74,71 +92,97 @@ contents, will utilize the latest settings from the keystore. Any existing `s3` repositories, as well as any newly created ones, will pick up the new values stored in the keystore. -NOTE: In progress snapshot/restore tasks will not be preempted by a *reload* -of the client's secure settings. The task will complete using the client as it -was built when the operation started. +NOTE: In-progress snapshot/restore tasks will not be preempted by a *reload* of +the client's secure settings. The task will complete using the client as it was +built when the operation started. The following list contains the available client settings. Those that must be stored in the keystore are marked as "secure" and are *reloadable*; the other settings belong in the `elasticsearch.yml` file. -`access_key` ({ref}/secure-settings.html[Secure]):: +`access_key` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: - An s3 access key. The `secret_key` setting must also be specified. + An S3 access key. If set, the `secret_key` setting must also be specified. + If unset, the client will use the instance or container role instead. -`secret_key` ({ref}/secure-settings.html[Secure]):: +`secret_key` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: - An s3 secret key. The `access_key` setting must also be specified. + An S3 secret key. If set, the `access_key` setting must also be specified. -`session_token`:: - An s3 session token. The `access_key` and `secret_key` settings must also - be specified. (Secure) +`session_token` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: + + An S3 session token. If set, the `access_key` and `secret_key` settings + must also be specified. 
`endpoint`:: - The s3 service endpoint to connect to. This will be automatically - figured out by the s3 client based on the bucket location, but - can be specified explicitly. See http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region. + The S3 service endpoint to connect to. This defaults to `s3.amazonaws.com` + but the + http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region[AWS + documentation] lists alternative S3 endpoints. If you are using an + <> then you should + set this to the service's endpoint. `protocol`:: - The protocol to use to connect to s3. Valid values are either `http` - or `https`. Defaults to `https`. + The protocol to use to connect to S3. Valid values are either `http` or + `https`. Defaults to `https`. `proxy.host`:: - The host name of a proxy to connect to s3 through. + The host name of a proxy to connect to S3 through. `proxy.port`:: - The port of a proxy to connect to s3 through. + The port of a proxy to connect to S3 through. -`proxy.username` ({ref}/secure-settings.html[Secure]):: +`proxy.username` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: - The username to connect to the `proxy.host` with. + The username to connect to the `proxy.host` with. -`proxy.password` ({ref}/secure-settings.html[Secure]):: +`proxy.password` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable]):: - The password to connect to the `proxy.host` with. + The password to connect to the `proxy.host` with. `read_timeout`:: - The socket timeout for connecting to s3. The value should specify the unit. For example, - a value of `5s` specifies a 5 second timeout. The default value is 50 seconds. + The socket timeout for connecting to S3. The value should specify the unit. + For example, a value of `5s` specifies a 5 second timeout. The default value + is 50 seconds. `max_retries`:: - The number of retries to use when an s3 request fails. The default value is 3. + The number of retries to use when an S3 request fails. The default value is + `3`. `use_throttle_retries`:: - Whether retries should be throttled (ie use backoff). Must be `true` or `false`. Defaults to `true`. + Whether retries should be throttled (i.e. should back off). Must be `true` + or `false`. Defaults to `true`. + +[float] +[[repository-s3-compatible-services]] +===== S3-compatible services + +There are a number of storage systems that provide an S3-compatible API, and +the `repository-s3` plugin allows you to use these systems in place of AWS S3. +To do so, you should set the `s3.client.CLIENT_NAME.endpoint` setting to the +system's endpoint. This setting accepts IP addresses and hostnames and may +include a port. For example, the endpoint may be `172.17.0.2` or +`172.17.0.2:9000`. You may also need to set `s3.client.CLIENT_NAME.protocol` to +`http` if the endpoint does not support HTTPS. + +https://minio.io[Minio] is an example of a storage system that provides an +S3-compatible API. The `repository-s3` plugin allows {es} to work with +Minio-backed repositories as well as repositories stored on AWS S3. Other +S3-compatible storage systems may also work with {es}, but these are not tested +or supported. [[repository-s3-repository]] ==== Repository Settings -The `s3` repository type supports a number of settings to customize how data is stored in S3. -These can be specified when creating the repository. 
For example: +The `s3` repository type supports a number of settings to customize how data is +stored in S3. These can be specified when creating the repository. For example: [source,js] ---- @@ -152,7 +196,7 @@ PUT _snapshot/my_s3_repository } ---- // CONSOLE -// TEST[skip:we don't have s3 set up while testing this] +// TEST[skip:we don't have S3 set up while testing this] The following settings are supported: @@ -162,21 +206,21 @@ The following settings are supported: `client`:: - The name of the s3 client to use to connect to S3. Defaults to `default`. + The name of the <> to use to connect to S3. + Defaults to `default`. `base_path`:: - Specifies the path within bucket to repository data. Defaults to - value of `repositories.s3.base_path` or to root directory if not set. - Previously, the base_path could take a leading `/` (forward slash). - However, this has been deprecated and setting the base_path now should - omit the leading `/`. + Specifies the path within bucket to repository data. Defaults to value of + `repositories.s3.base_path` or to root directory if not set. Previously, + the base_path could take a leading `/` (forward slash). However, this has + been deprecated and setting the base_path now should omit the leading `/`. `chunk_size`:: Big files can be broken down into chunks during snapshotting if needed. - The chunk size can be specified in bytes or by using size value notation, - i.e. `1gb`, `10mb`, `5kb`. Defaults to `1gb`. + Specify the chunk size as a value and unit, for example: + `1GB`, `10MB`, `5KB`, `500B`. Defaults to `1GB`. `compress`:: @@ -184,6 +228,8 @@ The following settings are supported: setting doesn't affect index files that are already compressed by default. Defaults to `false`. +include::repository-shared-settings.asciidoc[] + `server_side_encryption`:: When set to `true` files are encrypted on server side using AES256 @@ -191,41 +237,49 @@ The following settings are supported: `buffer_size`:: - Minimum threshold below which the chunk is uploaded using a single - request. Beyond this threshold, the S3 repository will use the - http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html[AWS Multipart Upload API] - to split the chunk into several parts, each of `buffer_size` length, and - to upload each part in its own request. Note that setting a buffer - size lower than `5mb` is not allowed since it will prevent the use of the - Multipart API and may result in upload errors. It is also not possible to - set a buffer size greater than `5gb` as it is the maximum upload size - allowed by S3. Defaults to the minimum between `100mb` and `5%` of the heap size. + Minimum threshold below which the chunk is uploaded using a single request. + Beyond this threshold, the S3 repository will use the + http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html[AWS + Multipart Upload API] to split the chunk into several parts, each of + `buffer_size` length, and to upload each part in its own request. Note that + setting a buffer size lower than `5mb` is not allowed since it will prevent + the use of the Multipart API and may result in upload errors. It is also not + possible to set a buffer size greater than `5gb` as it is the maximum upload + size allowed by S3. Defaults to the minimum between `100mb` and `5%` of the + heap size. 
`canned_acl`:: - The S3 repository supports all http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl[S3 canned ACLs] - : `private`, `public-read`, `public-read-write`, `authenticated-read`, `log-delivery-write`, - `bucket-owner-read`, `bucket-owner-full-control`. Defaults to `private`. - You could specify a canned ACL using the `canned_acl` setting. When the S3 repository - creates buckets and objects, it adds the canned ACL into the buckets and objects. + The S3 repository supports all + http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl[S3 + canned ACLs] : `private`, `public-read`, `public-read-write`, + `authenticated-read`, `log-delivery-write`, `bucket-owner-read`, + `bucket-owner-full-control`. Defaults to `private`. You could specify a + canned ACL using the `canned_acl` setting. When the S3 repository creates + buckets and objects, it adds the canned ACL into the buckets and objects. `storage_class`:: Sets the S3 storage class for objects stored in the snapshot repository. - Values may be `standard`, `reduced_redundancy`, `standard_ia`. - Defaults to `standard`. Changing this setting on an existing repository - only affects the storage class for newly created objects, resulting in a - mixed usage of storage classes. Additionally, S3 Lifecycle Policies can - be used to manage the storage class of existing objects. - Due to the extra complexity with the Glacier class lifecycle, it is not - currently supported by the plugin. For more information about the - different classes, see http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html[AWS Storage Classes Guide] - -NOTE: The option of defining client settings in the repository settings as documented below is considered deprecated: - -In addition to the above settings, you may also specify all non-secure client settings in the repository settings. -In this case, the client settings found in the repository settings will be merged with those of the named client used by the repository. -Conflicts between client and repository settings are resolved by the repository settings taking precedence over client settings. + Values may be `standard`, `reduced_redundancy`, `standard_ia`. Defaults to + `standard`. Changing this setting on an existing repository only affects the + storage class for newly created objects, resulting in a mixed usage of + storage classes. Additionally, S3 Lifecycle Policies can be used to manage + the storage class of existing objects. Due to the extra complexity with the + Glacier class lifecycle, it is not currently supported by the plugin. For + more information about the different classes, see + http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html[AWS + Storage Classes Guide] + +NOTE: The option of defining client settings in the repository settings as +documented below is considered deprecated, and will be removed in a future +version. + +In addition to the above settings, you may also specify all non-secure client +settings in the repository settings. In this case, the client settings found in +the repository settings will be merged with those of the named client used by +the repository. Conflicts between client and repository settings are resolved +by the repository settings taking precedence over client settings. 
For example: @@ -244,16 +298,19 @@ PUT _snapshot/my_s3_repository // CONSOLE // TEST[skip:we don't have s3 set up while testing this] -This sets up a repository that uses all client settings from the client `my_client_named` except for the `endpoint` that is overridden -to `my.s3.endpoint` by the repository settings. +This sets up a repository that uses all client settings from the client +`my_client_name` except for the `endpoint` that is overridden to +`my.s3.endpoint` by the repository settings. [[repository-s3-permissions]] ===== Recommended S3 Permissions -In order to restrict the Elasticsearch snapshot process to the minimum required resources, we recommend using Amazon -IAM in conjunction with pre-existing S3 buckets. Here is an example policy which will allow the snapshot access to an - S3 bucket named "snaps.example.com". This may be configured through the AWS IAM console, by creating a Custom Policy, - and using a Policy Document similar to this (changing snaps.example.com to your bucket name). +In order to restrict the Elasticsearch snapshot process to the minimum required +resources, we recommend using Amazon IAM in conjunction with pre-existing S3 +buckets. Here is an example policy which will allow the snapshot access to an S3 +bucket named "snaps.example.com". This may be configured through the AWS IAM +console, by creating a Custom Policy, and using a Policy Document similar to +this (changing snaps.example.com to your bucket name). [source,js] ---- @@ -290,7 +347,8 @@ IAM in conjunction with pre-existing S3 buckets. Here is an example policy which ---- // NOTCONSOLE -You may further restrict the permissions by specifying a prefix within the bucket, in this example, named "foo". +You may further restrict the permissions by specifying a prefix within the +bucket, in this example, named "foo". [source,js] ---- @@ -334,16 +392,23 @@ You may further restrict the permissions by specifying a prefix within the bucke ---- // NOTCONSOLE -The bucket needs to exist to register a repository for snapshots. If you did not create the bucket then the repository -registration will fail. +The bucket needs to exist to register a repository for snapshots. If you did not +create the bucket then the repository registration will fail. -Note: Starting in version 7.0, all bucket operations are using the path style access pattern. In previous versions the decision to use virtual hosted style -or path style access was made by the AWS Java SDK. +Note: Starting in version 7.0, all bucket operations are using the path style +access pattern. In previous versions the decision to use virtual hosted style or +path style access was made by the AWS Java SDK. [[repository-s3-aws-vpc]] [float] ==== AWS VPC Bandwidth Settings -AWS instances resolve S3 endpoints to a public IP. If the Elasticsearch instances reside in a private subnet in an AWS VPC then all traffic to S3 will go through that VPC's NAT instance. If your VPC's NAT instance is a smaller instance size (e.g. a t1.micro) or is handling a high volume of network traffic your bandwidth to S3 may be limited by that NAT instance's networking bandwidth limitations. +AWS instances resolve S3 endpoints to a public IP. If the Elasticsearch +instances reside in a private subnet in an AWS VPC then all traffic to S3 will +go through that VPC's NAT instance. If your VPC's NAT instance is a smaller +instance size (e.g. 
a t1.micro) or is handling a high volume of network traffic +your bandwidth to S3 may be limited by that NAT instance's networking bandwidth +limitations. -Instances residing in a public subnet in an AWS VPC will connect to S3 via the VPC's internet gateway and not be bandwidth limited by the VPC's NAT instance. +Instances residing in a public subnet in an AWS VPC will connect to S3 via the +VPC's internet gateway and not be bandwidth limited by the VPC's NAT instance. diff --git a/docs/plugins/repository-shared-settings.asciidoc b/docs/plugins/repository-shared-settings.asciidoc new file mode 100644 index 0000000000000..ca9345e0ffc2c --- /dev/null +++ b/docs/plugins/repository-shared-settings.asciidoc @@ -0,0 +1,11 @@ +`max_restore_bytes_per_sec`:: + + Throttles per node restore rate. Defaults to `40mb` per second. + +`max_snapshot_bytes_per_sec`:: + + Throttles per node snapshot rate. Defaults to `40mb` per second. + +`readonly`:: + + Makes repository read-only. Defaults to `false`. \ No newline at end of file diff --git a/docs/python/index.asciidoc b/docs/python/index.asciidoc index ea1b6a837b58f..8def003e7c2ab 100644 --- a/docs/python/index.asciidoc +++ b/docs/python/index.asciidoc @@ -58,8 +58,8 @@ The recommended way to set your requirements in your `setup.py` or [source,txt] ------------------------------------ - # Elasticsearch 5.x - elasticsearch>=5.0.0,<6.0.0 + # Elasticsearch 6.x + elasticsearch>=6.0.0,<7.0.0 # Elasticsearch 2.x elasticsearch2 diff --git a/docs/reference/administering.asciidoc b/docs/reference/administering.asciidoc new file mode 100644 index 0000000000000..0525d93a27fa1 --- /dev/null +++ b/docs/reference/administering.asciidoc @@ -0,0 +1,25 @@ +[[administer-elasticsearch]] += Administering {es} + +[partintro] +-- +Elasticsearch is a complex piece of software, with many moving parts. There are +many APIs and features that are designed to help you manage your Elasticsearch +cluster. + +-- + +[[backup-cluster]] +== Back up a cluster + +As with any software that stores data, it is important to routinely back up your +data. {es} replicas provide high availability during runtime; they enable you to +tolerate sporadic node loss without an interruption of service. + +Replicas do not provide protection from catastrophic failure, however. For that, +you need a real backup of your cluster—a complete copy in case something goes +wrong. + +To back up your cluster, you can use the <>. 
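For example, a minimal sketch of registering a shared filesystem repository and taking a snapshot; the repository name `my_backup` and the `my_backup_location` path are assumptions, and the path must already be listed in `path.repo` on every node:

[source,js]
--------------------------------------------------
PUT /_snapshot/my_backup
{
  "type": "fs",
  "settings": {
    "location": "my_backup_location"
  }
}

PUT /_snapshot/my_backup/snapshot_1?wait_for_completion=true
--------------------------------------------------
// NOTCONSOLE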
+ +include::{es-repo-dir}/modules/snapshots.asciidoc[tag=snapshot-intro] \ No newline at end of file diff --git a/docs/reference/aggregations/bucket.asciidoc b/docs/reference/aggregations/bucket.asciidoc index 52b27c578929f..b9fbddc65c125 100644 --- a/docs/reference/aggregations/bucket.asciidoc +++ b/docs/reference/aggregations/bucket.asciidoc @@ -39,6 +39,8 @@ include::bucket/geodistance-aggregation.asciidoc[] include::bucket/geohashgrid-aggregation.asciidoc[] +include::bucket/geotilegrid-aggregation.asciidoc[] + include::bucket/global-aggregation.asciidoc[] include::bucket/histogram-aggregation.asciidoc[] @@ -62,4 +64,3 @@ include::bucket/significantterms-aggregation.asciidoc[] include::bucket/significanttext-aggregation.asciidoc[] include::bucket/terms-aggregation.asciidoc[] - diff --git a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc index e371674228bb4..dfc4f62a91b09 100644 --- a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc @@ -89,7 +89,7 @@ Response: -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] -=== Intervals +==== Intervals The interval of the returned buckets is selected based on the data collected by the aggregation so that the number of buckets returned is less than or equal to the number diff --git a/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc b/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc index 4b172402da9ec..f78089d1a9bf6 100644 --- a/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc @@ -110,68 +110,149 @@ bucket, as if they had a date value of "1899-12-31". 
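For instance, a hedged sketch of a `date_range` aggregation that routes documents without a date into a bucket via the `missing` parameter; the `sales` index, the `date` field, and the range boundaries are assumptions for the example:

[source,js]
--------------------------------------------------
POST /sales/_search?size=0
{
  "aggs": {
    "range": {
      "date_range": {
        "field": "date",
        "missing": "1899-12-31",
        "ranges": [
          { "to": "2000-01-01" },
          { "from": "2000-01-01" }
        ]
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE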
==== Date Format/Pattern NOTE: this information was copied from -http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[JodaDate] +https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html[DateTimeFormatter] All ASCII letters are reserved as format pattern letters, which are defined as follows: [options="header"] |======= -|Symbol |Meaning |Presentation |Examples -|G |era |text |AD -|C |century of era (>=0) |number |20 -|Y |year of era (>=0) |year |1996 - -|x |weekyear |year |1996 -|w |week of weekyear |number |27 -|e |day of week |number |2 -|E |day of week |text |Tuesday; Tue - -|y |year |year |1996 -|D |day of year |number |189 -|M |month of year |month |July; Jul; 07 -|d |day of month |number |10 - -|a |halfday of day |text |PM -|K |hour of halfday (0~11) |number |0 -|h |clockhour of halfday (1~12) |number |12 - -|H |hour of day (0~23) |number |0 -|k |clockhour of day (1~24) |number |24 -|m |minute of hour |number |30 -|s |second of minute |number |55 -|S |fraction of second |number |978 - -|z |time zone |text |Pacific Standard Time; PST -|Z |time zone offset/id |zone |-0800; -08:00; America/Los_Angeles - -|' |escape for text |delimiter -|'' |single quote |literal |' +|Symbol |Meaning |Presentation |Examples +|G |era |text |AD; Anno Domini; A +|u |year |year |2004; 04 +|y |year-of-era |year |2004; 04 +|D |day-of-year |number |189 +|M/L |month-of-year |number/text |7; 07; Jul; July; J +|d |day-of-month |number |10 + +|Q/q |quarter-of-year |number/text |3; 03; Q3; 3rd quarter +|Y |week-based-year |year |1996; 96 +|w |week-of-week-based-year |number |27 +|W |week-of-month |number |4 +|E |day-of-week |text |Tue; Tuesday; T +|e/c |localized day-of-week |number/text |2; 02; Tue; Tuesday; T +|F |week-of-month |number |3 + +|a |am-pm-of-day |text |PM +|h |clock-hour-of-am-pm (1-12) |number |12 +|K |hour-of-am-pm (0-11) |number |0 +|k |clock-hour-of-am-pm (1-24) |number |0 + +|H |hour-of-day (0-23) |number |0 +|m |minute-of-hour |number |30 +|s |second-of-minute |number |55 +|S |fraction-of-second |fraction |978 +|A |milli-of-day |number |1234 +|n |nano-of-second |number |987654321 +|N |nano-of-day |number |1234000000 + +|V |time-zone ID |zone-id |America/Los_Angeles; Z; -08:30 +|z |time-zone name |zone-name |Pacific Standard Time; PST +|O |localized zone-offset |offset-O |GMT+8; GMT+08:00; UTC-08:00; +|X |zone-offset 'Z' for zero |offset-X |Z; -08; -0830; -08:30; -083015; -08:30:15; +|x |zone-offset |offset-x |+0000; -08; -0830; -08:30; -083015; -08:30:15; +|Z |zone-offset |offset-Z |+0000; -0800; -08:00; + +|p |pad next |pad modifier |1 +|' |escape for text |delimiter +|'' |single quote |literal |' +|[ |optional section start +|] |optional section end +|# |reserved for future use +|{ |reserved for future use +|} |reserved for future use |======= -The count of pattern letters determine the format. +The count of pattern letters determines the format. + +Text:: The text style is determined based on the number of pattern letters +used. Less than 4 pattern letters will use the short form. Exactly 4 +pattern letters will use the full form. Exactly 5 pattern letters will use +the narrow form. Pattern letters `L`, `c`, and `q` specify the stand-alone +form of the text styles. + +Number:: If the count of letters is one, then the value is output using +the minimum number of digits and without padding. Otherwise, the count of +digits is used as the width of the output field, with the value +zero-padded as necessary. 
The following pattern letters have constraints +on the count of letters. Only one letter of `c` and `F` can be specified. +Up to two letters of `d`, `H`, `h`, `K`, `k`, `m`, and `s` can be +specified. Up to three letters of `D` can be specified. + +Number/Text:: If the count of pattern letters is 3 or greater, use the +Text rules above. Otherwise use the Number rules above. + +Fraction:: Outputs the nano-of-second field as a fraction-of-second. The +nano-of-second value has nine digits, thus the count of pattern letters is +from 1 to 9. If it is less than 9, then the nano-of-second value is +truncated, with only the most significant digits being output. + +Year:: The count of letters determines the minimum field width below which +padding is used. If the count of letters is two, then a reduced two digit +form is used. For printing, this outputs the rightmost two digits. For +parsing, this will parse using the base value of 2000, resulting in a year +within the range 2000 to 2099 inclusive. If the count of letters is less +than four (but not two), then the sign is only output for negative years +as per `SignStyle.NORMAL`. Otherwise, the sign is output if the pad width is +exceeded, as per `SignStyle.EXCEEDS_PAD`. + +ZoneId:: This outputs the time-zone ID, such as `Europe/Paris`. If the +count of letters is two, then the time-zone ID is output. Any other count +of letters throws `IllegalArgumentException`. + +Zone names:: This outputs the display name of the time-zone ID. If the +count of letters is one, two or three, then the short name is output. If +the count of letters is four, then the full name is output. Five or more +letters throws `IllegalArgumentException`. + +Offset X and x:: This formats the offset based on the number of pattern +letters. One letter outputs just the hour, such as `+01`, unless the +minute is non-zero in which case the minute is also output, such as +`+0130`. Two letters outputs the hour and minute, without a colon, such as +`+0130`. Three letters outputs the hour and minute, with a colon, such as +`+01:30`. Four letters outputs the hour and minute and optional second, +without a colon, such as `+013015`. Five letters outputs the hour and +minute and optional second, with a colon, such as `+01:30:15`. Six or +more letters throws `IllegalArgumentException`. Pattern letter `X` (upper +case) will output `Z` when the offset to be output would be zero, +whereas pattern letter `x` (lower case) will output `+00`, `+0000`, or +`+00:00`. + +Offset O:: This formats the localized offset based on the number of +pattern letters. One letter outputs the short form of the localized +offset, which is localized offset text, such as `GMT`, with hour without +leading zero, optional 2-digit minute and second if non-zero, and colon, +for example `GMT+8`. Four letters outputs the full form, which is +localized offset text, such as `GMT, with 2-digit hour and minute +field, optional second field if non-zero, and colon, for example +`GMT+08:00`. Any other count of letters throws +`IllegalArgumentException`. + +Offset Z:: This formats the offset based on the number of pattern letters. +One, two or three letters outputs the hour and minute, without a colon, +such as `+0130`. The output will be `+0000` when the offset is zero. +Four letters outputs the full form of localized offset, equivalent to +four letters of Offset-O. The output will be the corresponding localized +offset text if the offset is zero. Five letters outputs the hour, +minute, with optional second if non-zero, with colon. 
It outputs `Z` if +the offset is zero. Six or more letters throws IllegalArgumentException. + +Optional section:: The optional section markers work exactly like calling +`DateTimeFormatterBuilder.optionalStart()` and +`DateTimeFormatterBuilder.optionalEnd()`. + +Pad modifier:: Modifies the pattern that immediately follows to be padded +with spaces. The pad width is determined by the number of pattern letters. +This is the same as calling `DateTimeFormatterBuilder.padNext(int)`. + +For example, `ppH` outputs the hour-of-day padded on the left with spaces to a width of 2. + +Any unrecognized letter is an error. Any non-letter character, other than +`[`, `]`, `{`, `}`, `#` and the single quote will be output directly. +Despite this, it is recommended to use single quotes around all characters +that you want to output directly to ensure that future changes do not +break your application. -Text:: If the number of pattern letters is 4 or more, the full form is used; -otherwise a short or abbreviated form is used if available. - -Number:: The minimum number of digits. Shorter numbers are zero-padded to -this amount. - -Year:: Numeric presentation for year and weekyear fields are handled -specially. For example, if the count of 'y' is 2, the year will be displayed -as the zero-based year of the century, which is two digits. - -Month:: 3 or over, use text, otherwise use number. - -Zone:: 'Z' outputs offset without a colon, 'ZZ' outputs the offset with a -colon, 'ZZZ' or more outputs the zone id. - -Zone names:: Time zone names ('z') cannot be parsed. - -Any characters in the pattern that are not in the ranges of ['a'..'z'] and -['A'..'Z'] will be treated as quoted text. For instance, characters like ':', - '.', ' ', '#' and '?' will appear in the resulting time text even they are - not embraced within single quotes. [[time-zones]] ==== Time zone in date range aggregations @@ -180,8 +261,7 @@ Dates can be converted from another time zone to UTC by specifying the `time_zone` parameter. Time zones may either be specified as an ISO 8601 UTC offset (e.g. +01:00 or --08:00) or as one of the http://www.joda.org/joda-time/timezones.html [time -zone ids] from the TZ database. +-08:00) or as one of the time zone ids from the TZ database. The `time_zone` parameter is also applied to rounding in date math expressions. As an example, to round to the beginning of the day in the CET time zone, you @@ -198,7 +278,7 @@ POST /sales/_search?size=0 "time_zone": "CET", "ranges": [ { "to": "2016/02/01" }, <1> - { "from": "2016/02/01", "to" : "now/d" <2>}, + { "from": "2016/02/01", "to" : "now/d" }, <2> { "from": "now/d" } ] } @@ -209,7 +289,7 @@ POST /sales/_search?size=0 // CONSOLE // TEST[setup:sales] -<1> This date will be converted to `2016-02-15T00:00:00.000+01:00`. +<1> This date will be converted to `2016-02-01T00:00:00.000+01:00`. <2> `now/d` will be rounded to the beginning of the day in the CET time zone. 
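To tie the pattern letters above to an actual request, here is a hedged sketch of a `date_range` aggregation that combines a java-time `format` with a `time_zone`; the index, field, and range values are assumptions for the example:

[source,js]
--------------------------------------------------
POST /sales/_search?size=0
{
  "aggs": {
    "range": {
      "date_range": {
        "field": "date",
        "format": "yyyy-MM-dd",
        "time_zone": "CET",
        "ranges": [
          { "from": "2016-02-01", "to": "2016-03-01" }
        ]
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE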
==== Keyed Response diff --git a/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc b/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc index 07d8261d200b4..edd6c6163da85 100644 --- a/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc @@ -186,6 +186,7 @@ Please note that Elasticsearch will ignore the choice of execution hint if it is ==== Limitations +[[div-sampler-breadth-first-nested-agg]] ===== Cannot be nested under `breadth_first` aggregations Being a quality-based filter the diversified_sampler aggregation needs access to the relevance score produced for each document. It therefore cannot be nested under a `terms` aggregation which has the `collect_mode` switched from the default `depth_first` mode to `breadth_first` as this discards scores. @@ -194,6 +195,7 @@ In this situation an error will be thrown. ===== Limited de-dup logic. The de-duplication logic applies only at a shard level so will not apply across shards. +[[spec-syntax-geo-date-fields]] ===== No specialized syntax for geo/date fields Currently the syntax for defining the diversifying values is defined by a choice of `field` or `script` - there is no added syntactical sugar for expressing geo or date units such as "7d" (7 diff --git a/docs/reference/aggregations/bucket/filters-aggregation.asciidoc b/docs/reference/aggregations/bucket/filters-aggregation.asciidoc index 94b91654f0c7f..4e83c14a18ba7 100644 --- a/docs/reference/aggregations/bucket/filters-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/filters-aggregation.asciidoc @@ -118,6 +118,7 @@ request. The response for this example would be: // TESTRESPONSE[s/"_shards": \.\.\./"_shards": $body._shards/] // TESTRESPONSE[s/"hits": \.\.\./"hits": $body.hits/] +[[other-bucket]] ==== `Other` Bucket The `other_bucket` parameter can be set to add a bucket to the response which will contain all documents that do diff --git a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc index 2576662b6d13f..32a540130ef35 100644 --- a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc @@ -143,7 +143,7 @@ the first bucket you'll get will be the one with `100` as its key. This is confu to get those buckets between `0 - 100`. With `extended_bounds` setting, you now can "force" the histogram aggregation to start building buckets on a specific -`min` values and also keep on building buckets up to a `max` value (even if there are no documents anymore). Using +`min` value and also keep on building buckets up to a `max` value (even if there are no documents anymore). Using `extended_bounds` only makes sense when `min_doc_count` is 0 (the empty buckets will never be returned if `min_doc_count` is greater than 0). @@ -185,8 +185,10 @@ the `order` setting. Supports the same `order` functionality as the < "type" : "nested", "properties" : { - "name" : { "type" : "text" }, + "reseller" : { "type" : "text" }, "price" : { "type" : "double" } } } @@ -24,14 +24,40 @@ PUT /index } -------------------------------------------------- // CONSOLE -// TESTSETUP -<1> The `resellers` is an array that holds nested documents under the `product` object. 
-The following aggregations will return the minimum price products can be purchased in: +<1> `resellers` is an array that holds nested documents. + +The following request adds a product with two resellers: + +[source,js] +-------------------------------------------------- +PUT /products/_doc/0 +{ + "name": "LED TV", <1> + "resellers": [ + { + "reseller": "companyA", + "price": 350 + }, + { + "reseller": "companyB", + "price": 500 + } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[s/PUT \/products\/_doc\/0/PUT \/products\/_doc\/0\?refresh/] +// TEST[continued] + +<1> We are using a dynamic mapping for the `name` attribute. + + +The following request returns the minimum price a product can be purchased for: [source,js] -------------------------------------------------- -GET /_search +GET /products/_search { "query" : { "match" : { "name" : "led tv" } @@ -49,8 +75,8 @@ GET /_search } -------------------------------------------------- // CONSOLE -// TEST[s/GET \/_search/GET \/_search\?filter_path=aggregations/] -// TEST[s/^/PUT index\/_doc\/0\?refresh\n{"name":"led", "resellers": [{"name": "foo", "price": 350.00}, {"name": "bar", "price": 500.00}]}\n/] +// TEST[s/GET \/products\/_search/GET \/products\/_search\?filter_path=aggregations/] +// TEST[continued] As you can see above, the nested aggregation requires the `path` of the nested documents within the top level documents. Then one can define any type of aggregation over these nested documents. @@ -63,7 +89,7 @@ Response: ... "aggregations": { "resellers": { - "doc_count": 0, + "doc_count": 2, "min_price": { "value": 350 } diff --git a/docs/reference/aggregations/bucket/sampler-aggregation.asciidoc b/docs/reference/aggregations/bucket/sampler-aggregation.asciidoc index c5ac91e9d3ad8..d3a255be1d5fe 100644 --- a/docs/reference/aggregations/bucket/sampler-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/sampler-aggregation.asciidoc @@ -155,6 +155,7 @@ The default value is 100. ==== Limitations +[[sampler-breadth-first-nested-agg]] ===== Cannot be nested under `breadth_first` aggregations Being a quality-based filter the sampler aggregation needs access to the relevance score produced for each document. It therefore cannot be nested under a `terms` aggregation which has the `collect_mode` switched from the default `depth_first` mode to `breadth_first` as this discards scores. diff --git a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc index e29fbac0c5649..5fb89b6119aec 100644 --- a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc @@ -436,6 +436,7 @@ Available parameters in the script are `_subset_size`:: Number of documents in the subset. `_superset_size`:: Number of documents in the superset. +[[sig-terms-shard-size]] ===== Size & Shard Size The `size` parameter can be set to define how many term buckets should be returned out of the overall terms list. 
By diff --git a/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc b/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc index 429c822d3623d..3a9221c3c4372 100644 --- a/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc @@ -92,7 +92,7 @@ It only occurs 5 times in our index as a whole (see the `bg_count`) and yet 4 of were lucky enough to appear in our 100 document sample of "bird flu" results. That suggests a significant word and one which the user can potentially add to their search. - +[[filter-duplicate-text-noisy-data]] ==== Dealing with noisy data using `filter_duplicate_text` Free-text fields often contain a mix of original content and mechanical copies of text (cut-and-paste biographies, email reply chains, retweets, boilerplate headers/footers, page navigation menus, sidebar news links, copyright notices, standard disclaimers, addresses). @@ -353,7 +353,7 @@ However, the `size` and `shard size` settings covered in the next section provid This aggregation supports the same scoring heuristics (JLH, mutual_information, gnd, chi_square etc) as the <> aggregation - +[[sig-text-shard-size]] ===== Size & Shard Size The `size` parameter can be set to define how many term buckets should be returned out of the overall terms list. By diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc index 8c0e586d8b2f6..0919e4c730e62 100644 --- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc @@ -59,13 +59,15 @@ GET /_search { "aggs" : { "genres" : { - "terms" : { "field" : "genre" } + "terms" : { "field" : "genre" } <1> } } } -------------------------------------------------- // CONSOLE // TEST[s/_search/_search\?filter_path=aggregations/] +<1> `terms` aggregation should be a field of type `keyword` or any other data type suitable for bucket aggregations. In order to use it with `text` you will need to enable +<>. Response: @@ -160,7 +162,7 @@ respective document counts in brackets: | 6 | Product F (2) | Product H (14) | Product H (28) | 7 | Product G (2) | Product I (10) | Product Q (2) | 8 | Product H (2) | Product Q (6) | Product D (1) -| 9 | Product I (1) | Product J (8) | +| 9 | Product I (1) | Product J (6) | | 10 | Product J (1) | Product C (4) | |========================================================= diff --git a/docs/reference/aggregations/metrics/weighted-avg-aggregation.asciidoc b/docs/reference/aggregations/metrics/weighted-avg-aggregation.asciidoc index 252728a6db367..70804a7904f1f 100644 --- a/docs/reference/aggregations/metrics/weighted-avg-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/weighted-avg-aggregation.asciidoc @@ -12,7 +12,9 @@ As a formula, a weighted average is the `∑(value * weight) / ∑(weight)` A regular average can be thought of as a weighted average where every value has an implicit weight of `1`. 
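For example, a brief sketch of a `weighted_avg` aggregation; the `exams` index and the `grade` and `weight` fields are placeholder names chosen for the example:

[source,js]
--------------------------------------------------
POST /exams/_search?size=0
{
  "aggs": {
    "weighted_grade": {
      "weighted_avg": {
        "value": { "field": "grade" },
        "weight": { "field": "weight" }
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE

With grades `[1, 2, 3]` and matching weights `[3, 5, 7]`, the result is `∑(value * weight) / ∑(weight) = (3 + 10 + 21) / 15 ≈ 2.27`, rather than the unweighted average of `2`.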
+[[weighted-avg-params]] .`weighted_avg` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`value` | The configuration for the field or script that provides the values |Required | @@ -23,7 +25,9 @@ A regular average can be thought of as a weighted average where every value has The `value` and `weight` objects have per-field specific configuration: +[[value-params]] .`value` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`field` | The field that values should be extracted from |Required | @@ -31,7 +35,9 @@ The `value` and `weight` objects have per-field specific configuration: |`script` | A script which provides the values for the document. This is mutually exclusive with `field` |Optional |=== +[[weight-params]] .`weight` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`field` | The field that weights should be extracted from |Required | diff --git a/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc index 274efcbce62fc..ea150b4ab6d8b 100644 --- a/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc @@ -4,6 +4,7 @@ A sibling pipeline aggregation which calculates the (mean) average value of a specified metric in a sibling aggregation. The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation. +[[avg-bucket-agg-syntax]] ==== Syntax An `avg_bucket` aggregation looks like this in isolation: @@ -18,7 +19,9 @@ An `avg_bucket` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE +[[avg-bucket-params]] .`avg_bucket` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`buckets_path` |The path to the buckets we wish to find the average for (see <> for more diff --git a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc index 1825b37f0c734..7dc99ba7719cd 100644 --- a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc @@ -4,6 +4,7 @@ A parent pipeline aggregation which executes a script which can perform per bucket computations on specified metrics in the parent multi-bucket aggregation. The specified metric must be numeric and the script must return a numeric value. +[[bucket-script-agg-syntax]] ==== Syntax A `bucket_script` aggregation looks like this in isolation: @@ -24,8 +25,9 @@ A `bucket_script` aggregation looks like this in isolation: <1> Here, `my_var1` is the name of the variable for this buckets path to use in the script, `the_sum` is the path to the metrics to use for that variable. - +[[bucket-script-params]] .`bucket_script` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`script` |The script to run for this aggregation. The script can be inline, file or indexed. 
(see <> diff --git a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc index 4cc532c99c5d2..7ec19174a06e3 100644 --- a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc @@ -29,8 +29,9 @@ A `bucket_selector` aggregation looks like this in isolation: <1> Here, `my_var1` is the name of the variable for this buckets path to use in the script, `the_sum` is the path to the metrics to use for that variable. - +[[bucket-selector-params]] .`bucket_selector` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`script` |The script to run for this aggregation. The script can be inline, file or indexed. (see <> diff --git a/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc index 633175dbf2825..a136a6ee4d578 100644 --- a/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc @@ -33,7 +33,9 @@ A `bucket_sort` aggregation looks like this in isolation: <1> Here, `sort_field_1` is the bucket path to the variable to be used as the primary sort and its order is ascending. +[[bucket-sort-params]] .`bucket_sort` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`sort` |The list of fields to sort on. See <> for more details. |Optional | diff --git a/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc b/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc index 748946f8bd671..a6dff7fa84651 100644 --- a/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc @@ -19,7 +19,9 @@ A `cumulative_sum` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE +[[cumulative-sum-params]] .`cumulative_sum` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`buckets_path` |The path to the buckets we wish to find the cumulative sum for (see <> for more diff --git a/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc b/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc index 8479d1f45aea1..f40ace7432d57 100644 --- a/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc @@ -17,7 +17,9 @@ A `derivative` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE +[[derivative-params]] .`derivative` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`buckets_path` |The path to the buckets we wish to find the derivative for (see <> for more diff --git a/docs/reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc index eeef705a6468d..c35223885fce0 100644 --- a/docs/reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc @@ -20,7 +20,9 @@ A `extended_stats_bucket` aggregation looks like this in isolation: 
-------------------------------------------------- // NOTCONSOLE +[[extended-stats-bucket-params]] .`extended_stats_bucket` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`buckets_path` |The path to the buckets we wish to calculate stats for (see <> for more diff --git a/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc index 8881315f50ab4..53a3aaa28f7e5 100644 --- a/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc @@ -19,7 +19,9 @@ A `max_bucket` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE +[[max-bucket-params]] .`max_bucket` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`buckets_path` |The path to the buckets we wish to find the maximum for (see <> for more diff --git a/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc index ad6aaa28c90dd..620cf02c714fa 100644 --- a/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc @@ -19,7 +19,9 @@ A `min_bucket` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE +[[min-bucket-params]] .`min_bucket` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`buckets_path` |The path to the buckets we wish to find the minimum for (see <> for more diff --git a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc index 06641391ced32..adbf771c6c03e 100644 --- a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc @@ -1,9 +1,7 @@ [[search-aggregations-pipeline-movavg-aggregation]] === Moving Average Aggregation -deprecated[6.4.0, The Moving Average aggregation has been deprecated in favor of the more general -<>. The new Moving Function aggregation provides -all the same functionality as the Moving Average aggregation, but also provides more flexibility.] +deprecated:[6.4.0, "The Moving Average aggregation has been deprecated in favor of the more general <>. The new Moving Function aggregation provides all the same functionality as the Moving Average aggregation, but also provides more flexibility."] Given an ordered series of data, the Moving Average aggregation will slide a window across the data and emit the average value of that window. 
For example, given the data `[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]`, we can calculate a simple moving diff --git a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc index 5745527bddd6f..f6fb25c76f662 100644 --- a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc @@ -24,7 +24,9 @@ A `moving_fn` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE +[[moving-avg-params]] .`moving_avg` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`buckets_path` |Path to the metric of interest (see <> for more details |Required | @@ -188,7 +190,9 @@ The functions are available from the `MovingFunctions` namespace. E.g. `MovingF This function accepts a collection of doubles and returns the maximum value in that window. `null` and `NaN` values are ignored; the maximum is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the result. +[[max-params]] .`max(double[] values)` Parameters +[options="header"] |=== |Parameter Name |Description |`values` |The window of values to find the maximum @@ -229,7 +233,9 @@ POST /_search This function accepts a collection of doubles and returns the minimum value in that window. `null` and `NaN` values are ignored; the minimum is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the result. +[[min-params]] .`min(double[] values)` Parameters +[options="header"] |=== |Parameter Name |Description |`values` |The window of values to find the minimum @@ -270,7 +276,9 @@ POST /_search This function accepts a collection of doubles and returns the sum of the values in that window. `null` and `NaN` values are ignored; the sum is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `0.0` is returned as the result. +[[sum-params]] .`sum(double[] values)` Parameters +[options="header"] |=== |Parameter Name |Description |`values` |The window of values to find the sum of @@ -312,7 +320,9 @@ This function accepts a collection of doubles and average, then returns the stan `null` and `NaN` values are ignored; the sum is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `0.0` is returned as the result. +[[stddev-params]] .`stdDev(double[] values)` Parameters +[options="header"] |=== |Parameter Name |Description |`values` |The window of values to find the standard deviation of @@ -363,7 +373,9 @@ the values from a `simple` moving average tend to "lag" behind the real data. `null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is count of non-`null`,non-`NaN` values. +[[unweightedavg-params]] .`unweightedAvg(double[] values)` Parameters +[options="header"] |=== |Parameter Name |Description |`values` |The window of values to find the sum of @@ -407,7 +419,9 @@ the "lag" behind the data's mean, since older points have less influence. If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the result. +[[linearweightedavg-params]] .`linearWeightedAvg(double[] values)` Parameters +[options="header"] |=== |Parameter Name |Description |`values` |The window of values to find the sum of @@ -456,7 +470,9 @@ moving average. 
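A hedged sketch of calling this function from a `moving_fn` aggregation embedded in a `date_histogram`; the index, the `date` and `price` fields, the interval, and the window size are assumptions for the example:

[source,js]
--------------------------------------------------
POST /_search
{
  "size": 0,
  "aggs": {
    "my_date_histo": {
      "date_histogram": {
        "field": "date",
        "interval": "1M"
      },
      "aggs": {
        "the_sum": {
          "sum": { "field": "price" }
        },
        "the_moving_avg": {
          "moving_fn": {
            "buckets_path": "the_sum",
            "window": 10,
            "script": "MovingFunctions.unweightedAvg(values)"
          }
        }
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE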
This tends to make the moving average track the data more close `null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is count of non-`null`,non-`NaN` values. +[[ewma-params]] .`ewma(double[] values, double alpha)` Parameters +[options="header"] |=== |Parameter Name |Description |`values` |The window of values to find the sum of @@ -511,7 +527,9 @@ Values are produced by multiplying the level and trend components. `null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is count of non-`null`,non-`NaN` values. +[[holt-params]] .`holt(double[] values, double alpha)` Parameters +[options="header"] |=== |Parameter Name |Description |`values` |The window of values to find the sum of @@ -572,7 +590,9 @@ for future enhancements. `null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is count of non-`null`,non-`NaN` values. +[[holtwinters-params]] .`holtWinters(double[] values, double alpha)` Parameters +[options="header"] |=== |Parameter Name |Description |`values` |The window of values to find the sum of diff --git a/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc index 032b6ef4e419d..456a4046c0624 100644 --- a/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc @@ -18,7 +18,9 @@ A `percentiles_bucket` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE +[[percentiles-bucket-params]] .`percentiles_bucket` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`buckets_path` |The path to the buckets we wish to find the percentiles for (see <> for more diff --git a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc index 70aea68f88c34..1506e39685845 100644 --- a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc @@ -46,7 +46,9 @@ A `serial_diff` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE +[[serial-diff-params]] .`serial_diff` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`buckets_path` |Path to the metric of interest (see <> for more details |Required | diff --git a/docs/reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc index b9c52ae981f75..2c9f585ebea3a 100644 --- a/docs/reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc @@ -18,7 +18,9 @@ A `stats_bucket` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE +[[stats-bucket-params]] .`stats_bucket` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`buckets_path` |The path to the buckets we wish to calculate stats for (see <> for more diff --git a/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc index b39cf472323c2..83e0e32135040 100644 --- 
a/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc @@ -18,7 +18,9 @@ A `sum_bucket` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE +[[sum-bucket-params]] .`sum_bucket` Parameters +[options="header"] |=== |Parameter Name |Description |Required |Default Value |`buckets_path` |The path to the buckets we wish to find the sum for (see <> for more diff --git a/docs/reference/analysis.asciidoc b/docs/reference/analysis.asciidoc index 0a3240df091da..e3b6aa46dd8d4 100644 --- a/docs/reference/analysis.asciidoc +++ b/docs/reference/analysis.asciidoc @@ -32,6 +32,7 @@ to the inverted index: ------ [float] +[[specify-index-time-analyzer]] === Specifying an index time analyzer Each <> field in a mapping can specify its own diff --git a/docs/reference/analysis/analyzers/custom-analyzer.asciidoc b/docs/reference/analysis/analyzers/custom-analyzer.asciidoc index 92133822fa51f..4b601a0b9bd3a 100644 --- a/docs/reference/analysis/analyzers/custom-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/custom-analyzer.asciidoc @@ -162,26 +162,26 @@ PUT my_index "settings": { "analysis": { "analyzer": { - "my_custom_analyzer": { + "my_custom_analyzer": { <1> "type": "custom", "char_filter": [ - "emoticons" <1> + "emoticons" ], - "tokenizer": "punctuation", <1> + "tokenizer": "punctuation", "filter": [ "lowercase", - "english_stop" <1> + "english_stop" ] } }, "tokenizer": { - "punctuation": { <1> + "punctuation": { <2> "type": "pattern", "pattern": "[ .,!?]" } }, "char_filter": { - "emoticons": { <1> + "emoticons": { <3> "type": "mapping", "mappings": [ ":) => _happy_", @@ -190,7 +190,7 @@ PUT my_index } }, "filter": { - "english_stop": { <1> + "english_stop": { <4> "type": "stop", "stopwords": "_english_" } @@ -207,9 +207,12 @@ POST my_index/_analyze -------------------------------------------------- // CONSOLE -<1> The `emoticons` character filter, `punctuation` tokenizer and - `english_stop` token filter are custom implementations which are defined - in the same index settings. +<1> Assigns the index a default custom analyzer, `my_custom_analyzer`. This +analyzer uses a custom tokenizer, character filter, and token filter that +are defined later in the request. +<2> Defines the custom `punctuation` tokenizer. +<3> Defines the custom `emoticons` character filter. +<4> Defines the custom `english_stop` token filter. ///////////////////// diff --git a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc index cc82d2eb8179f..4d053884a4b6e 100644 --- a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc @@ -68,7 +68,7 @@ The `fingerprint` analyzer accepts the following parameters: `stopwords`:: A pre-defined stop words list like `_english_` or an array containing a - list of stop words. Defaults to `\_none_`. + list of stop words. Defaults to `_none_`. 
`stopwords_path`:: diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 9a4dcbe8aaac7..099950ca39567 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -1358,7 +1358,7 @@ PUT /persian_example "char_filter": { "zero_width_spaces": { "type": "mapping", - "mappings": [ "\\u200C=> "] <1> + "mappings": [ "\\u200C=>\\u0020"] <1> } }, "filter": { diff --git a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc index 027f37280a67d..22fa534288ddd 100644 --- a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc @@ -159,7 +159,7 @@ The `pattern` analyzer accepts the following parameters: `stopwords`:: A pre-defined stop words list like `_english_` or an array containing a - list of stop words. Defaults to `\_none_`. + list of stop words. Defaults to `_none_`. `stopwords_path`:: diff --git a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc index 3097ece21db23..5117763b66819 100644 --- a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc @@ -132,7 +132,7 @@ The `standard` analyzer accepts the following parameters: `stopwords`:: A pre-defined stop words list like `_english_` or an array containing a - list of stop words. Defaults to `\_none_`. + list of stop words. Defaults to `_none_`. `stopwords_path`:: diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc index 7386af902fbcc..950d67510a218 100644 --- a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc @@ -83,7 +83,7 @@ POST my_index/_analyze // the test framework doesn't like the $1 so we just throw it away rather than // try to get it to work properly. At least we are still testing the charfilter. -The above example produces the following term: +The above example produces the following terms: [source,text] --------------------------- diff --git a/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc index be37d24f7dd7c..e460725523cf6 100644 --- a/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/edgengram-tokenfilter.asciidoc @@ -1,9 +1,9 @@ [[analysis-edgengram-tokenfilter]] === Edge NGram Token Filter -A token filter of type `edgeNGram`. +A token filter of type `edge_ngram`. -The following are settings that can be set for a `edgeNGram` token +The following are settings that can be set for a `edge_ngram` token filter type: [cols="<,<",options="header",] diff --git a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc index 924903b9f65a8..2ff19cebe893e 100644 --- a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc @@ -4,8 +4,11 @@ A token filter which removes elisions. For example, "l'avion" (the plane) will tokenized as "avion" (plane). 
-Accepts `articles` setting which is a set of stop words articles. For -example: +Accepts `articles` parameter which is a set of stop words articles. Also accepts +`articles_case`, which indicates whether the filter treats those articles as +case sensitive. + +For example: [source,js] -------------------------------------------------- @@ -22,6 +25,7 @@ PUT /elision_example "filter" : { "elision" : { "type" : "elision", + "articles_case": true, "articles" : ["l", "m", "t", "qu", "n", "s", "j"] } } diff --git a/docs/reference/analysis/tokenfilters/ngram-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/ngram-tokenfilter.asciidoc index acc178a2741fa..53bda23d12bf9 100644 --- a/docs/reference/analysis/tokenfilters/ngram-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/ngram-tokenfilter.asciidoc @@ -1,9 +1,9 @@ [[analysis-ngram-tokenfilter]] === NGram Token Filter -A token filter of type `nGram`. +A token filter of type `ngram`. -The following are settings that can be set for a `nGram` token filter +The following are settings that can be set for a `ngram` token filter type: [cols="<,<",options="header",] diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc index f59e2f3f2cf88..b5d5426ff2710 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc @@ -1,6 +1,12 @@ [[analysis-stemmer-tokenfilter]] === Stemmer Token Filter +// Adds attribute for the 'minimal_portuguese' stemmer values link. +// This link contains ~, which is converted to subscript. +// This attribute prevents that substitution. +// See https://github.com/asciidoctor/asciidoctor/wiki/How-to-prevent-URLs-containing-formatting-characters-from-getting-mangled +:min-pt-stemmer-values-url: http://www.inf.ufrgs.br/~buriol/papers/Orengo_CLEF07.pdf + A filter that provides access to (almost) all of the available stemming token filters through a single unified interface. For example: @@ -158,7 +164,7 @@ Portuguese:: http://snowball.tartarus.org/algorithms/portuguese/stemmer.html[`portuguese`], http://dl.acm.org/citation.cfm?id=1141523&dl=ACM&coll=DL&CFID=179095584&CFTOKEN=80067181[*`light_portuguese`*], -http://www.inf.ufrgs.br/\~buriol/papers/Orengo_CLEF07.pdf[`minimal_portuguese`], +{min-pt-stemmer-values-url}[`minimal_portuguese`], http://www.inf.ufrgs.br/\~viviane/rslp/index.htm[`portuguese_rslp`] Romanian:: diff --git a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc index 3167a4342ac2d..259bf785b5b30 100644 --- a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc @@ -78,4 +78,4 @@ Elasticsearch provides the following predefined list of languages: `_portuguese_`, `_romanian_`, `_russian_`, `_sorani_`, `_spanish_`, `_swedish_`, `_thai_`, `_turkish_`. -For the empty stopwords list (to disable stopwords) use: `\_none_`. +For the empty stopwords list (to disable stopwords) use: `_none_`. 
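As a brief sketch of how these values are used in practice (the index and analyzer names are placeholders), a custom analyzer can reference one of the predefined lists, or `_none_` to disable stopwords entirely:

[source,js]
--------------------------------------------------
PUT /my_index
{
  "settings": {
    "analysis": {
      "filter": {
        "english_stop": {
          "type": "stop",
          "stopwords": "_english_"
        }
      },
      "analyzer": {
        "my_analyzer": {
          "type": "custom",
          "tokenizer": "standard",
          "filter": [ "lowercase", "english_stop" ]
        }
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE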
diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc index 2a555d7d044da..2285c6f6e8989 100644 --- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc @@ -1,8 +1,6 @@ [[analysis-synonym-graph-tokenfilter]] === Synonym Graph Token Filter -beta[] - The `synonym_graph` token filter allows to easily handle synonyms, including multi-word synonyms correctly during the analysis process. @@ -71,10 +69,10 @@ PUT /test_index } }, "filter" : { - "my_stop": { - "type" : "stop", - "stopwords": ["bar"] - }, + "my_stop": { + "type" : "stop", + "stopwords": ["bar"] + }, "synonym_graph" : { "type" : "synonym_graph", "lenient": true, @@ -95,6 +93,7 @@ set to `false` no mapping would get added as when `expand=false` the target mapp stop word. [float] +[[synonym-graph-tokenizer-ignore_case-deprecated]] ==== `tokenizer` and `ignore_case` are deprecated The `tokenizer` parameter controls the tokenizers that will be used to @@ -176,7 +175,8 @@ PUT /test_index Using `synonyms_path` to define WordNet synonyms in a file is supported as well. -=== Parsing synonym files +[float] +==== Parsing synonym files Elasticsearch will use the token filters preceding the synonym filter in a tokenizer chain to parse the entries in a synonym file. So, for example, if a @@ -187,3 +187,8 @@ multiple versions of a token may choose which version of the token to emit when parsing synonyms, e.g. `asciifolding` will only produce the folded version of the token. Others, e.g. `multiplexer`, `word_delimiter_graph` or `ngram` will throw an error. + +WARNING: The synonym rules should not contain words that are removed by +a filter that appears after in the chain (a `stop` filter for instance). +Removing a term from a synonym rule breaks the matching at query time. + diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc index 715abdde6331d..1107236194655 100644 --- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc @@ -58,10 +58,10 @@ PUT /test_index } }, "filter" : { - "my_stop": { - "type" : "stop", - "stopwords": ["bar"] - }, + "my_stop": { + "type" : "stop", + "stopwords": ["bar"] + }, "synonym" : { "type" : "synonym", "lenient": true, @@ -83,6 +83,7 @@ stop word. [float] +[[synonym-tokenizer-ignore_case-deprecated]] ==== `tokenizer` and `ignore_case` are deprecated The `tokenizer` parameter controls the tokenizers that will be used to diff --git a/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc index 009b027b9ef2d..9b1b0b0ce098f 100644 --- a/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc @@ -18,7 +18,7 @@ Parameters include: `generate_word_parts`:: If `true` causes parts of words to be - generated: "PowerShot" => "Power" "Shot". Defaults to `true`. + generated: "Power-Shot", "(Power,Shot)" => "Power" "Shot". Defaults to `true`. 
`generate_number_parts`:: If `true` causes number subwords to be diff --git a/docs/reference/analysis/tokenizers.asciidoc b/docs/reference/analysis/tokenizers.asciidoc index d6f15ded05fab..628afebfdcbb8 100644 --- a/docs/reference/analysis/tokenizers.asciidoc +++ b/docs/reference/analysis/tokenizers.asciidoc @@ -155,3 +155,7 @@ include::tokenizers/simplepattern-tokenizer.asciidoc[] include::tokenizers/simplepatternsplit-tokenizer.asciidoc[] include::tokenizers/pathhierarchy-tokenizer.asciidoc[] + +include::tokenizers/pathhierarchy-tokenizer-examples.asciidoc[] + + diff --git a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc new file mode 100644 index 0000000000000..ee02d66e4034b --- /dev/null +++ b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc @@ -0,0 +1,191 @@ +[[analysis-pathhierarchy-tokenizer-examples]] +=== Path Hierarchy Tokenizer Examples + +A common use-case for the `path_hierarchy` tokenizer is filtering results by +file paths. If indexing a file path along with the data, the use of the +`path_hierarchy` tokenizer to analyze the path allows filtering the results +by different parts of the file path string. + + +This example configures an index to have two custom analyzers and applies +those analyzers to multifields of the `file_path` text field that will +store filenames. One of the two analyzers uses reverse tokenization. +Some sample documents are then indexed to represent some file paths +for photos inside photo folders of two different users. + + +[source,js] +-------------------------------------------------- +PUT file-path-test +{ + "settings": { + "analysis": { + "analyzer": { + "custom_path_tree": { + "tokenizer": "custom_hierarchy" + }, + "custom_path_tree_reversed": { + "tokenizer": "custom_hierarchy_reversed" + } + }, + "tokenizer": { + "custom_hierarchy": { + "type": "path_hierarchy", + "delimiter": "/" + }, + "custom_hierarchy_reversed": { + "type": "path_hierarchy", + "delimiter": "/", + "reverse": "true" + } + } + } + }, + "mappings": { + "properties": { + "file_path": { + "type": "text", + "fields": { + "tree": { + "type": "text", + "analyzer": "custom_path_tree" + }, + "tree_reversed": { + "type": "text", + "analyzer": "custom_path_tree_reversed" + } + } + } + } + } +} + +POST file-path-test/_doc/1 +{ + "file_path": "/User/alice/photos/2017/05/16/my_photo1.jpg" +} + +POST file-path-test/_doc/2 +{ + "file_path": "/User/alice/photos/2017/05/16/my_photo2.jpg" +} + +POST file-path-test/_doc/3 +{ + "file_path": "/User/alice/photos/2017/05/16/my_photo3.jpg" +} + +POST file-path-test/_doc/4 +{ + "file_path": "/User/alice/photos/2017/05/15/my_photo1.jpg" +} + +POST file-path-test/_doc/5 +{ + "file_path": "/User/bob/photos/2017/05/16/my_photo1.jpg" +} +-------------------------------------------------- +// CONSOLE +// TESTSETUP + + +A search for a particular file path string against the text field matches all +the example documents, with Bob's documents ranking highest due to `bob` also +being one of the terms created by the standard analyzer boosting relevance for +Bob's documents. 
+ +[source,js] +-------------------------------------------------- +GET file-path-test/_search +{ + "query": { + "match": { + "file_path": "/User/bob/photos/2017/05" + } + } +} +-------------------------------------------------- +// CONSOLE + + +It's simple to match or filter documents with file paths that exist within a +particular directory using the `file_path.tree` field. + +[source,js] +-------------------------------------------------- +GET file-path-test/_search +{ + "query": { + "term": { + "file_path.tree": "/User/alice/photos/2017/05/16" + } + } +} +-------------------------------------------------- +// CONSOLE + +With the reverse parameter for this tokenizer, it's also possible to match +from the other end of the file path, such as individual file names or a deep +level subdirectory. The following example shows a search for all files named +`my_photo1.jpg` within any directory via the `file_path.tree_reversed` field +configured to use the reverse parameter in the mapping. + + +[source,js] +-------------------------------------------------- +GET file-path-test/_search +{ + "query": { + "term": { + "file_path.tree_reversed": { + "value": "my_photo1.jpg" + } + } + } +} +-------------------------------------------------- +// CONSOLE + + +Viewing the tokens generated with both forward and reverse is instructive +in showing the tokens created for the same file path value. + + +[source,js] +-------------------------------------------------- +POST file-path-test/_analyze +{ + "analyzer": "custom_path_tree", + "text": "/User/alice/photos/2017/05/16/my_photo1.jpg" +} + +POST file-path-test/_analyze +{ + "analyzer": "custom_path_tree_reversed", + "text": "/User/alice/photos/2017/05/16/my_photo1.jpg" +} +-------------------------------------------------- +// CONSOLE + + +It's also useful to be able to filter with file paths when combined with other +types of searches, such as this example looking for any files paths with `16` +that also must be in Alice's photo directory. + +[source,js] +-------------------------------------------------- +GET file-path-test/_search +{ + "query": { + "bool" : { + "must" : { + "match" : { "file_path" : "16" } + }, + "filter": { + "term" : { "file_path.tree" : "/User/alice" } + } + } + } +} +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc index 55aa7d66da343..8d425197a2a6d 100644 --- a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc @@ -170,3 +170,7 @@ If we were to set `reverse` to `true`, it would produce the following: --------------------------- [ one/two/three/, two/three/, three/ ] --------------------------- + +[float] +=== Detailed Examples +See <>. diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 96a01bbeb5d9e..9e0578ee52897 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -20,39 +20,40 @@ API, unless otherwise specified. Most APIs that refer to an `index` parameter support execution across multiple indices, using simple `test1,test2,test3` notation (or `_all` for all indices). 
It also -support wildcards, for example: `test*` or `*test` or `te*t` or `*test*`, and the +supports wildcards, for example: `test*` or `*test` or `te*t` or `*test*`, and the ability to "exclude" (`-`), for example: `test*,-test3`. -All multi indices API support the following url query string parameters: +All multi indices APIs support the following url query string parameters: +[horizontal] `ignore_unavailable`:: -Controls whether to ignore if any specified indices are unavailable, this -includes indices that don't exist or closed indices. Either `true` or `false` +Controls whether to ignore if any specified indices are unavailable, +including indices that don't exist or closed indices. Either `true` or `false` can be specified. `allow_no_indices`:: -Controls whether to fail if a wildcard indices expressions results into no +Controls whether to fail if a wildcard indices expression results in no concrete indices. Either `true` or `false` can be specified. For example if the wildcard expression `foo*` is specified and no indices are available that -start with `foo` then depending on this setting the request will fail. This -setting is also applicable when `_all`, `*` or no index has been specified. This +start with `foo`, then depending on this setting the request will fail. This +setting is also applicable when `_all`, `*`, or no index has been specified. This settings also applies for aliases, in case an alias points to a closed index. `expand_wildcards`:: -Controls to what kind of concrete indices wildcard indices expression expand +Controls what kind of concrete indices that wildcard indices expressions can expand to. If `open` is specified then the wildcard expression is expanded to only -open indices and if `closed` is specified then the wildcard expression is +open indices. If `closed` is specified then the wildcard expression is expanded only to closed indices. Also both values (`open,closed`) can be specified to expand to all indices. + -If `none` is specified then wildcard expansion will be disabled and if `all` +If `none` is specified then wildcard expansion will be disabled. If `all` is specified, wildcard expressions will expand to all indices (this is equivalent to specifying `open,closed`). -The defaults settings for the above parameters depend on the api being used. +The defaults settings for the above parameters depend on the API being used. NOTE: Single index APIs such as the <> and the <> do not support multiple indices. @@ -67,7 +68,7 @@ execution performance. For example, if you are searching for errors in your daily logs, you can use a date math name template to restrict the search to the past two days. -Almost all APIs that have an `index` parameter, support date math in the `index` parameter +Almost all APIs that have an `index` parameter support date math in the `index` parameter value. A date math index name takes the following form: @@ -82,8 +83,12 @@ Where: [horizontal] `static_name`:: is the static text part of the name `date_math_expr`:: is a dynamic date math expression that computes the date dynamically -`date_format`:: is the optional format in which the computed date should be rendered. Defaults to `YYYY.MM.dd`. -`time_zone`:: is the optional time zone . Defaults to `utc`. +`date_format`:: is the optional format in which the computed date should be rendered. Defaults to `yyyy.MM.dd`. Format should be compatible with java-time https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html +`time_zone`:: is the optional time zone. 
Defaults to `utc`. + +NOTE: Pay attention to the usage of small vs capital letters used in the `date_format`. For example: +`mm` denotes minute of hour, while `MM` denotes month of year. Similarly `hh` denotes the hour in the +`1-12` range in combination with `AM/PM`, while `HH` denotes the hour in the `0-23` 24-hour range. Date math expressions are resolved locale-independent. Consequently, it is not possible to use any other calendars than the Gregorian calendar. @@ -125,16 +130,16 @@ The special characters used for date rounding must be URI encoded as follows: ====================================================== The following example shows different forms of date math index names and the final index names -they resolve to given the current time is 22rd March 2024 noon utc. +they resolve to given the current time is 22nd March 2024 noon utc. [options="header"] |====== | Expression |Resolves to | `` | `logstash-2024.03.22` | `` | `logstash-2024.03.01` -| `` | `logstash-2024.03` -| `` | `logstash-2024.02` -| `` | `logstash-2024.03.23` +| `` | `logstash-2024.03` +| `` | `logstash-2024.02` +| `` | `logstash-2024.03.23` |====== To use the characters `{` and `}` in the static part of an index name template, escape them @@ -144,7 +149,7 @@ with a backslash `\`, for example: The following example shows a search request that searches the Logstash indices for the past three days, assuming the indices use the default Logstash index name format, -`logstash-YYYY.MM.dd`. +`logstash-yyyy.MM.dd`. [source,js] ---------------------------------------------------------------------- @@ -180,8 +185,8 @@ to set `?format=yaml` which will cause the result to be returned in the === Human readable output Statistics are returned in a format suitable for humans -(eg `"exists_time": "1h"` or `"size": "1kb"`) and for computers -(eg `"exists_time_in_millis": 3600000` or `"size_in_bytes": 1024`). +(e.g. `"exists_time": "1h"` or `"size": "1kb"`) and for computers +(e.g. `"exists_time_in_millis": 3600000` or `"size_in_bytes": 1024`). The human readable values can be turned off by adding `?human=false` to the query string. This makes sense when the stats results are being consumed by a monitoring tool, rather than intended for human @@ -193,7 +198,7 @@ consumption. The default for the `human` flag is === Date Math Most parameters which accept a formatted date value -- such as `gt` and `lt` -in <> `range` queries, or `from` and `to` +in <>, or `from` and `to` in <> -- understand date maths. @@ -201,28 +206,29 @@ The expression starts with an anchor date, which can either be `now`, or a date string ending with `||`. This anchor date can optionally be followed by one or more maths expressions: -* `+1h` - add one hour -* `-1d` - subtract one day -* `/d` - round down to the nearest day +* `+1h`: Add one hour +* `-1d`: Subtract one day +* `/d`: Round down to the nearest day The supported time units differ from those supported by <> for durations. The supported units are: [horizontal] -`y`:: years -`M`:: months -`w`:: weeks -`d`:: days -`h`:: hours -`H`:: hours -`m`:: minutes -`s`:: seconds +`y`:: Years +`M`:: Months +`w`:: Weeks +`d`:: Days +`h`:: Hours +`H`:: Hours +`m`:: Minutes +`s`:: Seconds Assuming `now` is `2001-01-01 12:00:00`, some examples are: +[horizontal] `now+1h`:: `now` in milliseconds plus one hour. Resolves to: `2001-01-01 13:00:00` `now-1h`:: `now` in milliseconds minus one hour. Resolves to: `2001-01-01 11:00:00` -`now-1h/d`:: `now` in milliseconds minus one hour, rounded down to UTC 00:00. 
Resolves to: `2001-01-01 00:00:00`` +`now-1h/d`:: `now` in milliseconds minus one hour, rounded down to UTC 00:00. Resolves to: `2001-01-01 00:00:00` `2001.02.01\|\|+1M/d`:: `2001-02-01` in milliseconds plus one month. Resolves to: `2001-03-01 00:00:00` [float] @@ -396,8 +402,8 @@ GET /_search?filter_path=hits.hits._source&_source=title&sort=rating:desc [float] === Flat Settings -The `flat_settings` flag affects rendering of the lists of settings. When -`flat_settings` flag is `true` settings are returned in a flat format: +The `flat_settings` flag affects rendering of the lists of settings. When the +`flat_settings` flag is `true`, settings are returned in a flat format: [source,js] -------------------------------------------------- @@ -427,7 +433,7 @@ Returns: // TESTRESPONSE[s/n6gzFZTgS664GUfx0Xrpjw/$body.twitter.settings.index\\\\.uuid/] // TESTRESPONSE[s/"index.version.created": \.\.\./"index.version.created": $body.twitter.settings.index\\\\.version\\\\.created/] -When the `flat_settings` flag is `false` settings are returned in a more +When the `flat_settings` flag is `false`, settings are returned in a more human readable structured format: [source,js] @@ -462,7 +468,7 @@ Returns: // TESTRESPONSE[s/n6gzFZTgS664GUfx0Xrpjw/$body.twitter.settings.index.uuid/] // TESTRESPONSE[s/"created": \.\.\./"created": $body.twitter.settings.index.version.created/] -By default the `flat_settings` is set to `false`. +By default `flat_settings` is set to `false`. [float] === Parameters @@ -473,7 +479,7 @@ convention of using underscore casing. [float] === Boolean Values -All REST APIs parameters (both request parameters and JSON body) support +All REST API parameters (both request parameters and JSON body) support providing boolean "false" as the value `false` and boolean "true" as the value `true`. All other values will raise an error. @@ -491,19 +497,19 @@ Whenever durations need to be specified, e.g. for a `timeout` parameter, the dur the unit, like `2d` for 2 days. The supported units are: [horizontal] -`d`:: days -`h`:: hours -`m`:: minutes -`s`:: seconds -`ms`:: milliseconds -`micros`:: microseconds -`nanos`:: nanoseconds +`d`:: Days +`h`:: Hours +`m`:: Minutes +`s`:: Seconds +`ms`:: Milliseconds +`micros`:: Microseconds +`nanos`:: Nanoseconds [[byte-units]] [float] === Byte size units -Whenever the byte size of data needs to be specified, eg when setting a buffer size +Whenever the byte size of data needs to be specified, e.g. when setting a buffer size parameter, the value must specify the unit, like `10kb` for 10 kilobytes. Note that these units use powers of 1024, so `1kb` means 1024 bytes. The supported units are: @@ -525,7 +531,6 @@ If one of these quantities is large we'll print it out like 10m for 10,000,000 o when we mean 87 though. These are the supported multipliers: [horizontal] -``:: Single `k`:: Kilo `m`:: Mega `g`:: Giga @@ -537,8 +542,8 @@ when we mean 87 though. These are the supported multipliers: === Distance Units Wherever distances need to be specified, such as the `distance` parameter in -the <>), the default unit if none is specified is -the meter. Distances can be specified in other units, such as `"1km"` or +the <>), the default unit is meters if none is specified. +Distances can be specified in other units, such as `"1km"` or `"2mi"` (2 miles). 
The full list of units is listed below: @@ -552,7 +557,7 @@ Kilometer:: `km` or `kilometers` Meter:: `m` or `meters` Centimeter:: `cm` or `centimeters` Millimeter:: `mm` or `millimeters` -Nautical mile:: `NM`, `nmi` or `nauticalmiles` +Nautical mile:: `NM`, `nmi`, or `nauticalmiles` [[fuzziness]] [float] @@ -568,20 +573,21 @@ make it the same as another string. The `fuzziness` parameter can be specified as: +[horizontal] `0`, `1`, `2`:: -the maximum allowed Levenshtein Edit Distance (or number of edits) +The maximum allowed Levenshtein Edit Distance (or number of edits) `AUTO`:: + -- -generates an edit distance based on the length of the term. -Low and high distance arguments may be optionally provided `AUTO:[low],[high]`, if not specified, +Generates an edit distance based on the length of the term. +Low and high distance arguments may be optionally provided `AUTO:[low],[high]`. If not specified, the default values are 3 and 6, equivalent to `AUTO:3,6` that make for lengths: -`0..2`:: must match exactly -`3..5`:: one edit allowed -`>5`:: two edits allowed +`0..2`:: Must match exactly +`3..5`:: One edit allowed +`>5`:: Two edits allowed `AUTO` should generally be the preferred value for `fuzziness`. -- @@ -681,9 +687,9 @@ The type of the content sent in a request body must be specified using the `Content-Type` header. The value of this header must map to one of the supported formats that the API supports. Most APIs support JSON, YAML, CBOR, and SMILE. The bulk and multi-search APIs support NDJSON, -JSON and SMILE; other types will result in an error response. +JSON, and SMILE; other types will result in an error response. -Additionally, when using the `source` query string parameter the +Additionally, when using the `source` query string parameter, the content type must be specified using the `source_content_type` query string parameter. @@ -692,7 +698,7 @@ string parameter. Many users use a proxy with URL-based access control to secure access to Elasticsearch indices. For <>, -<> and <> requests, the user has +<>, and <> requests, the user has the choice of specifying an index in the URL and on each individual request within the request body. This can make URL-based access control challenging. diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc index 0da752aba42d2..743bae7b85679 100644 --- a/docs/reference/cat.asciidoc +++ b/docs/reference/cat.asciidoc @@ -40,7 +40,7 @@ Might respond with: id host ip node u_n93zwxThWHi1PDBJAGAg 127.0.0.1 127.0.0.1 u_n93zw -------------------------------------------------- -// TESTRESPONSE[s/u_n93zw(xThWHi1PDBJAGAg)?/.+/ _cat] +// TESTRESPONSE[s/u_n93zw(xThWHi1PDBJAGAg)?/.+/ non_json] [float] [[help]] @@ -64,7 +64,7 @@ host | h | host name ip | | ip address node | n | node name -------------------------------------------------- -// TESTRESPONSE[s/[|]/[|]/ _cat] +// TESTRESPONSE[s/[|]/[|]/ non_json] NOTE: `help` is not supported if any optional url parameter is used. 
For example `GET _cat/shards/twitter?help` or `GET _cat/indices/twi*?help` @@ -90,7 +90,7 @@ Responds with: -------------------------------------------------- 127.0.0.1 9300 27 sLBaIGK -------------------------------------------------- -// TESTRESPONSE[s/9300 27 sLBaIGK/\\d+ \\d+ .+/ _cat] +// TESTRESPONSE[s/9300 27 sLBaIGK/\\d+ \\d+ .+/ non_json] You can also request multiple columns using simple wildcards like `/_cat/thread_pool?h=ip,queue*` to get all headers (or aliases) starting diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index 394231e448dc0..a46a4c658d4b0 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -49,7 +49,7 @@ alias2 test1 * - - alias3 test1 - 1 1 alias4 test1 - 2 1,2 -------------------------------------------------- -// TESTRESPONSE[s/[*]/[*]/ _cat] +// TESTRESPONSE[s/[*]/[*]/ non_json] The output shows that `alias2` has configured a filter, and specific routing configurations in `alias3` and `alias4`. diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index a9de182e3c00e..8d2c931665ba1 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -19,7 +19,8 @@ shards disk.indices disk.used disk.avail disk.total disk.percent host ip 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] -// TESTRESPONSE[s/CSUXak2/.+/ _cat] +// TESTRESPONSE[s/CSUXak2/.+/ non_json] + +Here we can see that the single shard created has been allocated to the single +node available. -Here we can see that each node has been allocated a single shard and -that they're all using about the same amount of space. diff --git a/docs/reference/cat/count.asciidoc b/docs/reference/cat/count.asciidoc index 28dc39adc8dff..01fec6df73180 100644 --- a/docs/reference/cat/count.asciidoc +++ b/docs/reference/cat/count.asciidoc @@ -19,7 +19,7 @@ Looks like: epoch timestamp count 1475868259 15:24:19 121 -------------------------------------------------- -// TESTRESPONSE[s/1475868259 15:24:19/\\d+ \\d+:\\d+:\\d+/ _cat] +// TESTRESPONSE[s/1475868259 15:24:19/\\d+ \\d+:\\d+:\\d+/ non_json] Or for a single index: @@ -35,7 +35,7 @@ GET /_cat/count/twitter?v epoch timestamp count 1475868259 15:24:20 120 -------------------------------------------------- -// TESTRESPONSE[s/1475868259 15:24:20/\\d+ \\d+:\\d+:\\d+/ _cat] +// TESTRESPONSE[s/1475868259 15:24:20/\\d+ \\d+:\\d+:\\d+/ non_json] NOTE: The document count indicates the number of live documents and does not include deleted documents which have not yet been cleaned up by the merge process. 
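To make the note above concrete, one way to see both figures side by side is to compare the live-document count from `_cat/count` with the `docs.deleted` column of `_cat/indices`; a rough sketch, assuming the `twitter` index used in the examples above:

[source,js]
--------------------------------------------------
GET /_cat/count/twitter?v

GET /_cat/indices/twitter?v&h=index,docs.count,docs.deleted
--------------------------------------------------

The `docs.deleted` figure only drops back toward zero once the segments that still hold the deleted documents have been merged away.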
diff --git a/docs/reference/cat/fielddata.asciidoc b/docs/reference/cat/fielddata.asciidoc index ff71728a4f50b..f94bcd6fe5d78 100644 --- a/docs/reference/cat/fielddata.asciidoc +++ b/docs/reference/cat/fielddata.asciidoc @@ -52,7 +52,7 @@ Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body 544b Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in soul 480b -------------------------------------------------- // TESTRESPONSE[s/544b|480b/\\d+(\\.\\d+)?[tgmk]?b/] -// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ s/soul|body/\\w+/ _cat] +// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ s/soul|body/\\w+/ non_json] Fields can be specified either as a query parameter, or in the URL path: @@ -71,7 +71,7 @@ id host ip node field size Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body 544b -------------------------------------------------- // TESTRESPONSE[s/544b|480b/\\d+(\\.\\d+)?[tgmk]?b/] -// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ _cat] +// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ non_json] And it accepts a comma delimited list: @@ -91,6 +91,6 @@ Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body 544b Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in soul 480b -------------------------------------------------- // TESTRESPONSE[s/544b|480b/\\d+(\\.\\d+)?[tgmk]?b/] -// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ s/soul|body/\\w+/ _cat] +// TESTRESPONSE[s/Nqk-6in[^ ]*/.+/ s/soul|body/\\w+/ non_json] The output shows the individual fielddata for the `body` and `soul` fields, one row per field per node. diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc index 5f053edf30866..883119925fd5f 100644 --- a/docs/reference/cat/health.asciidoc +++ b/docs/reference/cat/health.asciidoc @@ -17,7 +17,7 @@ epoch timestamp cluster status node.total node.data shards pri relo i 1475871424 16:17:04 elasticsearch green 1 1 1 1 0 0 0 0 - 100.0% -------------------------------------------------- // TESTRESPONSE[s/1475871424 16:17:04/\\d+ \\d+:\\d+:\\d+/] -// TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ _cat] +// TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ non_json] It has one option `ts` to disable the timestamping: @@ -35,7 +35,7 @@ which looks like: cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent elasticsearch green 1 1 1 1 0 0 0 0 - 100.0% -------------------------------------------------- -// TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ _cat] +// TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ non_json] A common use of this command is to verify the health is consistent across nodes: diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 2a5b865fefa47..653889dac8315 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -21,7 +21,7 @@ yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 260b 260b -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] -// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat] +// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ non_json] We can tell quickly how many shards make up an index, the number of docs, deleted docs, primary store size, and total store size (all shards including replicas). 
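If only some of those columns are of interest, the `h` parameter can request just them; a sketch, reusing the `twitter` index from the examples and standard `_cat/indices` column names:

[source,js]
--------------------------------------------------
GET /_cat/indices/twitter?v&h=index,pri,rep,docs.count,docs.deleted,pri.store.size,store.size
--------------------------------------------------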
@@ -64,7 +64,7 @@ health status index uuid pri rep docs.count docs.deleted st yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] -// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ/.+/ _cat] +// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ/.+/ non_json] Which index has the largest number of documents? @@ -84,7 +84,7 @@ yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 260b 260b -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] -// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat] +// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ non_json] How many merge operations have the shards for the `twitter` completed? @@ -102,7 +102,7 @@ Might look like: health index pri rep docs.count mt pri.mt yellow twitter 1 1 1200 16 16 -------------------------------------------------- -// TESTRESPONSE[s/16/\\d+/ _cat] +// TESTRESPONSE[s/16/\\d+/ non_json] How much memory is used per index? @@ -122,4 +122,4 @@ twitter 8.1gb twitter2 30.5kb -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] -// TESTRESPONSE[_cat] +// TESTRESPONSE[non_json] diff --git a/docs/reference/cat/master.asciidoc b/docs/reference/cat/master.asciidoc index dfa10d6e3a4e6..a38baa455caee 100644 --- a/docs/reference/cat/master.asciidoc +++ b/docs/reference/cat/master.asciidoc @@ -17,7 +17,7 @@ might respond: id host ip node YzWoH_2BT-6UjVGDyPdqYg 127.0.0.1 127.0.0.1 YzWoH_2 -------------------------------------------------- -// TESTRESPONSE[s/YzWoH_2.+/.+/ _cat] +// TESTRESPONSE[s/YzWoH_2.+/.+/ non_json] This information is also available via the `nodes` command, but this is slightly shorter when all you want to do, for example, is verify diff --git a/docs/reference/cat/nodeattrs.asciidoc b/docs/reference/cat/nodeattrs.asciidoc index 2b893a4c79b11..e5c335f7c375f 100644 --- a/docs/reference/cat/nodeattrs.asciidoc +++ b/docs/reference/cat/nodeattrs.asciidoc @@ -22,7 +22,7 @@ node-0 127.0.0.1 127.0.0.1 testattr test ... -------------------------------------------------- // TESTRESPONSE[s/\.\.\.\n$/\n(.+ xpack\\.installed true\n)?\n/] -// TESTRESPONSE[s/\.\.\.\n/(.+ ml\\..+\n)*/ _cat] +// TESTRESPONSE[s/\.\.\.\n/(.+ ml\\..+\n)*/ non_json] // If xpack is not installed then neither ... with match anything // If xpack is installed then the first ... contains ml attributes // and the second contains xpack.installed=true @@ -68,7 +68,7 @@ node-0 19566 testattr test -------------------------------------------------- // TESTRESPONSE[s/19566/\\d*/] // TESTRESPONSE[s/\.\.\.\n$/\n(.+ xpack\\.installed true\n)?\n/] -// TESTRESPONSE[s/\.\.\.\n/(.+ ml\\..+\n)*/ _cat] +// TESTRESPONSE[s/\.\.\.\n/(.+ ml\\..+\n)*/ non_json] // If xpack is not installed then neither ... with match anything // If xpack is installed then the first ... 
contains ml attributes // and the second contains xpack.installed=true diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index 151ce80196b50..06ea5697c785c 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -18,7 +18,7 @@ ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master -------------------------------------------------- // TESTRESPONSE[s/3.07/(\\d+\\.\\d+( \\d+\\.\\d+ (\\d+\\.\\d+)?)?)?/] // TESTRESPONSE[s/65 99 42/\\d+ \\d+ \\d+/] -// TESTRESPONSE[s/[*]/[*]/ s/mJw06l1/.+/ _cat] +// TESTRESPONSE[s/[*]/[*]/ s/mJw06l1/.+/ non_json] The first few columns (`ip`, `heap.percent`, `ram.percent`, `cpu`, `load_*`) tell you where your nodes live and give a quick picture of performance stats. @@ -63,7 +63,7 @@ Might look like: id ip port v m veJR 127.0.0.1 59938 {version} * -------------------------------------------------- -// TESTRESPONSE[s/veJR/.+/ s/59938/\\d+/ s/[*]/[*]/ _cat] +// TESTRESPONSE[s/veJR/.+/ s/59938/\\d+/ s/[*]/[*]/ non_json] [cols="<,<,<,<,<",options="header",subs="normal"] |======================================================================= diff --git a/docs/reference/cat/pending_tasks.asciidoc b/docs/reference/cat/pending_tasks.asciidoc index d5216c1eb0023..ec923f270c571 100644 --- a/docs/reference/cat/pending_tasks.asciidoc +++ b/docs/reference/cat/pending_tasks.asciidoc @@ -24,6 +24,6 @@ insertOrder timeInQueue priority source 1690 787ms HIGH update-mapping [foo][t] 1691 773ms HIGH update-mapping [foo][t] -------------------------------------------------- -// TESTRESPONSE[s/(\n.+)+/(\\n.+)*/ _cat] +// TESTRESPONSE[s/(\n.+)+/(\\n.+)*/ non_json] // We can't assert anything about the tasks in progress here because we don't // know what might be in progress.... diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc index 5e516fee72cf4..af683a20f37cc 100644 --- a/docs/reference/cat/plugins.asciidoc +++ b/docs/reference/cat/plugins.asciidoc @@ -14,7 +14,7 @@ Might look like: ["source","txt",subs="attributes,callouts"] ------------------------------------------------------------------------------ name component version description -U7321H6 analysis-icu {version_qualified} The ICU Analysis plugin integrates Lucene ICU module into elasticsearch, adding ICU relates analysis components. +U7321H6 analysis-icu {version_qualified} The ICU Analysis plugin integrates the Lucene ICU module into Elasticsearch, adding ICU-related analysis components. U7321H6 analysis-kuromoji {version_qualified} The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into elasticsearch. U7321H6 analysis-nori {version_qualified} The Korean (nori) Analysis plugin integrates Lucene nori analysis module into elasticsearch. U7321H6 analysis-phonetic {version_qualified} The Phonetic Analysis plugin integrates phonetic token filter analysis with elasticsearch. @@ -31,6 +31,6 @@ U7321H6 mapper-size {version_qualified} The Mapper Size plugin allow U7321H6 store-smb {version_qualified} The Store SMB plugin adds support for SMB stores. U7321H6 transport-nio {version_qualified} The nio transport. ------------------------------------------------------------------------------ -// TESTRESPONSE[s/([.()])/\\$1/ s/U7321H6/.+/ _cat] +// TESTRESPONSE[s/([.()])/\\$1/ s/U7321H6/.+/ non_json] We can tell quickly how many plugins per node we have and which versions. 
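As with the other `_cat` commands, the output can be narrowed and ordered using the generic `h` (columns) and `s` (sort) parameters; a small sketch:

[source,js]
--------------------------------------------------
GET /_cat/plugins?v&h=name,component,version&s=component
--------------------------------------------------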
diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc index c4288f882e21e..1477dfb676f9f 100644 --- a/docs/reference/cat/recovery.asciidoc +++ b/docs/reference/cat/recovery.asciidoc @@ -30,7 +30,7 @@ twitter 0 13ms store done n/a n/a 127.0.0.1 node-0 n // TESTRESPONSE[s/100%/0.0%/] // TESTRESPONSE[s/9928/0/] // TESTRESPONSE[s/13ms/\\d+m?s/] -// TESTRESPONSE[s/13/\\d+/ _cat] +// TESTRESPONSE[s/13/\\d+/ non_json] In the above case, the source and target nodes are the same because the recovery type was store, i.e. they were read from local storage on node start. @@ -57,7 +57,7 @@ twitter 0 1252ms peer done 192.168.1.1 192.168.1.2 0 100.0% 0 100.0% // TESTRESPONSE[s/192.168.1.2/127.0.0.1/] // TESTRESPONSE[s/192.168.1.1/n\/a/] // TESTRESPONSE[s/100.0%/0.0%/] -// TESTRESPONSE[s/1252/\\d+/ _cat] +// TESTRESPONSE[s/1252/\\d+/ non_json] We can see in the above listing that our thw twitter shard was recovered from another node. Notice that the recovery type is shown as `peer`. The files and bytes copied are @@ -81,4 +81,4 @@ This will show a recovery of type snapshot in the response i s t ty st rep snap f fp b bp twitter 0 1978ms snapshot done twitter snap_1 79 8.0% 12086 9.0% -------------------------------------------------------------------------------- -// TESTRESPONSE[_cat] +// TESTRESPONSE[non_json] diff --git a/docs/reference/cat/repositories.asciidoc b/docs/reference/cat/repositories.asciidoc index 8caf3c5fd6f73..89daf7748a5d4 100644 --- a/docs/reference/cat/repositories.asciidoc +++ b/docs/reference/cat/repositories.asciidoc @@ -19,6 +19,6 @@ id type repo1 fs repo2 s3 -------------------------------------------------- -// TESTRESPONSE[s/\nrepo2 s3// _cat] +// TESTRESPONSE[s/\nrepo2 s3// non_json] We can quickly see which repositories are registered and their type. diff --git a/docs/reference/cat/segments.asciidoc b/docs/reference/cat/segments.asciidoc index a4c2c54d8eefd..5fa2f66e384a0 100644 --- a/docs/reference/cat/segments.asciidoc +++ b/docs/reference/cat/segments.asciidoc @@ -20,7 +20,7 @@ index shard prirep ip segment generation docs.count docs.deleted size siz test 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true test1 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true -------------------------------------------------- -// TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ _cat] +// TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ non_json] The output shows information about index names and shard numbers in the first two columns. diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index f63e37c6a3d69..345e493375400 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -23,7 +23,7 @@ twitter 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA // TESTRESPONSE[s/3014/\\d+/] // TESTRESPONSE[s/31.1mb/\\d+(\.\\d+)?[kmg]?b/] // TESTRESPONSE[s/192.168.56.10/.*/] -// TESTRESPONSE[s/H5dfFeA/node-0/ _cat] +// TESTRESPONSE[s/H5dfFeA/node-0/ non_json] [float] [[index-pattern]] @@ -49,14 +49,14 @@ twitter 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA // TESTRESPONSE[s/3014/\\d+/] // TESTRESPONSE[s/31.1mb/\\d+(\.\\d+)?[kmg]?b/] // TESTRESPONSE[s/192.168.56.10/.*/] -// TESTRESPONSE[s/H5dfFeA/node-0/ _cat] +// TESTRESPONSE[s/H5dfFeA/node-0/ non_json] [float] [[relocation]] === Relocation -Let's say you've checked your health and you see a relocating +Let's say you've checked your health and you see relocating shards. Where are they from and where are they going? 
[source,js] @@ -72,7 +72,7 @@ A relocating shard will be shown as follows --------------------------------------------------------------------------- twitter 0 p RELOCATING 3014 31.1mb 192.168.56.10 H5dfFeA -> -> 192.168.56.30 bGG90GE --------------------------------------------------------------------------- -// TESTRESPONSE[_cat] +// TESTRESPONSE[non_json] [float] [[states]] @@ -95,7 +95,7 @@ You can get the initializing state in the response like this twitter 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA twitter 0 r INITIALIZING 0 14.3mb 192.168.56.30 bGG90GE --------------------------------------------------------------------------- -// TESTRESPONSE[_cat] +// TESTRESPONSE[non_json] If a shard cannot be assigned, for example you've overallocated the number of replicas for the number of nodes in the cluster, the shard @@ -119,7 +119,7 @@ twitter 0 r STARTED 3014 31.1mb 192.168.56.30 bGG90GE twitter 0 r STARTED 3014 31.1mb 192.168.56.20 I8hydUG twitter 0 r UNASSIGNED ALLOCATION_FAILED --------------------------------------------------------------------------- -// TESTRESPONSE[_cat] +// TESTRESPONSE[non_json] [float] [[reason-unassigned]] diff --git a/docs/reference/cat/snapshots.asciidoc b/docs/reference/cat/snapshots.asciidoc index 5677a0f2a7cd4..e8c5320043026 100644 --- a/docs/reference/cat/snapshots.asciidoc +++ b/docs/reference/cat/snapshots.asciidoc @@ -26,7 +26,7 @@ snap2 SUCCESS 1445634298 23:04:58 1445634672 23:11:12 6.2m 2 // TESTRESPONSE[s/\d+:\d+:\d+/\\d+:\\d+:\\d+/] // TESTRESPONSE[s/1 4 1 5/\\d+ \\d+ \\d+ \\d+/] // TESTRESPONSE[s/2 10 0 10/\\d+ \\d+ \\d+ \\d+/] -// TESTRESPONSE[_cat] +// TESTRESPONSE[non_json] Each snapshot contains information about when it was started and stopped. Start and stop timestamps are available in two formats. diff --git a/docs/reference/cat/templates.asciidoc b/docs/reference/cat/templates.asciidoc index 304459ba96edb..41d27cfbf0306 100644 --- a/docs/reference/cat/templates.asciidoc +++ b/docs/reference/cat/templates.asciidoc @@ -28,7 +28,7 @@ template0 [te*] 0 template1 [tea*] 1 template2 [teak*] 2 7 -------------------------------------------------- -// TESTRESPONSE[s/\*/\\*/ s/\[/\\[/ s/\]/\\]/ _cat] +// TESTRESPONSE[s/\*/\\*/ s/\[/\\[/ s/\]/\\]/ non_json] The output shows that there are three existing templates, with template2 having a version value. diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index d1ea1fad88515..f63abc8903650 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -23,7 +23,7 @@ node-0 flush 0 0 0 node-0 write 0 0 0 -------------------------------------------------- // TESTRESPONSE[s/\.\.\./(node-0 \\S+ 0 0 0\n)*/] -// TESTRESPONSE[s/\d+/\\d+/ _cat] +// TESTRESPONSE[s/\d+/\\d+/ non_json] // The substitutions do two things: // 1. Expect any number of extra thread pools. This allows us to only list a // few thread pools. The list would be super long otherwise. 
In addition, @@ -59,7 +59,7 @@ ml_autodetect (default distro only) ml_datafeed (default distro only) ml_utility (default distro only) refresh -rollup_indexing (default distro only)` +rollup_indexing (default distro only) search security-token-key (default distro only) snapshot @@ -107,7 +107,7 @@ which looks like: id name active rejected completed 0EWUhXeBQtaVGlexUeVwMg generic 0 0 70 -------------------------------------------------- -// TESTRESPONSE[s/0EWUhXeBQtaVGlexUeVwMg/[\\w-]+/ s/\d+/\\d+/ _cat] +// TESTRESPONSE[s/0EWUhXeBQtaVGlexUeVwMg/[\\w-]+/ s/\d+/\\d+/ non_json] Here the host columns and the active, rejected and completed suggest thread pool statistics are displayed. diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc index f64fb7e91d665..e2e91334402f7 100644 --- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc @@ -6,8 +6,6 @@ Delete auto-follow pattern ++++ -beta[] - Delete auto-follow patterns. ==== Description diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index 3db92ce6222b0..9eb18b0aa00b9 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -6,8 +6,6 @@ Get auto-follow pattern ++++ -beta[] - Get auto-follow patterns. ==== Description diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index f1a4a974602cb..3ed6cd947028e 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -6,8 +6,6 @@ Create auto-follow pattern ++++ -beta[] - Creates an auto-follow pattern. ==== Description diff --git a/docs/reference/ccr/apis/ccr-apis.asciidoc b/docs/reference/ccr/apis/ccr-apis.asciidoc index 2009742c8322b..b737167ce3165 100644 --- a/docs/reference/ccr/apis/ccr-apis.asciidoc +++ b/docs/reference/ccr/apis/ccr-apis.asciidoc @@ -1,9 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ccr-apis]] -== Cross-cluster replication APIs - -beta[] +== {ccr-cap} APIs You can use the following APIs to perform {ccr} operations. @@ -21,6 +19,7 @@ You can use the following APIs to perform {ccr} operations. 
* <> * <> * <> +* <> * <> * <> @@ -40,6 +39,7 @@ include::follow/put-follow.asciidoc[] include::follow/post-pause-follow.asciidoc[] include::follow/post-resume-follow.asciidoc[] include::follow/post-unfollow.asciidoc[] +include::follow/post-forget-follower.asciidoc[] include::follow/get-follow-stats.asciidoc[] include::follow/get-follow-info.asciidoc[] diff --git a/docs/reference/ccr/apis/follow-request-body.asciidoc b/docs/reference/ccr/apis/follow-request-body.asciidoc index e7e6ae2e26a05..d8fb725f02b14 100644 --- a/docs/reference/ccr/apis/follow-request-body.asciidoc +++ b/docs/reference/ccr/apis/follow-request-body.asciidoc @@ -1,4 +1,3 @@ -[role="xpack"] [testenv="platinum"] `max_read_request_operation_count`:: (integer) the maximum number of operations to pull per read from the remote diff --git a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc index 212b1167b6e33..eca2f5e8e98f9 100644 --- a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc @@ -6,8 +6,6 @@ Get follower info ++++ -beta[] - Retrieves information about all follower indices. ==== Description diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index 8c02582e01278..840d9da22f6e4 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -6,8 +6,6 @@ Get follower stats ++++ -beta[] - Get follower stats. ==== Description @@ -69,8 +67,8 @@ The `indices` array consists of objects containing two fields: The `shards` array consists of objects containing the following fields: `indices[].shards[].remote_cluster`:: - (string) the > containing the leader - index + (string) The <> containing the leader + index. `indices[].shards[].leader_index`:: (string) the name of the index in the leader cluster being followed diff --git a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc new file mode 100644 index 0000000000000..50cefc82009fe --- /dev/null +++ b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc @@ -0,0 +1,152 @@ +[role="xpack"] +[testenv="platinum"] +[[ccr-post-forget-follower]] +=== Forget Follower API +++++ +Forget Follower +++++ + +Removes the follower retention leases from the leader. + +==== Description + +A following index takes out retention leases on its leader index. These +retention leases are used to increase the likelihood that the shards of the +leader index retain the history of operations that the shards of the following +index need to execute replication. When a follower index is converted to a +regular index via the <> (either via explicit +execution of this API, or implicitly via {ilm}), these retention leases are +removed. However, removing these retention leases can fail (e.g., if the remote +cluster containing the leader index is unavailable). While these retention +leases will eventually expire on their own, their extended existence can cause +the leader index to hold more history than necessary, and prevent {ilm} from +performing some operations on the leader index. This API exists to enable +manually removing these retention leases when the unfollow API was unable to do +so. + +NOTE: This API does not stop replication by a following index. 
If you use this +API targeting a follower index that is still actively following, the following +index will add back retention leases on the leader. The only purpose of this API +is to handle the case of failure to remove the following retention leases after +the <> is invoked. + +==== Request + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT /follower_index/_ccr/follow?wait_for_active_shards=1 +{ + "remote_cluster" : "remote_cluster", + "leader_index" : "leader_index" +} +-------------------------------------------------- +// CONSOLE +// TESTSETUP +// TEST[setup:remote_cluster_and_leader_index] + +[source,js] +-------------------------------------------------- +POST /follower_index/_ccr/pause_follow +-------------------------------------------------- +// CONSOLE +// TEARDOWN + +////////////////////////// + +[source,js] +-------------------------------------------------- +POST //_ccr/forget_follower +{ + "follower_cluster" : "", + "follower_index" : "", + "follower_index_uuid" : "", + "leader_remote_cluster" : "" +} +-------------------------------------------------- +// CONSOLE +// TEST[s//leader_index/] +// TEST[s//follower_cluster/] +// TEST[s//follower_index/] +// TEST[s//follower_index_uuid/] +// TEST[s//leader_remote_cluster/] +// TEST[skip_shard_failures] + +[source,js] +-------------------------------------------------- +{ + "_shards" : { + "total" : 1, + "successful" : 1, + "failed" : 0, + "failures" : [ ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"total" : 1/"total" : $body._shards.total/] +// TESTRESPONSE[s/"successful" : 1/"successful" : $body._shards.successful/] +// TESTRESPONSE[s/"failed" : 0/"failed" : $body._shards.failed/] +// TESTRESPONSE[s/"failures" : \[ \]/"failures" : $body._shards.failures/] + +==== Path Parameters + +`leader_index` (required):: + (string) the name of the leader index + +==== Request Body +`follower_cluster` (required):: + (string) the name of the cluster containing the follower index + +`follower_index` (required):: + (string) the name of the follower index + +`follower_index_uuid` (required):: + (string) the UUID of the follower index + +`leader_remote_cluster` (required):: + (string) the alias (from the perspective of the cluster containing the + follower index) of the <> containing + the leader index + +==== Authorization + +If the {es} {security-features} are enabled, you must have `manage_leader_index` +index privileges for the leader index. For more information, see +{stack-ov}/security-privileges.html[Security privileges]. + +==== Example + +This example removes the follower retention leases for `follower_index` from +`leader_index`. 
+ +[source,js] +-------------------------------------------------- +POST /leader_index/_ccr/forget_follower +{ + "follower_cluster" : "follower_cluster", + "follower_index" : "follower_index", + "follower_index_uuid" : "vYpnaWPRQB6mNspmoCeYyA", + "leader_remote_cluster" : "leader_cluster" +} +-------------------------------------------------- +// CONSOLE +// TEST[skip_shard_failures] + +The API returns the following result: + +[source,js] +-------------------------------------------------- +{ + "_shards" : { + "total" : 1, + "successful" : 1, + "failed" : 0, + "failures" : [ ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"total" : 1/"total" : $body._shards.total/] +// TESTRESPONSE[s/"successful" : 1/"successful" : $body._shards.successful/] +// TESTRESPONSE[s/"failed" : 0/"failed" : $body._shards.failed/] +// TESTRESPONSE[s/"failures" : \[ \]/"failures" : $body._shards.failures/] diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index f5b0bef7b2994..60de85cabdcbd 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -6,8 +6,6 @@ Pause follower ++++ -beta[] - Pauses a follower index. ==== Description diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index 736061f2bfde8..279f4139cdddf 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -6,8 +6,6 @@ Resume follower ++++ -beta[] - Resumes a follower index. ==== Description diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc index c3126d02d1efc..236d2723a94dc 100644 --- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc @@ -6,8 +6,6 @@ Unfollow ++++ -beta[] - Converts a follower index to a regular index. ==== Description diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index 52253d6ad2f4c..8098fcff1cd53 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -6,8 +6,6 @@ Create follower ++++ -beta[] - Creates a follower index. ==== Description diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index 8949de8787fa7..03f2f3eee0c0b 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -1,13 +1,11 @@ [role="xpack"] [testenv="platinum"] [[ccr-get-stats]] -=== Get cross-cluster replication stats API +=== Get {ccr} stats API ++++ Get CCR stats ++++ -beta[] - Get {ccr} stats. ==== Description diff --git a/docs/reference/ccr/auto-follow.asciidoc b/docs/reference/ccr/auto-follow.asciidoc index a7f4b95f42202..580b2b11d7244 100644 --- a/docs/reference/ccr/auto-follow.asciidoc +++ b/docs/reference/ccr/auto-follow.asciidoc @@ -3,8 +3,6 @@ [[ccr-auto-follow]] === Automatically following indices -beta[] - In time series use cases where you want to follow new indices that are periodically created (such as daily Beats indices), manually configuring follower indices for each new leader index can be an operational burden. 
The auto-follow diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index 7c59b8628052f..24304fea7642a 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -3,8 +3,6 @@ [[ccr-getting-started]] == Getting started with {ccr} -beta[] - This getting-started guide for {ccr} shows you how to: * < - } - } + "number_of_replicas" : 0 } }, "mappings" : { @@ -216,7 +202,6 @@ PUT /server-metrics -------------------------------------------------- // CONSOLE // TEST[continued] -<1> Sets that up to 1024 soft deletes will be retained. [float] [[ccr-getting-started-follower-index]] @@ -253,6 +238,11 @@ PUT /server-metrics-copy/_ccr/follow?wait_for_active_shards=1 ////////////////////////// +The follower index is initialized using the <> +process. The remote recovery process transfers the existing Lucene segment files +from the leader to the follower. When the remote recovery process is complete, +the index following begins. + Now when you index documents into your leader index, you will see these documents replicated in the follower index. You can inspect the status of replication using the @@ -335,4 +325,4 @@ Alternatively, you can manage auto-follow patterns on the *Management / Elasticsearch / Cross Cluster Replication* page in {kib}: [role="screenshot"] -image::ml/images/auto-follow-patterns.jpg["The Auto-follow patterns page in {kib}"] +image::images/auto-follow-patterns.jpg["The Auto-follow patterns page in {kib}"] diff --git a/docs/reference/ccr/index.asciidoc b/docs/reference/ccr/index.asciidoc index be281d05c05f3..ba2fa0d1e7820 100644 --- a/docs/reference/ccr/index.asciidoc +++ b/docs/reference/ccr/index.asciidoc @@ -1,14 +1,12 @@ [role="xpack"] [testenv="platinum"] [[xpack-ccr]] -= Cross-cluster replication += {ccr-cap} [partintro] -- -beta[] - -The {ccr} (CCR) feature enables replication of indices in remote clusters to a +The {ccr} (CCR) feature enables replication of indices in remote clusters to a local cluster. This functionality can be used in some common production use cases: @@ -22,6 +20,7 @@ This guide provides an overview of {ccr}: * <> * <> * <> +* <> -- @@ -29,3 +28,5 @@ include::overview.asciidoc[] include::requirements.asciidoc[] include::auto-follow.asciidoc[] include::getting-started.asciidoc[] +include::remote-recovery.asciidoc[] +include::upgrading.asciidoc[] diff --git a/docs/reference/ccr/overview.asciidoc b/docs/reference/ccr/overview.asciidoc index 0ad9039d8710d..5a7f4bda4289e 100644 --- a/docs/reference/ccr/overview.asciidoc +++ b/docs/reference/ccr/overview.asciidoc @@ -3,9 +3,8 @@ [[ccr-overview]] == Overview -beta[] -Cross-cluster replication is done on an index-by-index basis. Replication is +{ccr-cap} is done on an index-by-index basis. Replication is configured at the index level. For each configured replication there is a replication source index called the _leader index_ and a replication target index called the _follower index_. @@ -18,19 +17,58 @@ Replication is pull-based. This means that replication is driven by the follower index. This simplifies state management on the leader index and means that {ccr} does not interfere with indexing on the leader index. +IMPORTANT: {ccr-cap} requires {ref}/modules-remote-clusters.html[remote clusters]. 
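The remote cluster connection is configured on the cluster that will host the follower indices, before any following is set up. A minimal sketch, assuming an arbitrary alias of `leader_cluster` and a placeholder seed address that would need to be replaced with a real transport endpoint:

[source,js]
--------------------------------------------------
PUT /_cluster/settings
{
  "persistent": {
    "cluster": {
      "remote": {
        "leader_cluster": {
          "seeds": ["127.0.0.1:9300"]
        }
      }
    }
  }
}
--------------------------------------------------

The connection can then be verified with `GET /_remote/info` before any follower indices are created.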
+ [float] === Configuring replication Replication can be configured in two ways: -* Manually using the - {ref}/ccr-put-follow.html[create follower API] +* Manually creating specific follower indices (in {kib} or by using the +{ref}/ccr-put-follow.html[create follower API]) + +* Automatically creating follower indices from auto-follow patterns (in {kib} or +by using the {ref}/ccr-put-auto-follow-pattern.html[create auto-follow pattern API]) -* Automatically using - <> +For more information about managing {ccr} in {kib}, see +{kibana-ref}/working-remote-clusters.html[Working with remote clusters]. NOTE: You must also <>. +When you initiate replication either manually or through an auto-follow pattern, the +follower index is created on the local cluster. Once the follower index is created, +the <> process copies all of the Lucene segment +files from the remote cluster to the local cluster. + +By default, if you initiate following manually (by using {kib} or the create follower API), +the recovery process is asynchronous in relationship to the +{ref}/ccr-put-follow.html[create follower request]. The request returns before +the <> process completes. If you would like to wait on +the process to complete, you can use the `wait_for_active_shards` parameter. + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT /follower_index/_ccr/follow?wait_for_active_shards=1 +{ + "remote_cluster" : "remote_cluster", + "leader_index" : "leader_index" +} +-------------------------------------------------- +// CONSOLE +// TESTSETUP +// TEST[setup:remote_cluster_and_leader_index] + +[source,js] +-------------------------------------------------- +POST /follower_index/_ccr/pause_follow +-------------------------------------------------- +// CONSOLE +// TEARDOWN + +////////////////////////// + [float] === The mechanics of replication @@ -58,7 +96,7 @@ If a read request fails, the cause of the failure is inspected. If the cause of the failure is deemed to be a failure that can be recovered from (for example, a network failure), the follower shard task enters into a retry loop. Otherwise, the follower shard task is paused and requires user -intervention before the it can be resumed with the +intervention before it can be resumed with the {ref}/ccr-post-resume-follow.html[resume follower API]. When operations are received by the follower shard task, they are placed in a @@ -71,6 +109,10 @@ limits, no additional read requests are sent by the follower shard task. The follower shard task resumes sending read requests when the write buffer no longer exceeds its configured limits. +NOTE: The intricacies of how operations are replicated from the leader are +governed by settings that you can configure when you create the follower index +in {kib} or by using the {ref}/ccr-put-follow.html[create follower API]. + Mapping updates applied to the leader index are automatically retrieved as-needed by the follower index. @@ -104,6 +146,68 @@ Using these APIs in tandem enables you to adjust the read and write parameters on the follower shard task if your initial configuration is not suitable for your use case. +[float] +=== Leader index retaining operations for replication + +If the follower is unable to replicate operations from a leader for a period of +time, the following process can fail due to the leader lacking a complete history +of operations necessary for replication. 
+ +Operations replicated to the follower are identified using a sequence number +generated when the operation was initially performed. Lucene segment files are +occasionally merged in order to optimize searches and save space. When these +merges occur, it is possible for operations associated with deleted or updated +documents to be pruned during the merge. When the follower requests the sequence +number for a pruned operation, the process will fail due to the operation missing +on the leader. + +This scenario is not possible in an append-only workflow. As documents are never +deleted or updated, the underlying operation will not be pruned. + +Elasticsearch attempts to mitigate this potential issue for update workflows using +a Lucene feature called soft deletes. When a document is updated or deleted, the +underlying operation is retained in the Lucene index for a period of time. This +period of time is governed by the `index.soft_deletes.retention_lease.period` +setting, which can be <>. + +When a follower initiates the index following, it acquires a retention lease from +the leader. This informs the leader that it should not allow a soft delete to be +pruned until either the follower indicates that it has received the operation or +the lease expires. It is valuable to have monitoring in place to detect a follower +replication issue prior to the lease expiring so that the problem can be remedied +before the follower falls fatally behind. + +[float] +=== Remedying a follower that has fallen behind + +If a follower falls sufficiently behind a leader that it can no longer replicate +operations, this can be detected in {kib} or by using the +{ref}/ccr-get-follow-stats.html[get follow stats API]. It will be reported as an +`indices[].fatal_exception`. + +In order to restart the follower, you must pause the following process, close the +index, and create the follower index again. For example: + +["source","js"] +---------------------------------------------------------------------- +POST /follower_index/_ccr/pause_follow + +POST /follower_index/_close + +PUT /follower_index/_ccr/follow?wait_for_active_shards=1 +{ + "remote_cluster" : "remote_cluster", + "leader_index" : "leader_index" +} +---------------------------------------------------------------------- +// CONSOLE + +Re-creating the follower index is a destructive action. All of the existing Lucene +segment files are deleted on the follower cluster. The +<> process copies the Lucene segment +files from the leader again. After the follower index initializes, the +following process starts again. + [float] === Terminating replication diff --git a/docs/reference/ccr/remote-recovery.asciidoc b/docs/reference/ccr/remote-recovery.asciidoc new file mode 100644 index 0000000000000..fcf03cfc72814 --- /dev/null +++ b/docs/reference/ccr/remote-recovery.asciidoc @@ -0,0 +1,29 @@ +[role="xpack"] +[testenv="platinum"] +[[remote-recovery]] +== Remote recovery + +When you create a follower index, you cannot use it until it is fully initialized. +The _remote recovery_ process builds a new copy of a shard on a follower node by +copying data from the primary shard in the leader cluster. {es} uses this remote +recovery process to bootstrap a follower index using the data from the leader index. +This process provides the follower with a copy of the current state of the leader index, +even if a complete history of changes is not available on the leader due to Lucene +segment merging.
+ +Remote recovery is a network intensive process that transfers all of the Lucene +segment files from the leader cluster to the follower cluster. The follower +requests that a recovery session be initiated on the primary shard in the leader +cluster. The follower then requests file chunks concurrently from the leader. By +default, the process concurrently requests `5` large `1mb` file chunks. This default +behavior is designed to support leader and follower clusters with high network latency +between them. + +There are dynamic settings that you can use to rate-limit the transmitted data +and manage the resources consumed by remote recoveries. See +{ref}/ccr-settings.html[{ccr-cap} settings]. + +You can obtain information about an in-progress remote recovery by using the +{ref}/cat-recovery.html[recovery API] on the follower cluster. Remote recoveries +are implemented using the {ref}/modules-snapshots.html[snapshot and restore] infrastructure. This means that on-going remote recoveries are labelled as type +`snapshot` in the recovery API. diff --git a/docs/reference/ccr/requirements.asciidoc b/docs/reference/ccr/requirements.asciidoc index 4736d641296cf..91fbd2c755328 100644 --- a/docs/reference/ccr/requirements.asciidoc +++ b/docs/reference/ccr/requirements.asciidoc @@ -3,9 +3,7 @@ [[ccr-requirements]] === Requirements for leader indices -beta[] - -Cross-cluster replication works by replaying the history of individual write +{ccr-cap} works by replaying the history of individual write operations that were performed on the shards of the leader index. This means that the history of these operations needs to be retained on the leader shards so that they can be pulled by the follower shard tasks. The underlying mechanism used to @@ -34,11 +32,13 @@ Whether or not soft deletes are enabled on the index. Soft deletes can only be configured at index creation and only on indices created on or after 6.5.0. The default value is `true`. -`index.soft_deletes.retention.operations`:: +`index.soft_deletes.retention_lease.period`:: -The number of soft deletes to retain. Soft deletes are collected during merges -on the underlying Lucene index yet retained up to the number of operations -configured by this setting. The default value is `0`. +The maximum period to retain a shard history retention lease before it is considered +expired. Shard history retention leases ensure that soft deletes are retained during +merges on the Lucene index. If a soft delete is merged away before it can be replicated +to a follower the following process will fail due to incomplete history on the leader. +The default value is `12h`. For more information about index settings, see {ref}/index-modules.html[Index modules]. diff --git a/docs/reference/ccr/upgrading.asciidoc b/docs/reference/ccr/upgrading.asciidoc new file mode 100644 index 0000000000000..4f8d8409b5bc8 --- /dev/null +++ b/docs/reference/ccr/upgrading.asciidoc @@ -0,0 +1,48 @@ +[role="xpack"] +[testenv="platinum"] +[[ccr-upgrading]] +== Upgrading clusters + +Clusters that are actively using {ccr} require a careful approach to upgrades. +Otherwise index following may fail during a rolling upgrade, because of the +following reasons: + +* If a new index setting or mapping type is replicated from an upgraded cluster + to a non-upgraded cluster then the non-upgraded cluster will reject that and + will fail index following. 
+* Lucene is not forwards compatible and when index following is falling back to + file based recovery then a node in a non-upgraded cluster will reject index + files from a newer Lucene version compared to what it is using. + +Rolling upgrades of clusters that use {ccr} are handled differently for uni-directional +index following and bi-directional index following. + +[float] +=== Uni-directional index following + +In a uni-directional setup between two clusters, one cluster contains only +leader indices, and the other cluster contains only follower indices following +indices in the first cluster. + +In this setup, the cluster with follower indices should be upgraded +first and the cluster with leader indices should be upgraded last. +If clusters are upgraded in this order then index following can continue +during the upgrade without downtime. + +Note that a chain index following setup can also be upgraded in this way. +For example, if there is a cluster A that contains all leader indices, +cluster B that follows indices in cluster A, and cluster C that follows +indices in cluster B. In this case cluster C should be upgraded first, +then cluster B, and finally cluster A. + +[float] +=== Bi-directional index following + +In a bi-directional setup between two clusters, each cluster contains both +leader and follower indices. + +When upgrading clusters in this setup, all index following needs to be paused +using the {ref}/ccr-post-pause-follow.html[pause follower API] prior to +upgrading both clusters. After both clusters have been upgraded, index +following can be resumed using the +{ref}/ccr-post-resume-follow.html[resume follower API]. diff --git a/docs/reference/cluster/nodes-info.asciidoc b/docs/reference/cluster/nodes-info.asciidoc index 9d53c8715d515..c51167ca9f974 100644 --- a/docs/reference/cluster/nodes-info.asciidoc +++ b/docs/reference/cluster/nodes-info.asciidoc @@ -143,7 +143,7 @@ The result will look similar to: "ip": "192.168.17", "version": "{version}", "build_flavor": "{build_flavor}", - "build_type": "zip", + "build_type": "{build_type}", "build_hash": "587409e", "roles": [ "master", @@ -224,7 +224,7 @@ The result will look similar to: "ip": "192.168.17", "version": "{version}", "build_flavor": "{build_flavor}", - "build_type": "zip", + "build_type": "{build_type}", "build_hash": "587409e", "roles": [], "attributes": {}, diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 4bd3c2c9647a5..bb24dffd40f7d 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -125,11 +125,6 @@ information that concern the file system: `fs.data.available_in_bytes`:: Total number of bytes available to this Java virtual machine on this file store -`fs.data.spins` (Linux only):: Indicates if the file store is backed by spinning storage. - `null` means we could not determine it, `true` means the device possibly spins - and `false` means it does not (ex: solid-state disks). - `fs.io_stats.devices` (Linux only):: Array of disk metrics for each device that is backing an Elasticsearch data path. These disk metrics are probed periodically diff --git a/docs/reference/cluster/remote-info.asciidoc b/docs/reference/cluster/remote-info.asciidoc index a53a26873ce98..9e78367bc02b3 100644 --- a/docs/reference/cluster/remote-info.asciidoc +++ b/docs/reference/cluster/remote-info.asciidoc @@ -31,6 +31,7 @@ the configured remote cluster alias.
`initial_connect_timeout`:: The initial connect timeout for remote cluster connections. +[[skip-unavailable]] `skip_unavailable`:: Whether the remote cluster is skipped in case it is searched through - a cross cluster search request but none of its nodes are available. + a {ccs} request but none of its nodes are available. diff --git a/docs/reference/cluster/state.asciidoc b/docs/reference/cluster/state.asciidoc index a20ff04d83f4a..34bb69a552c66 100644 --- a/docs/reference/cluster/state.asciidoc +++ b/docs/reference/cluster/state.asciidoc @@ -1,8 +1,22 @@ [[cluster-state]] == Cluster State -The cluster state API allows to get a comprehensive state information of -the whole cluster. +The cluster state API allows access to metadata representing the state of the +whole cluster. This includes information such as + +* the set of nodes in the cluster + +* all cluster-level settings + +* information about the indices in the cluster, including their mappings and + settings + +* the locations of all the shards in the cluster + +The response is an internal representation of the cluster state and its format +may change from version to version. If possible, you should obtain any +information from the cluster state using the other, more stable, +<>. [source,js] -------------------------------------------------- @@ -10,35 +24,36 @@ GET /_cluster/state -------------------------------------------------- // CONSOLE -The response provides the cluster name, the total compressed size -of the cluster state (its size when serialized for transmission over -the network), and the cluster state itself, which can be filtered to -only retrieve the parts of interest, as described below. +The response provides the cluster state itself, which can be filtered to only +retrieve the parts of interest as described below. -The cluster's `cluster_uuid` is also returned as part of the top-level -response, in addition to the `metadata` section. added[6.4.0] +The cluster's `cluster_uuid` is also returned as part of the top-level response, +in addition to the `metadata` section. added[6.4.0] NOTE: While the cluster is still forming, it is possible for the `cluster_uuid` to be `_na_` as well as the cluster state's version to be `-1`. -By default, the cluster state request is routed to the master node, to -ensure that the latest cluster state is returned. -For debugging purposes, you can retrieve the cluster state local to a -particular node by adding `local=true` to the query string. +By default, the cluster state request is routed to the master node, to ensure +that the latest cluster state is returned. For debugging purposes, you can +retrieve the cluster state local to a particular node by adding `local=true` to +the query string. [float] === Response Filters -As the cluster state can grow (depending on the number of shards and indices, your mapping, templates), -it is possible to filter the cluster state response specifying the parts in the URL. +The cluster state contains information about all the indices in the cluster, +including their mappings, as well as templates and other metadata. This means it +can sometimes be quite large. 
To avoid the need to process all this information +you can request only the part of the cluster state that you need: [source,js] -------------------------------------------------- +GET /_cluster/state/{metrics} GET /_cluster/state/{metrics}/{indices} -------------------------------------------------- // CONSOLE -`metrics` can be a comma-separated list of +`{metrics}` is a comma-separated list of the following options. `version`:: Shows the cluster state version. @@ -50,15 +65,23 @@ GET /_cluster/state/{metrics}/{indices} Shows the `nodes` part of the response `routing_table`:: - Shows the `routing_table` part of the response. If you supply a comma separated list of indices, the returned output will only contain the indices listed. + Shows the `routing_table` part of the response. If you supply a comma + separated list of indices, the returned output will only contain the routing + table for these indices. `metadata`:: - Shows the `metadata` part of the response. If you supply a comma separated list of indices, the returned output will only contain the indices listed. + Shows the `metadata` part of the response. If you supply a comma separated + list of indices, the returned output will only contain metadata for these + indices. `blocks`:: - Shows the `blocks` part of the response + Shows the `blocks` part of the response. + +`_all`:: + Shows all metrics. -The following example returns only `metadata` and `routing_table` data for the `foo` and `bar` indices: +The following example returns only `metadata` and `routing_table` data for the +`foo` and `bar` indices: [source,js] -------------------------------------------------- @@ -74,7 +97,8 @@ GET /_cluster/state/_all/foo,bar -------------------------------------------------- // CONSOLE -And this example return only `blocks` data: +Finally, this example return only the `blocks` metadata: + [source,js] -------------------------------------------------- GET /_cluster/state/blocks diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 83515e32bf046..38028d8cf109f 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -155,10 +155,12 @@ Will return, for example: "max_uptime_in_millis": 13737, "versions": [ { - "version": "1.8.0_74", - "vm_name": "Java HotSpot(TM) 64-Bit Server VM", - "vm_version": "25.74-b02", + "version": "12", + "vm_name": "OpenJDK 64-Bit Server VM", + "vm_version": "12+33", "vm_vendor": "Oracle Corporation", + "bundled_jdk": true, + "using_bundled_jdk": true, "count": 1 } ], @@ -200,6 +202,7 @@ Will return, for example: // TESTRESPONSE[s/"plugins": \[[^\]]*\]/"plugins": $body.$_path/] // TESTRESPONSE[s/"network_types": \{[^\}]*\}/"network_types": $body.$_path/] // TESTRESPONSE[s/"discovery_types": \{[^\}]*\}/"discovery_types": $body.$_path/] +// TESTRESPONSE[s/: true|false/: $body.$_path/] // TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/] // TESTRESPONSE[s/: "[^"]*"/: $body.$_path/] // These replacements do a few things: diff --git a/docs/reference/commands/certgen.asciidoc b/docs/reference/commands/certgen.asciidoc index 2ca489b3a60a9..6087fe8440a0e 100644 --- a/docs/reference/commands/certgen.asciidoc +++ b/docs/reference/commands/certgen.asciidoc @@ -3,7 +3,7 @@ [[certgen]] == elasticsearch-certgen -deprecated[6.1,Replaced by <>.] 
+deprecated[6.1,"Replaced by <>."] The `elasticsearch-certgen` command simplifies the creation of certificate authorities (CA), certificate signing requests (CSR), and signed certificates diff --git a/docs/reference/commands/certutil.asciidoc b/docs/reference/commands/certutil.asciidoc index 06e9dc53bd9b6..6f4d3224d7aeb 100644 --- a/docs/reference/commands/certutil.asciidoc +++ b/docs/reference/commands/certutil.asciidoc @@ -177,14 +177,17 @@ with the `ca` parameter. `--pass `:: Specifies the password for the generated private keys. + -Keys stored in PKCS#12 format are always password protected. +Keys stored in PKCS#12 format are always password protected; however, +this password may be _blank_. If you want to specify a blank password +without a prompt, use `--pass ""` (with no `=`) on the command line. + Keys stored in PEM format are password protected only if the `--pass` parameter is specified. If you do not supply an argument for the `--pass` parameter, you are prompted for a password. -+ -If you want to specify a _blank_ password (without prompting), use -`--pass ""` (with no `=`). +Encrypted PEM files do not support blank passwords (if you do not +wish to password-protect your PEM keys, then do not specify +`--pass`). + `--pem`:: Generates certificates and keys in PEM format instead of PKCS#12. This parameter cannot be used with the `csr` parameter. diff --git a/docs/reference/commands/index.asciidoc b/docs/reference/commands/index.asciidoc index 8f4d178a99296..a13ea58c27d3e 100644 --- a/docs/reference/commands/index.asciidoc +++ b/docs/reference/commands/index.asciidoc @@ -10,6 +10,7 @@ tasks from the command line: * <> * <> * <> +* <> * <> * <> * <> @@ -21,6 +22,7 @@ tasks from the command line: include::certgen.asciidoc[] include::certutil.asciidoc[] include::migrate-tool.asciidoc[] +include::node-tool.asciidoc[] include::saml-metadata.asciidoc[] include::setup-passwords.asciidoc[] include::shard-tool.asciidoc[] diff --git a/docs/reference/commands/node-tool.asciidoc b/docs/reference/commands/node-tool.asciidoc new file mode 100644 index 0000000000000..f070d11aa8fb0 --- /dev/null +++ b/docs/reference/commands/node-tool.asciidoc @@ -0,0 +1,425 @@ +[[node-tool]] +== elasticsearch-node + +The `elasticsearch-node` command enables you to perform certain unsafe +operations on a node that are only possible while it is shut down. This command +allows you to adjust the <> of a node and may be able to +recover some data after a disaster. + +[float] +=== Synopsis + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster + [--ordinal ] [-E ] + [-h, --help] ([-s, --silent] | [-v, --verbose]) +-------------------------------------------------- + +[float] +=== Description + +This tool has three modes: + +* `elasticsearch-node repurpose` can be used to delete unwanted data from a + node if it used to be a <> or a + <> but has been repurposed not to have one + or other of these roles. + +* `elasticsearch-node unsafe-bootstrap` can be used to perform _unsafe cluster + bootstrapping_. It forces one of the nodes to form a brand-new cluster on + its own, using its local copy of the cluster metadata. + +* `elasticsearch-node detach-cluster` enables you to move nodes from one + cluster to another. This can be used to move nodes into a new cluster + created with the `elasticsearch-node unsafe-bootstrap` command. If unsafe + cluster bootstrapping was not possible, it also enables you to move nodes + into a brand-new cluster.
+ +[[node-tool-repurpose]] +[float] +==== Changing the role of a node + +There may be situations where you want to repurpose a node without following +the <>. The `elasticsearch-node +repurpose` tool allows you to delete any excess on-disk data and start a node +after repurposing it. + +The intended use is: + +* Stop the node +* Update `elasticsearch.yml` by setting `node.master` and `node.data` as + desired. +* Run `elasticsearch-node repurpose` on the node +* Start the node + +If you run `elasticsearch-node repurpose` on a node with `node.data: false` and +`node.master: true` then it will delete any remaining shard data on that node, +but it will leave the index and cluster metadata alone. If you run +`elasticsearch-node repurpose` on a node with `node.data: false` and +`node.master: false` then it will delete any remaining shard data and index +metadata, but it will leave the cluster metadata alone. + +[WARNING] +Running this command can lead to data loss for the indices mentioned if the +data contained is not available on other nodes in the cluster. Only run this +tool if you understand and accept the possible consequences, and only after +determining that the node cannot be repurposed cleanly. + +The tool provides a summary of the data to be deleted and asks for confirmation +before making any changes. You can get detailed information about the affected +indices and shards by passing the verbose (`-v`) option. + +[float] +==== Recovering data after a disaster + +Sometimes {es} nodes are temporarily stopped, perhaps because of the need to +perform some maintenance activity or perhaps because of a hardware failure. +After you resolve the temporary condition and restart the node, +it will rejoin the cluster and continue normally. Depending on your +configuration, your cluster may be able to remain completely available even +while one or more of its nodes are stopped. + +Sometimes it might not be possible to restart a node after it has stopped. For +example, the node's host may suffer from a hardware problem that cannot be +repaired. If the cluster is still available then you can start up a fresh node +on another host and {es} will bring this node into the cluster in place of the +failed node. + +Each node stores its data in the data directories defined by the +<>. This means that in a disaster you can +also restart a node by moving its data directories to another host, presuming +that those data directories can be recovered from the faulty host. + +{es} <> in order to elect a master and to update the cluster +state. This means that if you have three master-eligible nodes then the cluster +will remain available even if one of them has failed. However if two of the +three master-eligible nodes fail then the cluster will be unavailable until at +least one of them is restarted. + +In very rare circumstances it may not be possible to restart enough nodes to +restore the cluster's availability. If such a disaster occurs, you should +build a new cluster from a recent snapshot and re-import any data that was +ingested since that snapshot was taken. + +However, if the disaster is serious enough then it may not be possible to +recover from a recent snapshot either. Unfortunately in this case there is no +way forward that does not risk data loss, but it may be possible to use the +`elasticsearch-node` tool to construct a new cluster that contains some of the +data from the failed cluster. 
+ + +[[node-tool-unsafe-bootstrap]] +[float] +===== Unsafe cluster bootstrapping + +If there is at least one remaining master-eligible node, but it is not possible +to restart a majority of them, then the `elasticsearch-node unsafe-bootstrap` +command will unsafely override the cluster's <> as if performing another +<>. +The target node can then form a new cluster on its own by using +the cluster metadata held locally on the target node. + +[WARNING] +These steps can lead to arbitrary data loss since the target node may not hold the latest cluster +metadata, and this out-of-date metadata may make it impossible to use some or +all of the indices in the cluster. + +Since unsafe bootstrapping forms a new cluster containing a single node, once +you have run it you must use the <> to migrate any other surviving nodes from the failed +cluster into this new cluster. + +When you run the `elasticsearch-node unsafe-bootstrap` tool it will analyse the +state of the node and ask for confirmation before taking any action. Before +asking for confirmation it reports the term and version of the cluster state on +the node on which it runs as follows: + +[source,txt] +---- +Current node cluster state (term, version) pair is (4, 12) +---- + +If you have a choice of nodes on which to run this tool then you should choose +one with a term that is as large as possible. If there is more than one +node with the same term, pick the one with the largest version. +This information identifies the node with the freshest cluster state, which minimizes the +quantity of data that might be lost. For example, if the first node reports +`(4, 12)` and a second node reports `(5, 3)`, then the second node is preferred +since its term is larger. However if the second node reports `(3, 17)` then +the first node is preferred since its term is larger. If the second node +reports `(4, 10)` then it has the same term as the first node, but has a +smaller version, so the first node is preferred. + +[WARNING] +Running this command can lead to arbitrary data loss. Only run this tool if you +understand and accept the possible consequences and have exhausted all other +possibilities for recovery of your cluster. + +The sequence of operations for using this tool are as follows: + +1. Make sure you have really lost access to at least half of the +master-eligible nodes in the cluster, and they cannot be repaired or recovered +by moving their data paths to healthy hardware. +2. Stop **all** remaining nodes. +3. Choose one of the remaining master-eligible nodes to become the new elected +master as described above. +4. On this node, run the `elasticsearch-node unsafe-bootstrap` command as shown +below. Verify that the tool reported `Master node was successfully +bootstrapped`. +5. Start this node and verify that it is elected as the master node. +6. Run the <>, described below, on every other node in the cluster. +7. Start all other nodes and verify that each one joins the cluster. +8. Investigate the data in the cluster to discover if any was lost during this +process. + +When you run the tool it will make sure that the node that is being used to +bootstrap the cluster is not running. It is important that all other +master-eligible nodes are also stopped while this tool is running, but the tool +does not check this. + +The message `Master node was successfully bootstrapped` does not mean that +there has been no data loss, it just means that tool was able to complete its +job. 
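As an informal check for step 5 above (a sketch, not part of the documented procedure), you can ask the restarted cluster which node it has elected as master, for example with the cat master API:

[source,js]
--------------------------------------------------
GET _cat/master?v
--------------------------------------------------
// CONSOLE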
+ +[[node-tool-detach-cluster]] +[float] +===== Detaching nodes from their cluster + +It is unsafe for nodes to move between clusters, because different clusters +have completely different cluster metadata. There is no way to safely merge the +metadata from two clusters together. + +To protect against inadvertently joining the wrong cluster, each cluster +creates a unique identifier, known as the _cluster UUID_, when it first starts +up. Every node records the UUID of its cluster and refuses to join a +cluster with a different UUID. + +However, if a node's cluster has permanently failed then it may be desirable to +try and move it into a new cluster. The `elasticsearch-node detach-cluster` +command lets you detach a node from its cluster by resetting its cluster UUID. +It can then join another cluster with a different UUID. + +For example, after unsafe cluster bootstrapping you will need to detach all the +other surviving nodes from their old cluster so they can join the new, +unsafely-bootstrapped cluster. + +Unsafe cluster bootstrapping is only possible if there is at least one +surviving master-eligible node. If there are no remaining master-eligible nodes +then the cluster metadata is completely lost. However, the individual data +nodes also contain a copy of the index metadata corresponding with their +shards. This sometimes allows a new cluster to import these shards as +<>. You can sometimes +recover some indices after the loss of all master-eligible nodes in a cluster +by creating a new cluster and then using the `elasticsearch-node +detach-cluster` command to move any surviving nodes into this new cluster. + +There is a risk of data loss when importing a dangling index because data nodes +may not have the most recent copy of the index metadata and do not have any +information about <>. This +means that a stale shard copy may be selected to be the primary, and some of +the shards may be incompatible with the imported mapping. + +[WARNING] +Execution of this command can lead to arbitrary data loss. Only run this tool +if you understand and accept the possible consequences and have exhausted all +other possibilities for recovery of your cluster. + +The sequence of operations for using this tool are as follows: + +1. Make sure you have really lost access to every one of the master-eligible +nodes in the cluster, and they cannot be repaired or recovered by moving their +data paths to healthy hardware. +2. Start a new cluster and verify that it is healthy. This cluster may comprise +one or more brand-new master-eligible nodes, or may be an unsafely-bootstrapped +cluster formed as described above. +3. Stop **all** remaining data nodes. +4. On each data node, run the `elasticsearch-node detach-cluster` tool as shown +below. Verify that the tool reported `Node was successfully detached from the +cluster`. +5. If necessary, configure each data node to +<>. +6. Start each data node and verify that it has joined the new cluster. +7. Wait for all recoveries to have completed, and investigate the data in the +cluster to discover if any was lost during this process. + +The message `Node was successfully detached from the cluster` does not mean +that there has been no data loss, it just means that tool was able to complete +its job. + + +[float] +=== Parameters + +`repurpose`:: Delete excess data when a node's roles are changed. + +`unsafe-bootstrap`:: Specifies to unsafely bootstrap this node as a new +one-node cluster. 
+ +`detach-cluster`:: Specifies to unsafely detach this node from its cluster so +it can join a different cluster. + +`--ordinal `:: If there is <> then this specifies which node to target. Defaults +to `0`, meaning to use the first node in the data path. + +`-E `:: Configures a setting. + +`-h, --help`:: Returns all of the command parameters. + +`-s, --silent`:: Shows minimal output. + +`-v, --verbose`:: Shows verbose output. + +[float] +=== Examples + +[float] +==== Repurposing a node as a dedicated master node (master: true, data: false) + +In this example, a former data node is repurposed as a dedicated master node. +First update the node's settings to `node.master: true` and `node.data: false` +in its `elasticsearch.yml` config file. Then run the `elasticsearch-node +repurpose` command to find and remove excess shard data: + +[source,txt] +---- +node$ ./bin/elasticsearch-node repurpose + + WARNING: Elasticsearch MUST be stopped before running this tool. + +Found 2 shards in 2 indices to clean up +Use -v to see list of paths and indices affected +Node is being re-purposed as master and no-data. Clean-up of shard data will be performed. +Do you want to proceed? +Confirm [y/N] y +Node successfully repurposed to master and no-data. +---- + +[float] +==== Repurposing a node as a coordinating-only node (master: false, data: false) + +In this example, a node that previously held data is repurposed as a +coordinating-only node. First update the node's settings to `node.master: +false` and `node.data: false` in its `elasticsearch.yml` config file. Then run +the `elasticsearch-node repurpose` command to find and remove excess shard data +and index metadata: + +[source,txt] +---- +node$./bin/elasticsearch-node repurpose + + WARNING: Elasticsearch MUST be stopped before running this tool. + +Found 2 indices (2 shards and 2 index meta data) to clean up +Use -v to see list of paths and indices affected +Node is being re-purposed as no-master and no-data. Clean-up of index data will be performed. +Do you want to proceed? +Confirm [y/N] y +Node successfully repurposed to no-master and no-data. +---- + +[float] +==== Unsafe cluster bootstrapping + +Suppose your cluster had five master-eligible nodes and you have permanently +lost three of them, leaving two nodes remaining. + +* Run the tool on the first remaining node, but answer `n` at the confirmation + step. + +[source,txt] +---- +node_1$ ./bin/elasticsearch-node unsafe-bootstrap + + WARNING: Elasticsearch MUST be stopped before running this tool. + +Current node cluster state (term, version) pair is (4, 12) + +You should only run this tool if you have permanently lost half or more +of the master-eligible nodes in this cluster, and you cannot restore the +cluster from a snapshot. This tool can cause arbitrary data loss and its +use should be your last resort. If you have multiple surviving master +eligible nodes, you should run this tool on the node with the highest +cluster state (term, version) pair. + +Do you want to proceed? + +Confirm [y/N] n +---- + +* Run the tool on the second remaining node, and again answer `n` at the + confirmation step. + +[source,txt] +---- +node_2$ ./bin/elasticsearch-node unsafe-bootstrap + + WARNING: Elasticsearch MUST be stopped before running this tool. + +Current node cluster state (term, version) pair is (5, 3) + +You should only run this tool if you have permanently lost half or more +of the master-eligible nodes in this cluster, and you cannot restore the +cluster from a snapshot. 
This tool can cause arbitrary data loss and its +use should be your last resort. If you have multiple surviving master +eligible nodes, you should run this tool on the node with the highest +cluster state (term, version) pair. + +Do you want to proceed? + +Confirm [y/N] n +---- + +* Since the second node has a greater term it has a fresher cluster state, so + it is better to unsafely bootstrap the cluster using this node: + +[source,txt] +---- +node_2$ ./bin/elasticsearch-node unsafe-bootstrap + + WARNING: Elasticsearch MUST be stopped before running this tool. + +Current node cluster state (term, version) pair is (5, 3) + +You should only run this tool if you have permanently lost half or more +of the master-eligible nodes in this cluster, and you cannot restore the +cluster from a snapshot. This tool can cause arbitrary data loss and its +use should be your last resort. If you have multiple surviving master +eligible nodes, you should run this tool on the node with the highest +cluster state (term, version) pair. + +Do you want to proceed? + +Confirm [y/N] y +Master node was successfully bootstrapped +---- + +[float] +==== Detaching nodes from their cluster + +After unsafely bootstrapping a new cluster, run the `elasticsearch-node +detach-cluster` command to detach all remaining nodes from the failed cluster +so they can join the new cluster: + +[source, txt] +---- +node_3$ ./bin/elasticsearch-node detach-cluster + + WARNING: Elasticsearch MUST be stopped before running this tool. + +You should only run this tool if you have permanently lost all of the +master-eligible nodes in this cluster and you cannot restore the cluster +from a snapshot, or you have already unsafely bootstrapped a new cluster +by running `elasticsearch-node unsafe-bootstrap` on a master-eligible +node that belonged to the same cluster as this node. This tool can cause +arbitrary data loss and its use should be your last resort. + +Do you want to proceed? + +Confirm [y/N] y +Node was successfully detached from the cluster +---- diff --git a/docs/reference/commands/setup-passwords.asciidoc b/docs/reference/commands/setup-passwords.asciidoc index 6e6d3dd75ed21..3dcc9001534fa 100644 --- a/docs/reference/commands/setup-passwords.asciidoc +++ b/docs/reference/commands/setup-passwords.asciidoc @@ -3,8 +3,8 @@ [[setup-passwords]] == elasticsearch-setup-passwords -The `elasticsearch-setup-passwords` command sets the passwords for the built-in -`elastic`, `kibana`, `logstash_system`, `beats_system`, and `apm_system` users. +The `elasticsearch-setup-passwords` command sets the passwords for the +{stack-ov}/built-in-users.html[built-in users]. [float] === Synopsis diff --git a/docs/reference/commands/syskeygen.asciidoc b/docs/reference/commands/syskeygen.asciidoc index 3ae7456448d83..06d8330a1222a 100644 --- a/docs/reference/commands/syskeygen.asciidoc +++ b/docs/reference/commands/syskeygen.asciidoc @@ -21,7 +21,8 @@ bin/elasticsearch-syskeygen The command generates a `system_key` file, which you can use to symmetrically encrypt sensitive data. For example, you can use this key to prevent {watcher} -from returning and storing information that contains clear text credentials. See {xpack-ref}/encrypting-data.html[Encrypting sensitive data in {watcher}]. +from returning and storing information that contains clear text credentials. See +<>. IMPORTANT: The system key is a symmetric key, so the same key must be used on every node in the cluster. 
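For example, a minimal sketch of generating the key on one node and copying it to a second node. The destination path, user, and host name below are illustrative assumptions, not documented defaults:

[source,shell]
--------------------------------------------------
# Generate the system key on one node (assumed to be written to the config directory).
bin/elasticsearch-syskeygen

# Copy the same key file to every other node in the cluster (illustrative user, host, and path).
scp "$ES_PATH_CONF/system_key" elastic@node-2:/etc/elasticsearch/system_key
--------------------------------------------------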
diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index 16e93ac196c8d..e106c2b16eea7 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -36,15 +36,15 @@ optional_source\n -------------------------------------------------- // NOTCONSOLE -*NOTE*: the final line of data must end with a newline character `\n`. Each newline character +*NOTE*: The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. -The possible actions are `index`, `create`, `delete` and `update`. +The possible actions are `index`, `create`, `delete`, and `update`. `index` and `create` expect a source on the next line, and have the same semantics as the `op_type` parameter to the standard index API (i.e. create will fail if a document with the same -index and type exists already, whereas index will add or replace a +index exists already, whereas index will add or replace a document as necessary). `delete` does not expect a source on the following line, and has the same semantics as the standard delete API. `update` expects that the partial doc, upsert and script and its options @@ -172,9 +172,8 @@ The result of this bulk operation is: // TESTRESPONSE[s/"_seq_no" : 3/"_seq_no" : $body.items.3.update._seq_no/] // TESTRESPONSE[s/"_primary_term" : 4/"_primary_term" : $body.items.3.update._primary_term/] -The endpoints are `/_bulk`, `/{index}/_bulk`, and `{index}/{type}/_bulk`. -When the index or the index/type are provided, they will be used by -default on bulk items that don't provide them explicitly. +The endpoints are `/_bulk` and `/{index}/_bulk`. When the index is provided, it +will be used by default on bulk items that don't provide it explicitly. A note on the format. The idea here is to make processing of this as fast as possible. As some of the actions will be redirected to other @@ -215,7 +214,7 @@ documents. See <> for more details. Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index / delete operation based on the `_version` mapping. It also -support the `version_type` (see <>) +support the `version_type` (see <>). [float] [[bulk-routing]] @@ -246,20 +245,20 @@ NOTE: Only the shards that receive the bulk request will be affected by `refresh`. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to -refresh. The other two shards of that make up the index do not +refresh. The other two shards that make up the index do not participate in the `_bulk` request at all. [float] [[bulk-update]] === Update -When using `update` action `retry_on_conflict` can be used as field in +When using the `update` action, `retry_on_conflict` can be used as a field in the action itself (not in the extra payload line), to specify how many times an update should be retried in the case of a version conflict. -The `update` action payload, supports the following options: `doc` +The `update` action payload supports the following options: `doc` (partial document), `upsert`, `doc_as_upsert`, `script`, `params` (for -script), `lang` (for script) and `_source`. See update documentation for details on +script), `lang` (for script), and `_source`. See update documentation for details on the options. 
Example with update actions: [source,js] @@ -283,4 +282,9 @@ POST _bulk [[bulk-security]] === Security -See <> +See <>. + +[float] +[[bulk-partial-responses]] +=== Partial responses +To ensure fast responses, the bulk API will respond with partial results if one or more shards fail. See <> for more information. \ No newline at end of file diff --git a/docs/reference/docs/concurrency-control.asciidoc b/docs/reference/docs/concurrency-control.asciidoc index 780a9c7cf76fc..eeb5aca4ed032 100644 --- a/docs/reference/docs/concurrency-control.asciidoc +++ b/docs/reference/docs/concurrency-control.asciidoc @@ -31,7 +31,7 @@ PUT products/_doc/1567 // CONSOLE You can see the assigned sequence number and primary term in the -the `_seq_no` and `_primary_term` fields of the response: +`_seq_no` and `_primary_term` fields of the response: [source,js] -------------------------------------------------- @@ -53,8 +53,8 @@ the `_seq_no` and `_primary_term` fields of the response: // TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 2/"_primary_term" : $body._primary_term/] -Elasticsearch keeps tracks of the sequence number and primary of the last -operation to have changed each of the document it stores. The sequence number +Elasticsearch keeps tracks of the sequence number and primary term of the last +operation to have changed each of the documents it stores. The sequence number and primary term are returned in the `_seq_no` and `_primary_term` fields in the response of the <>: diff --git a/docs/reference/docs/data-replication.asciidoc b/docs/reference/docs/data-replication.asciidoc index 47af258204fdb..28389fb05ba94 100644 --- a/docs/reference/docs/data-replication.asciidoc +++ b/docs/reference/docs/data-replication.asciidoc @@ -5,12 +5,12 @@ [float] === Introduction -Each index in Elasticsearch is <> +Each index in Elasticsearch is <> and each shard can have multiple copies. These copies are known as a _replication group_ and must be kept in sync when documents are added or removed. If we fail to do so, reading from one copy will result in very different results than reading from another. The process of keeping the shard copies in sync and serving reads from them is what we call the _data replication model_. -Elasticsearch’s data replication model is based on the _primary-backup model_ and is described very well in the +Elasticsearch’s data replication model is based on the _primary-backup model_ and is described very well in the https://www.microsoft.com/en-us/research/publication/pacifica-replication-in-log-based-distributed-storage-systems/[PacificA paper] of Microsoft Research. That model is based on having a single copy from the replication group that acts as the primary shard. The other copies are called _replica shards_. The primary serves as the main entry point for all indexing operations. It is in charge of @@ -23,7 +23,7 @@ it has for various interactions between write and read operations. [float] === Basic write model -Every indexing operation in Elasticsearch is first resolved to a replication group using <>, +Every indexing operation in Elasticsearch is first resolved to a replication group using <>, typically based on the document ID. Once the replication group has been determined, the operation is forwarded internally to the current _primary shard_ of the group. The primary shard is responsible for validating the operation and forwarding it to the other replicas. 
Since replicas can be offline, the primary @@ -50,7 +50,7 @@ configuration mistake could cause an operation to fail on a replica despite it b are infrequent but the primary has to respond to them. In the case that the primary itself fails, the node hosting the primary will send a message to the master about it. The indexing -operation will wait (up to 1 minute, by <>) for the master to promote one of the replicas to be a +operation will wait (up to 1 minute, by <>) for the master to promote one of the replicas to be a new primary. The operation will then be forwarded to the new primary for processing. Note that the master also monitors the health of the nodes and may decide to proactively demote a primary. This typically happens when the node holding the primary is isolated from the cluster by a networking issue. See <> for more details. @@ -60,8 +60,8 @@ when executing it on the replica shards. This may be caused by an actual failure issue preventing the operation from reaching the replica (or preventing the replica from responding). All of these share the same end result: a replica which is part of the in-sync replica set misses an operation that is about to be acknowledged. In order to avoid violating the invariant, the primary sends a message to the master requesting -that the problematic shard be removed from the in-sync replica set. Only once removal of the shard has been acknowledged -by the master does the primary acknowledge the operation. Note that the master will also instruct another node to start +that the problematic shard be removed from the in-sync replica set. Only once removal of the shard has been acknowledged +by the master does the primary acknowledge the operation. Note that the master will also instruct another node to start building a new shard copy in order to restore the system to a healthy state. [[demoted-primary]] @@ -72,13 +72,13 @@ will be rejected by the replicas. When the primary receives a response from the it is no longer the primary then it will reach out to the master and will learn that it has been replaced. The operation is then routed to the new primary. -.What happens if there are no replicas? +.What happens if there are no replicas? ************ This is a valid scenario that can happen due to index configuration or simply because all the replicas have failed. In that case the primary is processing operations without any external validation, which may seem problematic. On the other hand, the primary cannot fail other shards on its own but request the master to do -so on its behalf. This means that the master knows that the primary is the only single good copy. We are therefore guaranteed -that the master will not promote any other (out-of-date) shard copy to be a new primary and that any operation indexed +so on its behalf. This means that the master knows that the primary is the only single good copy. We are therefore guaranteed +that the master will not promote any other (out-of-date) shard copy to be a new primary and that any operation indexed into the primary will not be lost. Of course, since at that point we are running with only single copy of the data, physical hardware issues can cause data loss. See <> for some mitigation options. ************ @@ -91,7 +91,7 @@ take non-trivial CPU power. One of the beauties of the primary-backup model is t (with the exception of in-flight operations). As such, a single in-sync copy is sufficient to serve read requests. 
When a read request is received by a node, that node is responsible for forwarding it to the nodes that hold the relevant shards, -collating the responses, and responding to the client. We call that node the _coordinating node_ for that request. The basic flow +collating the responses, and responding to the client. We call that node the _coordinating node_ for that request. The basic flow is as follows: . Resolve the read requests to the relevant shards. Note that since most searches will be sent to one or more indices, @@ -102,12 +102,24 @@ is as follows: . Combine the results and respond. Note that in the case of get by ID look up, only one shard is relevant and this step can be skipped. [float] -==== Failure handling +[[shard-failures]] +==== Shard failures + +When a shard fails to respond to a read request, the coordinating node sends the +request to another shard copy in the same replication group. Repeated failures +can result in no available shard copies. + +To ensure fast responses, the following APIs will +respond with partial results if one or more shards fail: + +* <> +* <> +* <> +* <> -When a shard fails to respond to a read request, the coordinating node will select another copy from the same replication group -and send the shard level search request to that copy instead. Repetitive failures can result in no shard copies being available. -In some cases, such as `_search`, Elasticsearch will prefer to respond fast, albeit with partial results, instead of waiting -for the issue to be resolved (partial results are indicated in the `_shards` header of the response). +Responses containing partial results still provide a `200 OK` HTTP status code. +Shard failures are indicated by the `timed_out` and `_shards` fields of +the response header. [float] === A few simple implications @@ -141,9 +153,9 @@ Dirty reads:: An isolated primary can expose writes that will not be acknowledge [float] === The Tip of the Iceberg -This document provides a high level overview of how Elasticsearch deals with data. Of course, there is much much more -going on under the hood. Things like primary terms, cluster state publishing and master election all play a role in +This document provides a high level overview of how Elasticsearch deals with data. Of course, there is much much more +going on under the hood. Things like primary terms, cluster state publishing, and master election all play a role in keeping this system behaving correctly. This document also doesn't cover known and important bugs (both closed and open). We recognize that https://github.com/elastic/elasticsearch/issues?q=label%3Aresiliency[GitHub is hard to keep up with]. -To help people stay on top of those and we maintain a dedicated https://www.elastic.co/guide/en/elasticsearch/resiliency/current/index.html[resiliency page] +To help people stay on top of those, we maintain a dedicated https://www.elastic.co/guide/en/elasticsearch/resiliency/current/index.html[resiliency page] on our website. We strongly advise reading it. diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index 307d762abe79e..f8cb84ab790fa 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -2,7 +2,7 @@ == Delete By Query API The simplest usage of `_delete_by_query` just performs a deletion on every -document that match a query. Here is the API: +document that matches a query. 
Here is the API: [source,js] -------------------------------------------------- @@ -20,7 +20,7 @@ POST twitter/_delete_by_query <1> The query must be passed as a value to the `query` key, in the same way as the <>. You can also use the `q` -parameter in the same way as the search api. +parameter in the same way as the search API. That will return something like this: @@ -68,7 +68,7 @@ these documents. In case a search or bulk request got rejected, `_delete_by_quer failures that are returned by the failing bulk request are returned in the `failures` element; therefore it's possible for there to be quite a few failed entities. -If you'd like to count version conflicts rather than cause them to abort then +If you'd like to count version conflicts rather than cause them to abort, then set `conflicts=proceed` on the url or `"conflicts": "proceed"` in the request body. Back to the API format, this will delete tweets from the `twitter` index: @@ -140,14 +140,14 @@ POST twitter/_delete_by_query?scroll_size=5000 [float] === URL Parameters -In addition to the standard parameters like `pretty`, the Delete By Query API -also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout` +In addition to the standard parameters like `pretty`, the delete by query API +also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, and `scroll`. Sending the `refresh` will refresh all shards involved in the delete by query -once the request completes. This is different than the Delete API's `refresh` +once the request completes. This is different than the delete API's `refresh` parameter which causes just the shard that received the delete request -to be refreshed. Also unlike the Delete API it does not support `wait_for`. +to be refreshed. Also unlike the delete API it does not support `wait_for`. If the request contains `wait_for_completion=false` then Elasticsearch will perform some preflight checks, launch the request, and then return a `task` @@ -163,10 +163,10 @@ for details. `timeout` controls how long each write request waits for unavailabl shards to become available. Both work exactly how they work in the <>. As `_delete_by_query` uses scroll search, you can also specify the `scroll` parameter to control how long it keeps the "search context" alive, -eg `?scroll=10m`, by default it's 5 minutes. +e.g. `?scroll=10m`. By default it's 5 minutes. `requests_per_second` can be set to any positive decimal number (`1.4`, `6`, -`1000`, etc) and throttles rate at which `_delete_by_query` issues batches of +`1000`, etc.) and throttles the rate at which delete by query issues batches of delete operations by padding each batch with a wait time. The throttling can be disabled by setting `requests_per_second` to `-1`. @@ -182,7 +182,7 @@ target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds -------------------------------------------------- -Since the batch is issued as a single `_bulk` request large batch sizes will +Since the batch is issued as a single `_bulk` request, large batch sizes will cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". The default is `-1`. @@ -259,13 +259,13 @@ The number of version conflicts that the delete by query hit. `noops`:: This field is always equal to zero for delete by query. 
It only exists -so that delete by query, update by query and reindex APIs return responses +so that delete by query, update by query, and reindex APIs return responses with the same structure. `retries`:: The number of retries attempted by delete by query. `bulk` is the number -of bulk actions retried and `search` is the number of search actions retried. +of bulk actions retried, and `search` is the number of search actions retried. `throttled_millis`:: @@ -286,7 +286,7 @@ executed again in order to conform to `requests_per_second`. Array of failures if there were any unrecoverable errors during the process. If this is non-empty then the request aborted because of those failures. -Delete-by-query is implemented using batches and any failure causes the entire +Delete by query is implemented using batches, and any failure causes the entire process to abort but all failures in the current batch are collected into the array. You can use the `conflicts` option to prevent reindex from aborting on version conflicts. @@ -296,7 +296,7 @@ version conflicts. [[docs-delete-by-query-task-api]] === Works with the Task API -You can fetch the status of any running delete-by-query requests with the +You can fetch the status of any running delete by query requests with the <>: [source,js] @@ -306,7 +306,7 @@ GET _tasks?detailed=true&actions=*/delete/byquery // CONSOLE // TEST[skip:No tasks to retrieve] -The responses looks like: +The response looks like: [source,js] -------------------------------------------------- @@ -346,7 +346,7 @@ The responses looks like: } -------------------------------------------------- // TESTRESPONSE -<1> this object contains the actual status. It is just like the response json +<1> This object contains the actual status. It is just like the response JSON with the important addition of the `total` field. `total` is the total number of operations that the reindex expects to perform. You can estimate the progress by adding the `updated`, `created`, and `deleted` fields. The request @@ -373,7 +373,7 @@ you to delete that document. [[docs-delete-by-query-cancel-task-api]] === Works with the Cancel Task API -Any Delete By Query can be canceled using the <>: +Any delete by query can be canceled using the <>: [source,js] -------------------------------------------------- @@ -403,26 +403,26 @@ POST _delete_by_query/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_seco The task ID can be found using the <>. -Just like when setting it on the `_delete_by_query` API `requests_per_second` +Just like when setting it on the delete by query API, `requests_per_second` can be either `-1` to disable throttling or any decimal number like `1.7` or `12` to throttle to that level. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query will -take effect on after completing the current batch. This prevents scroll +take effect after completing the current batch. This prevents scroll timeouts. [float] [[docs-delete-by-query-slice]] === Slicing -Delete-by-query supports <> to parallelize the deleting process. +Delete by query supports <> to parallelize the deleting process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. 
[float] [[docs-delete-by-query-manual-slice]] -==== Manually slicing +==== Manual slicing -Slice a delete-by-query manually by providing a slice id and total number of +Slice a delete by query manually by providing a slice id and total number of slices to each request: [source,js] @@ -498,7 +498,7 @@ Which results in a sensible `total` like this one: ==== Automatic slicing You can also let delete-by-query automatically parallelize using -<> to slice on `_id`. Use `slices` to specify the number of +<> to slice on `_id`. Use `slices` to specify the number of slices to use: [source,js] @@ -575,8 +575,8 @@ be larger than others. Expect larger slices to have a more even distribution. are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that the using `size` with `slices` might not result in exactly `size` documents being -`_delete_by_query`ed. -* Each sub-requests gets a slightly different snapshot of the source index +deleted. +* Each sub-request gets a slightly different snapshot of the source index though these are all taken at approximately the same time. [float] @@ -588,8 +588,8 @@ number for most indices. If you're slicing manually or otherwise tuning automatic slicing, use these guidelines. Query performance is most efficient when the number of `slices` is equal to the -number of shards in the index. If that number is large, (for example, -500) choose a lower number as too many `slices` will hurt performance. Setting +number of shards in the index. If that number is large (for example, +500), choose a lower number as too many `slices` will hurt performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index 22301b98f1031..b242741abd522 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -88,11 +88,11 @@ DELETE /twitter/_doc/1?routing=kimchy // TEST[continued] The above will delete a tweet with id `1`, but will be routed based on the -user. Note, issuing a delete without the correct routing, will cause the +user. Note that issuing a delete without the correct routing will cause the document to not be deleted. When the `_routing` mapping is set as `required` and no routing value is -specified, the delete api will throw a `RoutingMissingException` and reject +specified, the delete API will throw a `RoutingMissingException` and reject the request. [float] diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc index 742e258ac65c4..e84df1d5a9689 100644 --- a/docs/reference/docs/get.asciidoc +++ b/docs/reference/docs/get.asciidoc @@ -34,7 +34,7 @@ The result of the above get operation is: -------------------------------------------------- // TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] -The above result includes the `_index`, `_id` and `_version` +The above result includes the `_index`, `_id`, and `_version` of the document we wish to retrieve, including the actual `_source` of the document if it could be found (as indicated by the `found` field in the response). @@ -76,7 +76,7 @@ GET twitter/_doc/0?_source=false // TEST[setup:twitter] If you only need one or two fields from the complete `_source`, you can use the `_source_includes` -& `_source_excludes` parameters to include or filter out that parts you need. 
This can be especially helpful +and `_source_excludes` parameters to include or filter out the parts you need. This can be especially helpful with large documents where partial retrieval can save on network overhead. Both parameters take a comma separated list of fields or wildcard expressions. Example: @@ -138,7 +138,7 @@ PUT twitter/_doc/1 // CONSOLE // TEST[continued] -... and try to retrieve it: +And then try to retrieve it: [source,js] -------------------------------------------------- @@ -236,7 +236,7 @@ You can also use the same source filtering parameters to control which parts of [source,js] -------------------------------------------------- -GET twitter/_source/1/?_source_includes=*.id&_source_excludes=entities' +GET twitter/_source/1/?_source_includes=*.id&_source_excludes=entities -------------------------------------------------- // CONSOLE // TEST[continued] @@ -266,7 +266,7 @@ GET twitter/_doc/2?routing=user1 // TEST[continued] The above will get a tweet with id `2`, but will be routed based on the -user. Note, issuing a get without the correct routing, will cause the +user. Note that issuing a get without the correct routing will cause the document not to be fetched. [float] @@ -307,7 +307,7 @@ indexing). The get operation gets hashed into a specific shard id. It then gets redirected to one of the replicas within that shard id and returns the result. The replicas are the primary shard and its replicas within that -shard id group. This means that the more replicas we will have, the +shard id group. This means that the more replicas we have, the better GET scaling we will have. diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index e8a681567d622..2c617832488b5 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -39,11 +39,11 @@ The result of the above index operation is: -------------------------------------------------- // TESTRESPONSE[s/"successful" : 2/"successful" : 1/] -The `_shards` header provides information about the replication process of the index operation. +The `_shards` header provides information about the replication process of the index operation: -* `total` - Indicates to how many shard copies (primary and replica shards) the index operation should be executed on. -* `successful`- Indicates the number of shard copies the index operation succeeded on. -* `failed` - An array that contains replication related errors in the case an index operation failed on a replica shard. +`total`:: Indicates how many shard copies (primary and replica shards) the index operation should be executed on. +`successful`:: Indicates the number of shard copies the index operation succeeded on. +`failed`:: An array that contains replication-related errors in the case an index operation failed on a replica shard. The index operation is successful in the case `successful` is at least 1. @@ -61,7 +61,7 @@ exist, and applies any <> that are configured. The index operation also creates a dynamic mapping if one does not already exist. By default, new fields and objects will automatically be added to the mapping definition if needed. Check out the <> section -for more information on mapping definitions, and the the +for more information on mapping definitions, and the <> API for information about updating mappings manually. @@ -299,16 +299,16 @@ Control when the changes made by this request are visible to search. 
See [[index-noop]] === Noop Updates -When updating a document using the index api a new version of the document is +When updating a document using the index API a new version of the document is always created even if the document hasn't changed. If this isn't acceptable -use the `_update` api with `detect_noop` set to true. This option isn't -available on the index api because the index api doesn't fetch the old source +use the `_update` API with `detect_noop` set to true. This option isn't +available on the index API because the index API doesn't fetch the old source and isn't able to compare it against the new source. There isn't a hard and fast rule about when noop updates aren't acceptable. It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second -Elasticsearch runs on the shard with receiving the updates. +Elasticsearch runs on the shard receiving the updates. [float] [[timeout]] @@ -343,7 +343,7 @@ internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to -`external`. The value provided must be a numeric, long value greater or equal to 0, +`external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around 9.2e+18. When using the external version type, the system checks to see if @@ -363,11 +363,11 @@ PUT twitter/_doc/1?version=2&version_type=external // CONSOLE // TEST[continued] -*NOTE:* versioning is completely real time, and is not affected by the +*NOTE:* Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, then the operation is executed without any version checks. -The above will succeed since the the supplied version of 2 is higher than +The above will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 http status code). @@ -387,15 +387,15 @@ Next to the `external` version type explained above, Elasticsearch also supports other types for specific use cases. Here is an overview of the different version types and their semantics. -`internal`:: only index the document if the given version is identical to the version +`internal`:: Only index the document if the given version is identical to the version of the stored document. -`external` or `external_gt`:: only index the document if the given version is strictly higher +`external` or `external_gt`:: Only index the document if the given version is strictly higher than the version of the stored document *or* if there is no existing document. The given version will be used as the new version and will be stored with the new document. The supplied version must be a non-negative long number. -`external_gte`:: only index the document if the given version is *equal* or higher +`external_gte`:: Only index the document if the given version is *equal* or higher than the version of the stored document. If there is no existing document the operation will succeed as well. The given version will be used as the new version and will be stored with the new document. The supplied version must be a non-negative long number. 
diff --git a/docs/reference/docs/multi-get.asciidoc b/docs/reference/docs/multi-get.asciidoc index 6d50a6a643a89..8d5dd2ad74a3d 100644 --- a/docs/reference/docs/multi-get.asciidoc +++ b/docs/reference/docs/multi-get.asciidoc @@ -1,7 +1,7 @@ [[docs-multi-get]] == Multi Get API -Multi GET API allows to get multiple documents based on an index, type +The Multi get API returns multiple documents based on an index, type, (optional) and id (and possibly routing). The response includes a `docs` array with all the fetched documents in order corresponding to the original multi-get request (if there was a failure for a specific get, an object containing this @@ -89,7 +89,7 @@ GET /test/_doc/_mget By default, the `_source` field will be returned for every document (if stored). Similar to the <> API, you can retrieve only parts of the `_source` (or not at all) by using the `_source` parameter. You can also use -the url parameters `_source`,`_source_includes` & `_source_excludes` to specify defaults, +the url parameters `_source`, `_source_includes`, and `_source_excludes` to specify defaults, which will be used when there are no per-document instructions. For example: @@ -181,7 +181,7 @@ GET /test/_doc/_mget?stored_fields=field1,field2 [[mget-routing]] === Routing -You can also specify routing value as a parameter: +You can also specify a routing value as a parameter: [source,js] -------------------------------------------------- @@ -204,11 +204,16 @@ GET /_mget?routing=key1 -------------------------------------------------- // CONSOLE -In this example, document `test/_doc/2` will be fetch from shard corresponding to routing key `key1` but -document `test/_doc/1` will be fetch from shard corresponding to routing key `key2`. +In this example, document `test/_doc/2` will be fetched from the shard corresponding to routing key `key1` but +document `test/_doc/1` will be fetched from the shard corresponding to routing key `key2`. [float] [[mget-security]] === Security -See <> +See <>. + +[float] +[[multi-get-partial-responses]] +=== Partial responses +To ensure fast responses, the multi get API will respond with partial results if one or more shards fail. See <> for more information. \ No newline at end of file diff --git a/docs/reference/docs/refresh.asciidoc b/docs/reference/docs/refresh.asciidoc index e5051497ecda3..121e0c494d828 100644 --- a/docs/reference/docs/refresh.asciidoc +++ b/docs/reference/docs/refresh.asciidoc @@ -63,6 +63,7 @@ general, if you have a running system you don't wish to disturb then `refresh=wait_for` is a smaller modification. [float] +[[refresh_wait_for-force-refresh]] === `refresh=wait_for` Can Force a Refresh If a `refresh=wait_for` request comes in when there are already diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index b12a27d2e601b..5ffbf8f4362c2 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -118,8 +118,11 @@ POST _reindex // CONSOLE // TEST[setup:twitter] -By default version conflicts abort the `_reindex` process but you can just -count them by settings `"conflicts": "proceed"` in the request body: +By default, version conflicts abort the `_reindex` process. The `"conflicts"` request body +parameter can be used to instruct `_reindex` to proceed with the next document on version conflicts. +It is important to note that the handling of other error types is unaffected by the `"conflicts"` parameter. 
+When `"conflicts": "proceed"` is set in the request body, the `_reindex` process will continue on version conflicts +and return a count of version conflicts encountered: [source,js] -------------------------------------------------- @@ -249,7 +252,7 @@ POST _reindex // CONSOLE // TEST[setup:twitter] - +[[reindex-scripts]] Like `_update_by_query`, `_reindex` supports a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document's metadata. This example bumps the version of the source document: @@ -423,16 +426,18 @@ POST _reindex // TEST[s/"password": "pass"//] The `host` parameter must contain a scheme, host, port (e.g. -`https://otherhost:9200`) and optional path (e.g. `https://otherhost:9200/proxy`). +`https://otherhost:9200`), and optional path (e.g. `https://otherhost:9200/proxy`). The `username` and `password` parameters are optional, and when they are present `_reindex` will connect to the remote Elasticsearch node using basic auth. Be sure to use `https` when using basic auth or the password will be sent in plain text. +There are a range of <> available to configure the behaviour of the + `https` connection. Remote hosts have to be explicitly whitelisted in elasticsearch.yml using the `reindex.remote.whitelist` property. It can be set to a comma delimited list of allowed remote `host` and `port` combinations (e.g. `otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*`). Scheme is -ignored by the whitelist - only host and port are used, for example: +ignored by the whitelist -- only host and port are used, for example: [source,yaml] @@ -518,12 +523,105 @@ POST _reindex // TEST[s/^/PUT source\n/] // TEST[s/otherhost:9200/\${host}/] +[float] +[[reindex-ssl]] +==== Configuring SSL parameters + +Reindex from remote supports configurable SSL settings. These must be +specified in the `elasticsearch.yml` file, with the exception of the +secure settings, which you add in the Elasticsearch keystore. +It is not possible to configure SSL in the body of the `_reindex` request. + +The following settings are supported: + +`reindex.ssl.certificate_authorities`:: +List of paths to PEM encoded certificate files that should be trusted. +You cannot specify both `reindex.ssl.certificate_authorities` and +`reindex.ssl.truststore.path`. + +`reindex.ssl.truststore.path`:: +The path to the Java Keystore file that contains the certificates to trust. +This keystore can be in "JKS" or "PKCS#12" format. +You cannot specify both `reindex.ssl.certificate_authorities` and +`reindex.ssl.truststore.path`. + +`reindex.ssl.truststore.password`:: +The password to the truststore (`reindex.ssl.truststore.path`). +This setting cannot be used with `reindex.ssl.truststore.secure_password`. + +`reindex.ssl.truststore.secure_password` (<>):: +The password to the truststore (`reindex.ssl.truststore.path`). +This setting cannot be used with `reindex.ssl.truststore.password`. + +`reindex.ssl.truststore.type`:: +The type of the truststore (`reindex.ssl.truststore.path`). +Must be either `jks` or `PKCS12`. If the truststore path ends in ".p12", ".pfx" +or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`. + +`reindex.ssl.verification_mode`:: +Indicates the type of verification to protect against man in the middle attacks +and certificate forgery. 
+One of `full` (verify the hostname and the certificate path), `certificate` +(verify the certificate path, but not the hostname) or `none` (perform no +verification - this is strongly discouraged in production environments). +Defaults to `full`. + +`reindex.ssl.certificate`:: +Specifies the path to the PEM encoded certificate (or certificate chain) to be +used for HTTP client authentication (if required by the remote cluster) +This setting requires that `reindex.ssl.key` also be set. +You cannot specify both `reindex.ssl.certificate` and `reindex.ssl.keystore.path`. + +`reindex.ssl.key`:: +Specifies the path to the PEM encoded private key associated with the +certificate used for client authentication (`reindex.ssl.certificate`). +You cannot specify both `reindex.ssl.key` and `reindex.ssl.keystore.path`. + +`reindex.ssl.key_passphrase`:: +Specifies the passphrase to decrypt the PEM encoded private key +(`reindex.ssl.key`) if it is encrypted. +Cannot be used with `reindex.ssl.secure_key_passphrase`. + +`reindex.ssl.secure_key_passphrase` (<>):: +Specifies the passphrase to decrypt the PEM encoded private key +(`reindex.ssl.key`) if it is encrypted. +Cannot be used with `reindex.ssl.key_passphrase`. + +`reindex.ssl.keystore.path`:: +Specifies the path to the keystore that contains a private key and certificate +to be used for HTTP client authentication (if required by the remote cluster). +This keystore can be in "JKS" or "PKCS#12" format. +You cannot specify both `reindex.ssl.key` and `reindex.ssl.keystore.path`. + +`reindex.ssl.keystore.type`:: +The type of the keystore (`reindex.ssl.keystore.path`). Must be either `jks` or `PKCS12`. +If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting defaults +to `PKCS12`. Otherwise, it defaults to `jks`. + +`reindex.ssl.keystore.password`:: +The password to the keystore (`reindex.ssl.keystore.path`). This setting cannot be used +with `reindex.ssl.keystore.secure_password`. + +`reindex.ssl.keystore.secure_password` (<>):: +The password to the keystore (`reindex.ssl.keystore.path`). +This setting cannot be used with `reindex.ssl.keystore.password`. + +`reindex.ssl.keystore.key_password`:: +The password for the key in the keystore (`reindex.ssl.keystore.path`). +Defaults to the keystore password. This setting cannot be used with +`reindex.ssl.keystore.secure_key_password`. + +`reindex.ssl.keystore.secure_key_password` (<>):: +The password for the key in the keystore (`reindex.ssl.keystore.path`). +Defaults to the keystore password. This setting cannot be used with +`reindex.ssl.keystore.key_password`. + [float] === URL Parameters In addition to the standard parameters like `pretty`, the Reindex API also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, -`scroll` and `requests_per_second`. +`scroll`, and `requests_per_second`. Sending the `refresh` url parameter will cause all indexes to which the request wrote to be refreshed. This is different than the Index API's `refresh` @@ -547,7 +645,7 @@ the `scroll` parameter to control how long it keeps the "search context" alive, (e.g. `?scroll=10m`). The default value is 5 minutes. `requests_per_second` can be set to any positive decimal number (`1.4`, `6`, -`1000`, etc) and throttles the rate at which `_reindex` issues batches of index +`1000`, etc.) and throttles the rate at which `_reindex` issues batches of index operations by padding each batch with a wait time. The throttling can be disabled by setting `requests_per_second` to `-1`. 
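As a sketch of how this parameter is passed (the value `500` and the `twitter`/`new_twitter` indices are illustrative, reused from the earlier examples), throttling is requested directly on the URL:

[source,js]
--------------------------------------------------
POST _reindex?requests_per_second=500
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter"
  }
}
--------------------------------------------------
// CONSOLE

A value of `500` together with a batch size of `1000` corresponds to the worked padding-time example that follows.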
@@ -560,7 +658,7 @@ The padding time is the difference between the batch size divided by the [source,txt] -------------------------------------------------- target_time = 1000 / 500 per second = 2 seconds -wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +`padding time` = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds -------------------------------------------------- Since the batch is issued as a single `_bulk` request, large batch sizes will @@ -744,7 +842,7 @@ The response looks like: } -------------------------------------------------- // TESTRESPONSE -<1> this object contains the actual status. It is identical to the response JSON +<1> This object contains the actual status. It is identical to the response JSON except for the important addition of the `total` field. `total` is the total number of operations that the `_reindex` expects to perform. You can estimate the progress by adding the `updated`, `created`, and `deleted` fields. The request @@ -772,7 +870,7 @@ you to delete that document. [[docs-reindex-cancel-task-api]] === Works with the Cancel Task API -Any Reindex can be canceled using the <>. For +Any reindex can be canceled using the <>. For example: [source,js] @@ -805,8 +903,8 @@ The task ID can be found using the <>. Just like when setting it on the Reindex API, `requests_per_second` can be either `-1` to disable throttling or any decimal number like `1.7` or `12` to throttle to that level. Rethrottling that speeds up the -query takes effect immediately but rethrotting that slows down the query will -take effect on after completing the current batch. This prevents scroll +query takes effect immediately, but rethrottling that slows down the query will +take effect after completing the current batch. This prevents scroll timeouts. [float] @@ -1017,7 +1115,7 @@ be larger than others. Expect larger slices to have a more even distribution. are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that the using `size` with `slices` might not result in exactly `size` documents being -`_reindex`ed. +reindexed. * Each sub-request gets a slightly different snapshot of the source index, though these are all taken at approximately the same time. @@ -1050,7 +1148,7 @@ partially completed index and starting over at that index. It also makes parallelizing the process fairly simple: split the list of indices to reindex and run each list in parallel. -One off bash scripts seem to work nicely for this: +One-off bash scripts seem to work nicely for this: [source,bash] ---------------------------------------------------------------- @@ -1122,7 +1220,7 @@ GET metricbeat-2016.05.31-1/_doc/1 // CONSOLE // TEST[continued] -The previous method can also be used in conjunction with <> +The previous method can also be used in conjunction with <> to load only the existing data into the new index and rename any fields if needed. [float] diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index a01bd30e4280a..883f6ad2a29e3 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -39,9 +39,9 @@ That will return something like this: // TESTRESPONSE[s/"took" : 147/"took" : "$body.took"/] `_update_by_query` gets a snapshot of the index when it starts and indexes what -it finds using `internal` versioning. 
That means that you'll get a version +it finds using `internal` versioning. That means you'll get a version conflict if the document changes between the time when the snapshot was taken -and when the index request is processed. When the versions match the document +and when the index request is processed. When the versions match, the document is updated and the version number is incremented. NOTE: Since `internal` versioning does not support the value 0 as a valid @@ -55,10 +55,10 @@ aborted. While the first failure causes the abort, all failures that are returned by the failing bulk request are returned in the `failures` element; therefore it's possible for there to be quite a few failed entities. -If you want to simply count version conflicts not cause the `_update_by_query` -to abort you can set `conflicts=proceed` on the url or `"conflicts": "proceed"` +If you want to simply count version conflicts, and not cause the `_update_by_query` +to abort, you can set `conflicts=proceed` on the url or `"conflicts": "proceed"` in the request body. The first example does this because it is just trying to -pick up an online mapping change and a version conflict simply means that the +pick up an online mapping change, and a version conflict simply means that the conflicting document was updated between the start of the `_update_by_query` and the time when it attempted to update the document. This is fine because that update will have picked up the online mapping update. @@ -92,7 +92,7 @@ POST twitter/_update_by_query?conflicts=proceed <1> The query must be passed as a value to the `query` key, in the same way as the <>. You can also use the `q` -parameter in the same way as the search api. +parameter in the same way as the search API. So far we've only been updating documents without changing their source. That is genuinely useful for things like @@ -121,7 +121,7 @@ POST twitter/_update_by_query Just as in <> you can set `ctx.op` to change the operation that is executed: - +[horizontal] `noop`:: Set `ctx.op = "noop"` if your script decides that it doesn't have to make any @@ -199,12 +199,12 @@ POST twitter/_update_by_query?pipeline=set-foo === URL Parameters In addition to the standard parameters like `pretty`, the Update By Query API -also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout` +also supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, and `scroll`. Sending the `refresh` will update all shards in the index being updated when the request completes. This is different than the Update API's `refresh` -parameter which causes just the shard that received the new data to be indexed. +parameter, which causes just the shard that received the new data to be indexed. Also unlike the Update API it does not support `wait_for`. If the request contains `wait_for_completion=false` then Elasticsearch will @@ -219,12 +219,12 @@ Elasticsearch can reclaim the space it uses. before proceeding with the request. See <> for details. `timeout` controls how long each write request waits for unavailable shards to become available. Both work exactly how they work in the -<>. As `_update_by_query` uses scroll search, you can also specify +<>. Because `_update_by_query` uses scroll search, you can also specify the `scroll` parameter to control how long it keeps the "search context" alive, -eg `?scroll=10m`, by default it's 5 minutes. +e.g. `?scroll=10m`. By default it's 5 minutes. 
`requests_per_second` can be set to any positive decimal number (`1.4`, `6`, -`1000`, etc) and throttles rate at which `_update_by_query` issues batches of +`1000`, etc.) and throttles the rate at which `_update_by_query` issues batches of index operations by padding each batch with a wait time. The throttling can be disabled by setting `requests_per_second` to `-1`. @@ -240,7 +240,7 @@ target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds -------------------------------------------------- -Since the batch is issued as a single `_bulk` request large batch sizes will +Since the batch is issued as a single `_bulk` request, large batch sizes will cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". The default is `-1`. @@ -283,6 +283,7 @@ The JSON response looks like this: -------------------------------------------------- // TESTRESPONSE[s/"took" : 147/"took" : "$body.took"/] +[horizontal] `took`:: The number of milliseconds from start to end of the whole operation. @@ -319,8 +320,8 @@ the update by query returned a `noop` value for `ctx.op`. `retries`:: -The number of retries attempted by update-by-query. `bulk` is the number of bulk -actions retried and `search` is the number of search actions retried. +The number of retries attempted by update by query. `bulk` is the number of bulk +actions retried, and `search` is the number of search actions retried. `throttled_millis`:: @@ -341,8 +342,8 @@ executed again in order to conform to `requests_per_second`. Array of failures if there were any unrecoverable errors during the process. If this is non-empty then the request aborted because of those failures. -Update-by-query is implemented using batches and any failure causes the entire -process to abort but all failures in the current batch are collected into the +Update by query is implemented using batches. Any failure causes the entire +process to abort, but all failures in the current batch are collected into the array. You can use the `conflicts` option to prevent reindex from aborting on version conflicts. @@ -352,7 +353,7 @@ version conflicts. [[docs-update-by-query-task-api]] === Works with the Task API -You can fetch the status of all running update-by-query requests with the +You can fetch the status of all running update by query requests with the <>: [source,js] @@ -406,7 +407,7 @@ The responses looks like: -------------------------------------------------- // TESTRESPONSE -<1> this object contains the actual status. It is just like the response json +<1> This object contains the actual status. It is just like the response JSON with the important addition of the `total` field. `total` is the total number of operations that the reindex expects to perform. You can estimate the progress by adding the `updated`, `created`, and `deleted` fields. The request @@ -424,7 +425,7 @@ GET /_tasks/r1A2WoRbTwKZ516z6NEs5A:36619 The advantage of this API is that it integrates with `wait_for_completion=false` to transparently return the status of completed tasks. If the task is completed -and `wait_for_completion=false` was set on it them it'll come back with a +and `wait_for_completion=false` was set on it, then it'll come back with a `results` or an `error` field. The cost of this feature is the document that `wait_for_completion=false` creates at `.tasks/task/${taskId}`. It is up to you to delete that document. 
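For example (a sketch only, reusing the task id from the requests above and the `.tasks/task/${taskId}` path just described), the leftover task document can be removed with an ordinary delete request:

[source,js]
--------------------------------------------------
DELETE .tasks/task/r1A2WoRbTwKZ516z6NEs5A:36619
--------------------------------------------------
// CONSOLE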
@@ -434,7 +435,7 @@ you to delete that document. [[docs-update-by-query-cancel-task-api]] === Works with the Cancel Task API -Any Update By Query can be canceled using the <>: +Any update by query can be cancelled using the <>: [source,js] -------------------------------------------------- @@ -464,25 +465,25 @@ POST _update_by_query/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_seco The task ID can be found using the <>. -Just like when setting it on the `_update_by_query` API `requests_per_second` +Just like when setting it on the `_update_by_query` API, `requests_per_second` can be either `-1` to disable throttling or any decimal number like `1.7` or `12` to throttle to that level. Rethrottling that speeds up the -query takes effect immediately but rethrotting that slows down the query will -take effect on after completing the current batch. This prevents scroll +query takes effect immediately, but rethrotting that slows down the query will +take effect after completing the current batch. This prevents scroll timeouts. [float] [[docs-update-by-query-slice]] === Slicing -Update-by-query supports <> to parallelize the updating process. +Update by query supports <> to parallelize the updating process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. [float] [[docs-update-by-query-manual-slice]] ==== Manual slicing -Slice an update-by-query manually by providing a slice id and total number of +Slice an update by query manually by providing a slice id and total number of slices to each request: [source,js] @@ -540,7 +541,7 @@ Which results in a sensible `total` like this one: [[docs-update-by-query-automatic-slice]] ==== Automatic slicing -You can also let update-by-query automatically parallelize using +You can also let update by query automatically parallelize using <> to slice on `_id`. Use `slices` to specify the number of slices to use: @@ -605,8 +606,8 @@ be larger than others. Expect larger slices to have a more even distribution. are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that the using `size` with `slices` might not result in exactly `size` documents being -`_update_by_query`ed. -* Each sub-requests gets a slightly different snapshot of the source index +updated. +* Each sub-request gets a slightly different snapshot of the source index though these are all taken at approximately the same time. [float] diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 64c0f67bc722c..00cd66232190f 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -4,7 +4,7 @@ The update API allows to update a document based on a script provided. The operation gets the document (collocated with the shard) from the index, runs the script (with optional script language and parameters), -and index back the result (also allows to delete, or ignore the +and indexes back the result (also allows to delete, or ignore the operation). It uses versioning to make sure no updates have happened during the "get" and "reindex". 
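To make that flow concrete, here is a minimal sketch (the `test` index, `counter` field, and Painless script are illustrative, in the same style as the examples in the surrounding hunks): index a document, then update it with a script that modifies a field:

[source,js]
--------------------------------------------------
PUT test/_doc/1
{
    "counter" : 1,
    "tags" : ["red"]
}
--------------------------------------------------
// CONSOLE

[source,js]
--------------------------------------------------
POST test/_update/1
{
    "script" : {
        "source": "ctx._source.counter += params.count",
        "lang": "painless",
        "params" : {
            "count" : 4
        }
    }
}
--------------------------------------------------
// CONSOLE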
@@ -46,8 +46,8 @@ POST test/_update/1 // CONSOLE // TEST[continued] -We can add a tag to the list of tags (note, if the tag exists, it -will still add it, since it's a list): +We can add a tag to the list of tags (if the tag exists, it + still gets added, since this is a list): [source,js] -------------------------------------------------- @@ -88,7 +88,7 @@ POST test/_update/1 // TEST[continued] In addition to `_source`, the following variables are available through -the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing` +the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). We can also add a new field to the document: @@ -116,7 +116,7 @@ POST test/_update/1 // TEST[continued] And, we can even change the operation that is executed. This example deletes -the doc if the `tags` field contain `green`, otherwise it does nothing +the doc if the `tags` field contains `green`, otherwise it does nothing (`noop`): [source,js] @@ -138,7 +138,7 @@ POST test/_update/1 [float] === Updates with a partial document -The update API also support passing a partial document, +The update API also supports passing a partial document, which will be merged into the existing document (simple recursive merge, inner merging of objects, replacing core "keys/values" and arrays). To fully replace the existing document, the <> should @@ -165,7 +165,7 @@ to put your field pairs of the partial document in the script itself. === Detecting noop updates If `doc` is specified its value is merged with the existing `_source`. -By default updates that don't change anything detect that they don't change anything and return "result": "noop" like this: +By default updates that don't change anything detect that they don't change anything and return `"result": "noop"` like this: [source,js] -------------------------------------------------- @@ -200,7 +200,7 @@ the request was ignored. -------------------------------------------------- // TESTRESPONSE -You can disable this behavior by setting "detect_noop": false like this: +You can disable this behavior by setting `"detect_noop": false` like this: [source,js] -------------------------------------------------- @@ -243,6 +243,7 @@ POST test/_update/1 // TEST[continued] [float] +[[scripted_upsert]] ==== `scripted_upsert` If you would like your script to run regardless of whether the document exists @@ -272,6 +273,7 @@ POST sessions/_update/dh3sgudg8gsrgl // TEST[continued] [float] +[[doc_as_upsert]] ==== `doc_as_upsert` Instead of sending a partial `doc` plus an `upsert` doc, setting @@ -323,18 +325,18 @@ See <> for details. `refresh`:: Control when the changes made by this request are visible to search. See -<>. +<>. `_source`:: Allows to control if and how the updated source should be returned in the response. By default the updated source is not returned. -See <> for details. +See <> for details. `version`:: -The update API uses the Elasticsearch's versioning support internally to make +The update API uses the Elasticsearch versioning support internally to make sure the document doesn't change during the update. You can use the `version` parameter to specify that the document should only be updated if its version matches the one specified. @@ -343,7 +345,7 @@ matches the one specified. 
.The update API does not support versioning other than internal ===================================================== -External (version types `external` & `external_gte`) or forced (version type `force`) +External (version types `external` and `external_gte`) or forced (version type `force`) versioning is not supported by the update API as it would result in Elasticsearch version numbers being out of sync with the external system. Use the <> instead. diff --git a/docs/reference/frozen-indices.asciidoc b/docs/reference/frozen-indices.asciidoc index 19a8e4318b0c2..60f07b58bcb7d 100644 --- a/docs/reference/frozen-indices.asciidoc +++ b/docs/reference/frozen-indices.asciidoc @@ -1,27 +1,56 @@ [role="xpack"] [testenv="basic"] [[frozen-indices]] -= Frozen Indices += Frozen indices [partintro] -- -Elasticsearch indices can require a significant amount of memory available in order to be open and searchable. Yet, not all indices need -to be writable at the same time and have different access patterns over time. For example, indices in the time series or logging use cases -are unlikely to be queried once they age out but still need to be kept around for retention policy purposes. - -In order to keep indices available and queryable for a longer period but at the same time reduce their hardware requirements they can be transitioned -into a frozen state. Once an index is frozen, all of its transient shard memory (aside from mappings and analyzers) -is moved to persistent storage. This allows for a much higher disk to heap storage ratio on individual nodes. Once an index is -frozen, it is made read-only and drops its transient data structures from memory. These data structures will need to be reloaded on demand (and subsequently dropped) for each search request that targets the frozen index. A search request that hits -one or more frozen shards will be executed on a throttled threadpool that ensures that we never search more than -`N` (`1` by default) searches concurrently (see <>). This protects nodes from exceeding the available memory due to incoming search requests. - -In contrast to ordinary open indices, frozen indices are expected to execute slowly and are not designed for high query load. Parallelism is -gained only on a per-node level and loading data-structures on demand is expected to be one or more orders of a magnitude slower than query -execution on a per shard level. Depending on the data in an index, a frozen index may execute searches in the seconds to minutes range, when the same index in an unfrozen state may execute the same search request in milliseconds. +{es} indices keep some data structures in memory to allow you to search them +efficiently and to index into them. If you have a lot of indices then the +memory required for these data structures can add up to a significant amount. +For indices that are searched frequently it is better to keep these structures +in memory because it takes time to rebuild them. However, you might access some +of your indices so rarely that you would prefer to release the corresponding +memory and rebuild these data structures on each search. + +For example, if you are using time-based indices to store log messages or time +series data then it is likely that older indices are searched much less often +than the more recent ones. Older indices also receive no indexing requests. +Furthermore, it is usually the case that searches of older indices are for +performing longer-term analyses for which a slower response is acceptable. 
+ +If you have such indices then they are good candidates for becoming _frozen +indices_. {es} builds the transient data structures of each shard of a frozen +index each time that shard is searched, and discards these data structures as +soon as the search is complete. Because {es} does not maintain these transient +data structures in memory, frozen indices consume much less heap than normal +indices. This allows for a much higher disk-to-heap ratio than would otherwise +be possible. + +You can freeze the index using the <>. + +Searches performed on frozen indices use the small, dedicated, +<> to control the number of +concurrent searches that hit frozen shards on each node. This limits the amount +of extra memory required for the transient data structures corresponding to +frozen shards, which consequently protects nodes against excessive memory +consumption. + +Frozen indices are read-only: you cannot index into them. + +Searches on frozen indices are expected to execute slowly. Frozen indices are +not intended for high search load. It is possible that a search of a frozen +index may take seconds or minutes to complete, even if the same searches +completed in milliseconds when the indices were not frozen. + +To make a frozen index writable again, use the <>. + -- -== Best Practices +[role="xpack"] +[testenv="basic"] +[[best_practices]] +== Best practices Since frozen indices provide a much higher disk to heap ratio at the expense of search latency, it is advisable to allocate frozen indices to dedicated nodes to prevent searches on frozen indices influencing traffic on low latency nodes. There is significant overhead in loading @@ -40,10 +69,13 @@ POST /twitter/_forcemerge?max_num_segments=1 // CONSOLE // TEST[setup:twitter] +[role="xpack"] +[testenv="basic"] +[[searching_a_frozen_index]] == Searching a frozen index Frozen indices are throttled in order to limit memory consumptions per node. The number of concurrently loaded frozen indices per node is -limited by the number of threads in the <> threadpool, which is `1` by default. +limited by the number of threads in the <> threadpool, which is `1` by default. Search requests will not be executed against frozen indices by default, even if a frozen index is named explicitly. This is to prevent accidental slowdowns by targeting a frozen index by mistake. To include frozen indices a search request must be executed with the query parameter `ignore_throttled=false`. @@ -64,10 +96,13 @@ The default value for `pre_filter_shard_size` is `128` but it's recommended to s significant overhead associated with this pre-filter phase. ================================ +[role="xpack"] +[testenv="basic"] +[[monitoring_frozen_indices]] == Monitoring frozen indices Frozen indices are ordinary indices that use search throttling and a memory efficient shard implementation. For API's like the -`<>` frozen indicies may identified by an index's `search.throttled` property (`sth`). +<> frozen indices may identified by an index's `search.throttled` property (`sth`). 
[source,js] -------------------------------------------------- @@ -83,5 +118,5 @@ The response looks like: i sth twitter true -------------------------------------------------- -// TESTRESPONSE[_cat] +// TESTRESPONSE[non_json] diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index bee2ae5194477..3f4156b0c0011 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -1,406 +1,231 @@ [[getting-started]] -= Getting started += Getting started with {es} [partintro] -- +Ready to take {es} for a test drive and see for yourself how you can use the +REST APIs to store, search, and analyze data? -Elasticsearch is a highly scalable open-source full-text search and analytics engine. It allows you to store, search, and analyze big volumes of data quickly and in near real time. It is generally used as the underlying engine/technology that powers applications that have complex search features and requirements. +Follow this getting started tutorial to: -Here are a few sample use-cases that Elasticsearch could be used for: +. Get an {es} cluster up and running +. Index some sample documents +. Search for documents using the {es} query language +. Analyze the results using bucket and metrics aggregations -* You run an online web store where you allow your customers to search for products that you sell. In this case, you can use Elasticsearch to store your entire product catalog and inventory and provide search and autocomplete suggestions for them. -* You want to collect log or transaction data and you want to analyze and mine this data to look for trends, statistics, summarizations, or anomalies. In this case, you can use Logstash (part of the Elasticsearch/Logstash/Kibana stack) to collect, aggregate, and parse your data, and then have Logstash feed this data into Elasticsearch. Once the data is in Elasticsearch, you can run searches and aggregations to mine any information that is of interest to you. -* You run a price alerting platform which allows price-savvy customers to specify a rule like "I am interested in buying a specific electronic gadget and I want to be notified if the price of gadget falls below $X from any vendor within the next month". In this case you can scrape vendor prices, push them into Elasticsearch and use its reverse-search (Percolator) capability to match price movements against customer queries and eventually push the alerts out to the customer once matches are found. -* You have analytics/business-intelligence needs and want to quickly investigate, analyze, visualize, and ask ad-hoc questions on a lot of data (think millions or billions of records). In this case, you can use Elasticsearch to store your data and then use Kibana (part of the Elasticsearch/Logstash/Kibana stack) to build custom dashboards that can visualize aspects of your data that are important to you. Additionally, you can use the Elasticsearch aggregations functionality to perform complex business intelligence queries against your data. -For the rest of this tutorial, you will be guided through the process of getting Elasticsearch up and running, taking a peek inside it, and performing basic operations like indexing, searching, and modifying your data. At the end of this tutorial, you should have a good idea of what Elasticsearch is, how it works, and hopefully be inspired to see how you can use it to either build sophisticated search applications or to mine intelligence from your data. 
--- - -[[getting-started-concepts]] -== Basic Concepts - -There are a few concepts that are core to Elasticsearch. Understanding these concepts from the outset will tremendously help ease the learning process. - -[float] -=== Near Realtime (NRT) - -Elasticsearch is a near-realtime search platform. What this means is there is a slight latency (normally one second) from the time you index a document until the time it becomes searchable. - -[float] -=== Cluster - -A cluster is a collection of one or more nodes (servers) that together holds your entire data and provides federated indexing and search capabilities across all nodes. A cluster is identified by a unique name which by default is "elasticsearch". This name is important because a node can only be part of a cluster if the node is set up to join the cluster by its name. - -Make sure that you don't reuse the same cluster names in different -environments, otherwise you might end up with nodes joining the wrong cluster. -For instance you could use `logging-dev`, `logging-stage`, and `logging-prod` -for the development, staging, and production clusters. - -Note that it is valid and perfectly fine to have a cluster with only a single node in it. Furthermore, you may also have multiple independent clusters each with its own unique cluster name. - -[float] -=== Node - -A node is a single server that is part of your cluster, stores your data, and participates in the cluster's indexing and search -capabilities. Just like a cluster, a node is identified by a name which by default is a random Universally Unique IDentifier (UUID) that is assigned to the node at startup. You can define any node name you want if you do not want the default. This name is important for administration purposes where you want to identify which servers in your network correspond to which nodes in your Elasticsearch cluster. - -A node can be configured to join a specific cluster by the cluster name. By default, each node is set up to join a cluster named `elasticsearch` which means that if you start up a number of nodes on your network and--assuming they can discover each other--they will all automatically form and join a single cluster named `elasticsearch`. - -In a single cluster, you can have as many nodes as you want. Furthermore, if there are no other Elasticsearch nodes currently running on your network, starting a single node will by default form a new single-node cluster named `elasticsearch`. +Need more context? -[float] -=== Index - -An index is a collection of documents that have somewhat similar characteristics. For example, you can have an index for customer data, another index for a product catalog, and yet another index for order data. An index is identified by a name (that must be all lowercase) and this name is used to refer to the index when performing indexing, search, update, and delete operations against the documents in it. - -In a single cluster, you can define as many indexes as you want. - -[float] -=== Type - -deprecated[6.0.0,See <>] +Check out the <> to learn the lingo and understand the basics of +how {es} works. If you're already familiar with {es} and want to see how it works +with the rest of the stack, you might want to jump to the +{stack-gs}/get-started-elastic-stack.html[Elastic Stack +Tutorial] to see how to set up a system monitoring solution with {es}, {kib}, +{beats}, and {ls}. -A type used to be a logical category/partition of your index to allow you to store different types of documents in the same index, e.g. 
one type for users, another type for blog posts. It is no longer possible to create multiple types in an index, and the whole concept of types will be removed in a later version. See <> for more. +TIP: The fastest way to get started with {es} is to +https://www.elastic.co/cloud/elasticsearch-service/signup[start a free 14-day +trial of {ess}] in the cloud. +-- -[float] -=== Document +[[getting-started-install]] +== Get {es} up and running -A document is a basic unit of information that can be indexed. For example, you can have a document for a single customer, another document for a single product, and yet another for a single order. This document is expressed in http://json.org/[JSON] (JavaScript Object Notation) which is a ubiquitous internet data interchange format. Within an index, you can store as many documents as you want. +To take {es} for a test drive, you can create a +https://www.elastic.co/cloud/elasticsearch-service/signup[hosted deployment] on +the {ess} or set up a multi-node {es} cluster on your own +Linux, macOS, or Windows machine. -[[getting-started-shards-and-replicas]] [float] -=== Shards & Replicas - -An index can potentially store a large amount of data that can exceed the hardware limits of a single node. For example, a single index of a billion documents taking up 1TB of disk space may not fit on the disk of a single node or may be too slow to serve search requests from a single node alone. - -To solve this problem, Elasticsearch provides the ability to subdivide your index into multiple pieces called shards. When you create an index, you can simply define the number of shards that you want. Each shard is in itself a fully-functional and independent "index" that can be hosted on any node in the cluster. - -Sharding is important for two primary reasons: - -* It allows you to horizontally split/scale your content volume -* It allows you to distribute and parallelize operations across shards (potentially on multiple nodes) thus increasing performance/throughput - - -The mechanics of how a shard is distributed and also how its documents are aggregated back into search requests are completely managed by Elasticsearch and is transparent to you as the user. - -In a network/cloud environment where failures can be expected anytime, it is very useful and highly recommended to have a failover mechanism in case a shard/node somehow goes offline or disappears for whatever reason. To this end, Elasticsearch allows you to make one or more copies of your index's shards into what are called replica shards, or replicas for short. +[[run-elasticsearch-hosted]] +=== Run {es} on Elastic Cloud -Replication is important for two primary reasons: +When you create a deployment on the {es} Service, the service provisions +a three-node {es} cluster along with Kibana and APM. -* It provides high availability in case a shard/node fails. For this reason, it is important to note that a replica shard is never allocated on the same node as the original/primary shard that it was copied from. -* It allows you to scale out your search volume/throughput since searches can be executed on all replicas in parallel. +To create a deployment: +. Sign up for a https://www.elastic.co/cloud/elasticsearch-service/signup[free trial] +and verify your email address. +. Set a password for your account. +. Click **Create Deployment**. -To summarize, each index can be split into multiple shards. An index can also be replicated zero (meaning no replicas) or more times. 
Once replicated, each index will have primary shards (the original shards that were replicated from) and replica shards (the copies of the primary shards). - -The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may also change the number of replicas dynamically anytime. You can change the number of shards for an existing index using the {ref}/indices-shrink-index.html[`_shrink`] and {ref}/indices-split-index.html[`_split`] APIs, however this is not a trivial task and pre-planning for the correct number of shards is the optimal approach. - -By default, each index in Elasticsearch is allocated one primary shard and one replica which means that if you have at least two nodes in your cluster, your index will have one primary shard and another replica shard (one complete replica) for a total of two shards per index. - -NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents. -You can monitor shard sizes using the {ref}/cat-shards.html[`_cat/shards`] API. - -With that out of the way, let's get started with the fun part... - -[[getting-started-install]] -== Installation - -[TIP] -============== -You can skip having to install Elasticsearch by using our -https://www.elastic.co/cloud/elasticsearch-service[hosted Elasticsearch Service] -on Elastic Cloud. The Elasticsearch Service is available on both AWS and GCP. -https://www.elastic.co/cloud/elasticsearch-service/signup[Try out the -Elasticsearch Service for free]. -============== - -Elasticsearch requires at least Java 8. Specifically as of this writing, it is recommended that you use the Oracle JDK version {jdk}. Java installation varies from platform to platform so we won't go into those details here. Oracle's recommended installation documentation can be found on http://docs.oracle.com/javase/8/docs/technotes/guides/install/install_overview.html[Oracle's website]. Suffice to say, before you install Elasticsearch, please check your Java version first by running (and then install/upgrade accordingly if needed): - -[source,sh] --------------------------------------------------- -java -version -echo $JAVA_HOME --------------------------------------------------- - -Once we have Java set up, we can then download and run Elasticsearch. The binaries are available from http://www.elastic.co/downloads[`www.elastic.co/downloads`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, a `DEB` or `RPM` package, or a Windows `MSI` installation package. +Once you've created a deployment, you're ready to <>. [float] -=== Installation example with tar +[[run-elasticsearch-local]] +=== Run {es} locally on Linux, macOS, or Windows -For simplicity, let's use the {ref}/zip-targz.html[tar] file. +When you create a deployment on the {ess}, a master node and +two data nodes are provisioned automatically. By installing from the tar or zip +archive, you can start multiple instances of {es} locally to see how a multi-node +cluster behaves. -Let's download the Elasticsearch {version} tar as follows: +To run a three-node {es} cluster locally: +. 
Download the {es} archive for your OS: ++ +Linux: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-linux-x86_64.tar.gz[elasticsearch-{version}-linux-x86_64.tar.gz] ++ ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -curl -L -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.tar.gz +curl -L -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-linux-x86_64.tar.gz -------------------------------------------------- // NOTCONSOLE - -Then extract it as follows: - ++ +macOS: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-darwin-x86_64.tar.gz[elasticsearch-{version}-darwin-x86_64.tar.gz] ++ ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -tar -xvf elasticsearch-{version}.tar.gz +curl -L -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-darwin-x86_64.tar.gz -------------------------------------------------- - -It will then create a bunch of files and folders in your current directory. We then go into the bin directory as follows: - +// NOTCONSOLE ++ +Windows: +https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-windows-x86_64.zip[elasticsearch-{version}-windows-x86_64.zip] + +. Extract the archive: ++ +Linux: ++ ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -cd elasticsearch-{version}/bin +tar -xvf elasticsearch-{version}-linux-x86_64.tar.gz -------------------------------------------------- - -And now we are ready to start our node and single cluster: - -[source,sh] ++ +macOS: ++ +["source","sh",subs="attributes,callouts"] -------------------------------------------------- -./elasticsearch +tar -xvf elasticsearch-{version}-darwin-x86_64.tar.gz -------------------------------------------------- - -[float] -=== Installation example with MSI Windows Installer - -For Windows users, we recommend using the {ref}/windows.html[MSI Installer package]. The package contains a graphical user interface (GUI) that guides you through the installation process. - -First, download the Elasticsearch {version} MSI from -https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.msi. - -Then double-click the downloaded file to launch the GUI. Within the first screen, select the deployment directories: - -[[getting-started-msi-installer-locations]] -image::images/msi_installer/msi_installer_locations.png[] - -Then select whether to install as a service or start Elasticsearch manually as needed. -To align with the tar example, choose not to install as a service: - -[[getting-started-msi-installer-service]] -image::images/msi_installer/msi_installer_no_service.png[] - -For configuration, simply leave the default values: - -[[getting-started-msi-installer-configuration]] -image::images/msi_installer/msi_installer_configuration.png[] - -Again, to align with the tar example, uncheck all plugins to not install any plugins: - -[[getting-started-msi-installer-plugins]] -image::images/msi_installer/msi_installer_plugins.png[] - -After clicking the install button, Elasticsearch will be installed: - -[[getting-started-msi-installer-success]] -image::images/msi_installer/msi_installer_success.png[] - -By default, Elasticsearch will be installed at `%PROGRAMFILES%\Elastic\Elasticsearch`. 
Navigate here and go into the bin directory as follows: - -**with Command Prompt:** - -[source,sh] ++ +Windows PowerShell: ++ +["source","sh",subs="attributes,callouts"] -------------------------------------------------- -cd %PROGRAMFILES%\Elastic\Elasticsearch\bin +Expand-Archive elasticsearch-{version}-windows-x86_64.zip -------------------------------------------------- -**with PowerShell:** - -[source,powershell] +. Start {es} from the `bin` directory: ++ +Linux and macOS: ++ +["source","sh",subs="attributes,callouts"] -------------------------------------------------- -cd $env:PROGRAMFILES\Elastic\Elasticsearch\bin +cd elasticsearch-{version}/bin +./elasticsearch -------------------------------------------------- - -And now we are ready to start our node and single cluster: - -[source,sh] ++ +Windows: ++ +["source","sh",subs="attributes,callouts"] -------------------------------------------------- +cd %PROGRAMFILES%\Elastic\Elasticsearch\bin .\elasticsearch.exe -------------------------------------------------- ++ +You now have a single-node {es} cluster up and running! -[float] -[[successfully-running-node]] -=== Successfully running node - -If everything goes well with installation, you should see a bunch of messages that look like below: - +. Start two more instances of {es} so you can see how a typical multi-node +cluster behaves. You need to specify unique data and log paths +for each node. ++ +Linux and macOS: ++ ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -[2018-09-13T12:20:01,766][INFO ][o.e.e.NodeEnvironment ] [localhost.localdomain] using [1] data paths, mounts [[/home (/dev/mapper/fedora-home)]], net usable_space [335.3gb], net total_space [410.3gb], types [ext4] -[2018-09-13T12:20:01,772][INFO ][o.e.e.NodeEnvironment ] [localhost.localdomain] heap size [990.7mb], compressed ordinary object pointers [true] -[2018-09-13T12:20:01,774][INFO ][o.e.n.Node ] [localhost.localdomain] node name [localhost.localdomain], node ID [B0aEHNagTiWx7SYj-l4NTw] -[2018-09-13T12:20:01,775][INFO ][o.e.n.Node ] [localhost.localdomain] version[{version}], pid[13030], build[oss/zip/77fc20e/2018-09-13T15:37:57.478402Z], OS[Linux/4.16.11-100.fc26.x86_64/amd64], JVM["Oracle Corporation"/OpenJDK 64-Bit Server VM/10/10+46] -[2018-09-13T12:20:01,775][INFO ][o.e.n.Node ] [localhost.localdomain] JVM arguments [-Xms1g, -Xmx1g, -XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -XX:-OmitStackTraceInFastThrow, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Djava.io.tmpdir=/tmp/elasticsearch.LN1ctLCi, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, -Djava.locale.providers=COMPAT, -XX:UseAVX=2, -Dio.netty.allocator.type=unpooled, -Des.path.home=/home/manybubbles/Workspaces/Elastic/master/elasticsearch/qa/unconfigured-node-name/build/cluster/integTestCluster node0/elasticsearch-7.0.0-alpha1-SNAPSHOT, -Des.path.conf=/home/manybubbles/Workspaces/Elastic/master/elasticsearch/qa/unconfigured-node-name/build/cluster/integTestCluster node0/elasticsearch-7.0.0-alpha1-SNAPSHOT/config, -Des.distribution.flavor=oss, -Des.distribution.type=zip] 
-[2018-09-13T12:20:02,543][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [aggs-matrix-stats] -[2018-09-13T12:20:02,543][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [analysis-common] -[2018-09-13T12:20:02,543][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [ingest-common] -[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [lang-expression] -[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [lang-mustache] -[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [lang-painless] -[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [mapper-extras] -[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [parent-join] -[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [percolator] -[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [rank-eval] -[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [reindex] -[2018-09-13T12:20:02,545][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [repository-url] -[2018-09-13T12:20:02,545][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [transport-netty4] -[2018-09-13T12:20:02,545][INFO ][o.e.p.PluginsService ] [localhost.localdomain] no plugins loaded -[2018-09-13T12:20:04,657][INFO ][o.e.d.DiscoveryModule ] [localhost.localdomain] using discovery type [zen] -[2018-09-13T12:20:05,006][INFO ][o.e.n.Node ] [localhost.localdomain] initialized -[2018-09-13T12:20:05,007][INFO ][o.e.n.Node ] [localhost.localdomain] starting ... 
-[2018-09-13T12:20:05,202][INFO ][o.e.t.TransportService ] [localhost.localdomain] publish_address {127.0.0.1:9300}, bound_addresses {[::1]:9300}, {127.0.0.1:9300} -[2018-09-13T12:20:05,221][WARN ][o.e.b.BootstrapChecks ] [localhost.localdomain] max file descriptors [4096] for elasticsearch process is too low, increase to at least [65535] -[2018-09-13T12:20:05,221][WARN ][o.e.b.BootstrapChecks ] [localhost.localdomain] max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144] -[2018-09-13T12:20:08,355][INFO ][o.e.c.s.MasterService ] [localhost.localdomain] zen-disco-elected-as-master ([0] nodes joined)[, ], reason: master node changed {previous [], current [{localhost.localdomain}{B0aEHNagTiWx7SYj-l4NTw}{hzsQz6CVQMCTpMCVLM4IHg}{127.0.0.1}{127.0.0.1:9300}{testattr=test}]} -[2018-09-13T12:20:08,360][INFO ][o.e.c.s.ClusterApplierService] [localhost.localdomain] master node changed {previous [], current [{localhost.localdomain}{B0aEHNagTiWx7SYj-l4NTw}{hzsQz6CVQMCTpMCVLM4IHg}{127.0.0.1}{127.0.0.1:9300}{testattr=test}]}, reason: apply cluster state (from master [master {localhost.localdomain}{B0aEHNagTiWx7SYj-l4NTw}{hzsQz6CVQMCTpMCVLM4IHg}{127.0.0.1}{127.0.0.1:9300}{testattr=test} committed version [1] source [zen-disco-elected-as-master ([0] nodes joined)[, ]]]) -[2018-09-13T12:20:08,384][INFO ][o.e.h.n.Netty4HttpServerTransport] [localhost.localdomain] publish_address {127.0.0.1:9200}, bound_addresses {[::1]:9200}, {127.0.0.1:9200} -[2018-09-13T12:20:08,384][INFO ][o.e.n.Node ] [localhost.localdomain] started - --------------------------------------------------- - -Without going too much into detail, we can see that our node named "6-bjhwl" (which will be a different set of characters in your case) has started and elected itself as a master in a single cluster. Don't worry yet at the moment what master means. The main thing that is important here is that we have started one node within one cluster. - -As mentioned previously, we can override either the cluster or node name. This can be done from the command line when starting Elasticsearch as follows: - -[source,sh] +./elasticsearch -Epath.data=data2 -Epath.logs=log2 +./elasticsearch -Epath.data=data3 -Epath.logs=log3 -------------------------------------------------- -./elasticsearch -Ecluster.name=my_cluster_name -Enode.name=my_node_name ++ +Windows: ++ +["source","sh",subs="attributes,callouts"] -------------------------------------------------- - -Also note the line marked http with information about the HTTP address (`192.168.8.112`) and port (`9200`) that our node is reachable from. By default, Elasticsearch uses port `9200` to provide access to its REST API. This port is configurable if necessary. - -[[getting-started-explore]] -== Exploring Your Cluster - -[float] -=== The REST API - -Now that we have our node (and cluster) up and running, the next step is to understand how to communicate with it. Fortunately, Elasticsearch provides a very comprehensive and powerful REST API that you can use to interact with your cluster. 
Among the few things that can be done with the API are as follows: - -* Check your cluster, node, and index health, status, and statistics -* Administer your cluster, node, and index data and metadata -* Perform CRUD (Create, Read, Update, and Delete) and search operations against your indexes -* Execute advanced search operations such as paging, sorting, filtering, scripting, aggregations, and many others - -[[getting-started-cluster-health]] -=== Cluster Health - -Let's start with a basic health check, which we can use to see how our cluster is doing. We'll be using curl to do this but you can use any tool that allows you to make HTTP/REST calls. Let's assume that we are still on the same node where we started Elasticsearch on and open another command shell window. - -To check the cluster health, we will be using the {ref}/cat.html[`_cat` API]. You can -run the command below in {kibana-ref}/console-kibana.html[Kibana's Console] -by clicking "VIEW IN CONSOLE" or with `curl` by clicking the "COPY AS CURL" -link below and pasting it into a terminal. - +.\elasticsearch.exe -Epath.data=data2 -Epath.logs=log2 +.\elasticsearch.exe -Epath.data=data3 -Epath.logs=log3 +-------------------------------------------------- ++ +The additional nodes are assigned unique IDs. Because you're running all three +nodes locally, they automatically join the cluster with the first node. + +. Use the cat health API to verify that your three-node cluster is up and running. +The cat APIs return information about your cluster and indices in a +format that's easier to read than raw JSON. ++ +You can interact directly with your cluster by submitting HTTP requests to +the {es} REST API. Most of the examples in this guide enable you to copy the +appropriate cURL command and submit the request to your local {es} instance from +the command line. If you have Kibana installed and running, you can also +open Kibana and submit requests through the Dev Console. ++ +TIP: You'll want to check out the +https://www.elastic.co/guide/en/elasticsearch/client/index.html[{es} language +clients] when you're ready to start using {es} in your own applications. ++ [source,js] -------------------------------------------------- GET /_cat/health?v -------------------------------------------------- // CONSOLE - -And the response: - ++ +The response should indicate that the status of the `elasticsearch` cluster +is `green` and it has three nodes: ++ [source,txt] -------------------------------------------------- epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent -1475247709 17:01:49 elasticsearch green 1 1 0 0 0 0 0 0 - 100.0% +1565052807 00:53:27 elasticsearch green 3 3 6 3 0 0 0 0 - 100.0% -------------------------------------------------- -// TESTRESPONSE[s/1475247709 17:01:49 elasticsearch/\\d+ \\d+:\\d+:\\d+ docs_integTestCluster/] +// TESTRESPONSE[s/1565052807 00:53:27 elasticsearch/\\d+ \\d+:\\d+:\\d+ docs_integTestCluster/] +// TESTRESPONSE[s/3 3 6 3/\\d+ \\d+ \\d+ \\d+/] // TESTRESPONSE[s/0 0 -/0 \\d+ -/] -// TESTRESPONSE[_cat] - -We can see that our cluster named "elasticsearch" is up with a green status. - -Whenever we ask for the cluster health, we either get green, yellow, or red.
- - * Green - everything is good (cluster is fully functional) - * Yellow - all data is available but some replicas are not yet allocated (cluster is fully functional) - * Red - some data is not available for whatever reason (cluster is partially functional) - -**Note:** When a cluster is red, it will continue to serve search requests from the available shards but you will likely need to fix it ASAP since there are unassigned shards. - -Also from the above response, we can see a total of 1 node and that we have 0 shards since we have no data in it yet. Note that since we are using the default cluster name (elasticsearch) and since Elasticsearch uses unicast network discovery by default to find other nodes on the same machine, it is possible that you could accidentally start up more than one node on your computer and have them all join a single cluster. In this scenario, you may see more than 1 node in the above response. - -We can also get a list of nodes in our cluster as follows: - -[source,js] --------------------------------------------------- -GET /_cat/nodes?v --------------------------------------------------- -// CONSOLE - -And the response: - -[source,txt] --------------------------------------------------- -ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name -127.0.0.1 10 5 5 4.46 mdi * PB2SGZY --------------------------------------------------- -// TESTRESPONSE[s/10 5 5 4.46/\\d+ \\d+ \\d+ (\\d+\\.\\d+)? (\\d+\\.\\d+)? (\\d+\.\\d+)?/] -// TESTRESPONSE[s/[*]/[*]/ s/PB2SGZY/.+/ _cat] - -Here, we can see our one node named "PB2SGZY", which is the single node that is currently in our cluster. - -[[getting-started-list-indices]] -=== List All Indices - -Now let's take a peek at our indices: - -[source,js] --------------------------------------------------- -GET /_cat/indices?v --------------------------------------------------- -// CONSOLE - -And the response: - -[source,txt] --------------------------------------------------- -health status index uuid pri rep docs.count docs.deleted store.size pri.store.size --------------------------------------------------- -// TESTRESPONSE[_cat] - -Which simply means we have no indices yet in the cluster. - -[[getting-started-create-index]] -=== Create an Index - -Now let's create an index named "customer" and then list all the indexes again: - -[source,js] --------------------------------------------------- -PUT /customer?pretty -GET /_cat/indices?v --------------------------------------------------- -// CONSOLE - -The first command creates the index named "customer" using the PUT verb. We simply append `pretty` to the end of the call to tell it to pretty-print the JSON response (if any). - -And the response: +// TESTRESPONSE[non_json] ++ +NOTE: The cluster status will remain yellow if you are only running a single +instance of {es}. A single node cluster is fully functional, but data +cannot be replicated to another node to provide resiliency. Replica shards must +be available for the cluster status to be green. If the cluster status is red, +some data is unavailable. 
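If you are following along with `curl` rather than the Kibana Console, the same health check can be issued from the command line. A minimal sketch, assuming {es} is listening on the default `localhost:9200` address:

[source,sh]
--------------------------------------------------
# Check cluster health from the command line (default local address assumed)
curl -X GET "localhost:9200/_cat/health?v"
--------------------------------------------------
// NOTCONSOLE

The `v` parameter adds column headers to the cat output, which makes the response easier to read.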
-[source,txt] --------------------------------------------------- -health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -yellow open customer 95SQ4TSUT7mWBT7VNHH67A 1 1 0 0 260b 260b --------------------------------------------------- -// TESTRESPONSE[s/95SQ4TSUT7mWBT7VNHH67A/.+/ s/260b/\\d+\\.?\\d?k?b/ _cat] +[float] +[[gs-other-install]] +=== Other installation options -The results of the second command tells us that we now have one index named customer and it has one primary shard and one replica (the defaults) and it contains zero documents in it. +Installing {es} from an archive file enables you to easily install and run +multiple instances locally so you can try things out. To run a single instance, +you can run {es} in a Docker container, install {es} using the DEB or RPM +packages on Linux, install using Homebrew on macOS, or install using the MSI +package installer on Windows. See <> for more information. -You might also notice that the customer index has a yellow health tagged to it. Recall from our previous discussion that yellow means that some replicas are not (yet) allocated. The reason this happens for this index is because Elasticsearch by default created one replica for this index. Since we only have one node running at the moment, that one replica cannot yet be allocated (for high availability) until a later point in time when another node joins the cluster. Once that replica gets allocated onto a second node, the health status for this index will turn to green. +[[getting-started-index]] +== Index some documents -[[getting-started-query-document]] -=== Index and Query a Document +Once you have a cluster up and running, you're ready to index some data. +There are a variety of ingest options for {es}, but in the end they all +do the same thing: put JSON documents into an {es} index. -Let's now put something into our customer index. We'll index a simple customer document into the customer index, with an ID of 1 as follows: +You can do this directly with a simple PUT request that specifies +the index you want to add the document, a unique document ID, and one or more +`"field": "value"` pairs in the request body: [source,js] -------------------------------------------------- -PUT /customer/_doc/1?pretty +PUT /customer/_doc/1 { "name": "John Doe" } -------------------------------------------------- // CONSOLE -And the response: +This request automatically creates the `customer` index if it doesn't already +exist, adds a new document that has an ID of `1`, and stores and +indexes the `name` field. + +Since this is a new document, the response shows that the result of the +operation was that version 1 of the document was created: [source,js] -------------------------------------------------- @@ -412,29 +237,30 @@ And the response: "result" : "created", "_shards" : { "total" : 2, - "successful" : 1, + "successful" : 2, "failed" : 0 }, - "_seq_no" : 0, - "_primary_term" : 1 + "_seq_no" : 26, + "_primary_term" : 4 } -------------------------------------------------- -// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] +// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/] +// TESTRESPONSE[s/"successful" : \d+/"successful" : $body._shards.successful/] +// TESTRESPONSE[s/"_primary_term" : \d+/"_primary_term" : $body._primary_term/] -From the above, we can see that a new customer document was successfully created inside the customer index. 
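A `curl` form of the same indexing request, again assuming the default `localhost:9200` address, would look roughly like this:

[source,sh]
--------------------------------------------------
# Index a document with ID 1 into the customer index
# (the index is created automatically if it doesn't exist yet)
curl -X PUT "localhost:9200/customer/_doc/1?pretty" \
  -H 'Content-Type: application/json' \
  -d '{ "name": "John Doe" }'
--------------------------------------------------
// NOTCONSOLE

If you repeat the request with a different body, the existing document with ID `1` is replaced.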
The document also has an internal id of 1 which we specified at index time. -It is important to note that Elasticsearch does not require you to explicitly create an index first before you can index documents into it. In the previous example, Elasticsearch will automatically create the customer index if it didn't already exist beforehand. - -Let's now retrieve that document that we just indexed: +The new document is available immediately from any node in the cluster. +You can retrieve it with a GET request that specifies its document ID: [source,js] -------------------------------------------------- -GET /customer/_doc/1?pretty +GET /customer/_doc/1 -------------------------------------------------- // CONSOLE // TEST[continued] -And the response: +The response indicates that a document with the specified ID was found +and shows the original source fields that were indexed. [source,js] -------------------------------------------------- @@ -443,226 +269,33 @@ And the response: "_type" : "_doc", "_id" : "1", "_version" : 1, - "_seq_no" : 25, - "_primary_term" : 1, + "_seq_no" : 26, + "_primary_term" : 4, "found" : true, - "_source" : { "name": "John Doe" } -} --------------------------------------------------- -// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] - -Nothing out of the ordinary here other than a field, `found`, stating that we found a document with the requested ID 1 and another field, `_source`, which returns the full JSON document that we indexed from the previous step. - -[[getting-started-delete-index]] -=== Delete an Index - -Now let's delete the index that we just created and then list all the indexes again: - -[source,js] --------------------------------------------------- -DELETE /customer?pretty -GET /_cat/indices?v --------------------------------------------------- -// CONSOLE -// TEST[continued] - -And the response: - -[source,txt] --------------------------------------------------- -health status index uuid pri rep docs.count docs.deleted store.size pri.store.size --------------------------------------------------- -// TESTRESPONSE[_cat] - -Which means that the index was deleted successfully and we are now back to where we started with nothing in our cluster. - -Before we move on, let's take a closer look again at some of the API commands that we have learned so far: - -[source,js] --------------------------------------------------- -PUT /customer -PUT /customer/_doc/1 -{ - "name": "John Doe" + "_source" : { + "name": "John Doe" + } } -GET /customer/_doc/1 -DELETE /customer --------------------------------------------------- -// CONSOLE - -If we study the above commands carefully, we can actually see a pattern of how we access data in Elasticsearch. That pattern can be summarized as follows: - -[source,js] -------------------------------------------------- - /// --------------------------------------------------- -// NOTCONSOLE - -This REST access pattern is so pervasive throughout all the API commands that if you can simply remember it, you will have a good head start at mastering Elasticsearch. - -[[getting-started-modify-data]] -== Modifying Your Data - -Elasticsearch provides data manipulation and search capabilities in near real time. By default, you can expect a one second delay (refresh interval) from the time you index/update/delete your data until the time that it appears in your search results. 
This is an important distinction from other platforms like SQL wherein data is immediately available after a transaction is completed. +// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ ] +// TESTRESPONSE[s/"_primary_term" : \d+/"_primary_term" : $body._primary_term/] [float] -=== Indexing/Replacing Documents - -We've previously seen how we can index a single document. Let's recall that command again: - -[source,js] --------------------------------------------------- -PUT /customer/_doc/1?pretty -{ - "name": "John Doe" -} --------------------------------------------------- -// CONSOLE - -Again, the above will index the specified document into the customer index, with the ID of 1. If we then executed the above command again with a different (or same) document, Elasticsearch will replace (i.e. reindex) a new document on top of the existing one with the ID of 1: - -[source,js] --------------------------------------------------- -PUT /customer/_doc/1?pretty -{ - "name": "Jane Doe" -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -The above changes the name of the document with the ID of 1 from "John Doe" to "Jane Doe". If, on the other hand, we use a different ID, a new document will be indexed and the existing document(s) already in the index remains untouched. - -[source,js] --------------------------------------------------- -PUT /customer/_doc/2?pretty -{ - "name": "Jane Doe" -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -The above indexes a new document with an ID of 2. - -When indexing, the ID part is optional. If not specified, Elasticsearch will generate a random ID and then use it to index the document. The actual ID Elasticsearch generates (or whatever we specified explicitly in the previous examples) is returned as part of the index API call. - -This example shows how to index a document without an explicit ID: - -[source,js] --------------------------------------------------- -POST /customer/_doc?pretty -{ - "name": "Jane Doe" -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Note that in the above case, we are using the `POST` verb instead of PUT since we didn't specify an ID. - -[[getting-started-update-documents]] -=== Updating Documents - -In addition to being able to index and replace documents, we can also update documents. Note though that Elasticsearch does not actually do in-place updates under the hood. Whenever we do an update, Elasticsearch deletes the old document and then indexes a new document with the update applied to it in one shot. - -This example shows how to update our previous document (ID of 1) by changing the name field to "Jane Doe": - -[source,js] --------------------------------------------------- -POST /customer/_update/1?pretty -{ - "doc": { "name": "Jane Doe" } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -This example shows how to update our previous document (ID of 1) by changing the name field to "Jane Doe" and at the same time add an age field to it: - -[source,js] --------------------------------------------------- -POST /customer/_update/1?pretty -{ - "doc": { "name": "Jane Doe", "age": 20 } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Updates can also be performed by using simple scripts. 
This example uses a script to increment the age by 5: - -[source,js] -------------------------------------------------- -POST /customer/_update/1?pretty -{ - "script" : "ctx._source.age += 5" -} -------------------------------------------------- -// CONSOLE -// TEST[continued] - -In the above example, `ctx._source` refers to the current source document that is about to be updated. - -Elasticsearch provides the ability to update multiple documents given a query condition (like an `SQL UPDATE-WHERE` statement). See {ref}/docs-update-by-query.html[`docs-update-by-query` API] - -[[getting-started-delete-documents]] -=== Deleting Documents - -Deleting a document is fairly straightforward. This example shows how to delete our previous customer with the ID of 2: - -[source,js] -------------------------------------------------- -DELETE /customer/_doc/2?pretty -------------------------------------------------- -// CONSOLE -// TEST[continued] - -See the {ref}/docs-delete-by-query.html[`_delete_by_query` API] to delete all documents matching a specific query. -It is worth noting that it is much more efficient to delete a whole index -instead of deleting all documents with the Delete By Query API. - [[getting-started-batch-processing]] -=== Batch Processing - -In addition to being able to index, update, and delete individual documents, Elasticsearch also provides the ability to perform any of the above operations in batches using the {ref}/docs-bulk.html[`_bulk` API]. This functionality is important in that it provides a very efficient mechanism to do multiple operations as fast as possible with as few network roundtrips as possible. - -As a quick example, the following call indexes two documents (ID 1 - John Doe and ID 2 - Jane Doe) in one bulk operation: - -[source,js] -------------------------------------------------- -POST /customer/_bulk?pretty -{"index":{"_id":"1"}} -{"name": "John Doe" } -{"index":{"_id":"2"}} -{"name": "Jane Doe" } -------------------------------------------------- -// CONSOLE - -This example updates the first document (ID of 1) and then deletes the second document (ID of 2) in one bulk operation: - -[source,sh] -------------------------------------------------- -POST /customer/_bulk?pretty -{"update":{"_id":"1"}} -{"doc": { "name": "John Doe becomes Jane Doe" } } -{"delete":{"_id":"2"}} -------------------------------------------------- -// CONSOLE -// TEST[continued] - -Note above that for the delete action, there is no corresponding source document after it since deletes only require the ID of the document to be deleted. +=== Indexing documents in bulk -The Bulk API does not fail due to failures in one of the actions. If a single action fails for whatever reason, it will continue to process the remainder of the actions after it. When the bulk API returns, it will provide a status for each action (in the same order it was sent in) so that you can check if a specific action failed or not. +If you have a lot of documents to index, you can submit them in batches with +the {ref}/docs-bulk.html[bulk API]. Using bulk to batch document +operations is significantly faster than submitting requests individually as it minimizes network roundtrips. -[[getting-started-explore-data]] -== Exploring Your Data +The optimal batch size depends on a number of factors: the document size and complexity, the indexing and search load, and the resources available to your cluster. A good place to start is with batches of 1,000 to 5,000 documents +and a total payload between 5MB and 15MB.
From there, you can experiment +to find the sweet spot. -[float] -=== Sample Dataset - -Now that we've gotten a glimpse of the basics, let's try to work on a more realistic dataset. I've prepared a sample of fictitious JSON documents of customer bank account information. Each document has the following schema: +To get some data into {es} that you can start searching and analyzing: +. Download the https://github.com/elastic/elasticsearch/blob/master/docs/src/test/resources/accounts.json?raw=true[`accounts.json`] sample data set. The documents in this randomly-generated data set represent user accounts with the following information: ++ [source,js] -------------------------------------------------- { @@ -681,24 +314,19 @@ Now that we've gotten a glimpse of the basics, let's try to work on a more reali -------------------------------------------------- // NOTCONSOLE -For the curious, this data was generated using http://www.json-generator.com/[`www.json-generator.com/`], so please ignore the actual values and semantics of the data as these are all randomly generated. - -[float] -=== Loading the Sample Dataset - -You can download the sample dataset (accounts.json) from https://github.com/elastic/elasticsearch/blob/master/docs/src/test/resources/accounts.json?raw=true[here]. Extract it to our current directory and let's load it into our cluster as follows: - +. Index the account data into the `bank` index with the following `_bulk` request: ++ [source,sh] -------------------------------------------------- curl -H "Content-Type: application/json" -XPOST "localhost:9200/bank/_bulk?pretty&refresh" --data-binary "@accounts.json" curl "localhost:9200/_cat/indices?v" -------------------------------------------------- // NOTCONSOLE - ++ //// This replicates the above in a document-testing friendly way but isn't visible in the docs: - ++ [source,js] -------------------------------------------------- GET /_cat/indices?v @@ -706,98 +334,28 @@ GET /_cat/indices?v // CONSOLE // TEST[setup:bank] //// - -And the response: - ++ +The response indicates that 1,000 documents were indexed successfully. ++ [source,txt] -------------------------------------------------- health status index uuid pri rep docs.count docs.deleted store.size pri.store.size yellow open bank l7sSYV2cQXmu6_4rJWVIww 5 1 1000 0 128.6kb 128.6kb -------------------------------------------------- // TESTRESPONSE[s/128.6kb/\\d+(\\.\\d+)?[mk]?b/] -// TESTRESPONSE[s/l7sSYV2cQXmu6_4rJWVIww/.+/ _cat] - -Which means that we just successfully bulk indexed 1000 documents into the bank index. - -[[getting-started-search-API]] -=== The Search API - -Now let's start with some simple searches. There are two basic ways to run searches: one is by sending search parameters through the {ref}/search-uri-request.html[REST request URI] and the other by sending them through the {ref}/search-request-body.html[REST request body]. The request body method allows you to be more expressive and also to define your searches in a more readable JSON format. We'll try one example of the request URI method but for the remainder of this tutorial, we will exclusively be using the request body method. - -The REST API for search is accessible from the `_search` endpoint. 
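For reference, the body of a `_bulk` request such as the one in `accounts.json` is newline-delimited JSON: each action line is followed by the document source for index operations, and the final line must end with a newline. A minimal sketch of that format, using abbreviated versions of two documents that appear elsewhere in this guide:

[source,sh]
--------------------------------------------------
# A small bulk request in the same newline-delimited format as accounts.json
# (document fields abbreviated for illustration)
curl -H "Content-Type: application/json" -XPOST "localhost:9200/bank/_bulk?pretty&refresh" --data-binary \
'{"index":{"_id":"0"}}
{"account_number":0,"balance":16623,"firstname":"Bradshaw","lastname":"Mckenzie"}
{"index":{"_id":"1"}}
{"account_number":1,"balance":39225,"firstname":"Amber","lastname":"Duke"}
'
--------------------------------------------------
// NOTCONSOLE

The closing quote sits on its own line so that the request body ends with the newline the bulk API requires.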
This example returns all documents in the bank index: - -[source,js] --------------------------------------------------- -GET /bank/_search?q=*&sort=account_number:asc&pretty --------------------------------------------------- -// CONSOLE -// TEST[continued] +// TESTRESPONSE[s/l7sSYV2cQXmu6_4rJWVIww/.+/ non_json] -Let's first dissect the search call. We are searching (`_search` endpoint) in the bank index, and the `q=*` parameter instructs Elasticsearch to match all documents in the index. The `sort=account_number:asc` parameter indicates to sort the results using the `account_number` field of each document in an ascending order. The `pretty` parameter, again, just tells Elasticsearch to return pretty-printed JSON results. - -And the response (partially shown): +[[getting-started-search]] +== Start searching -[source,js] --------------------------------------------------- -{ - "took" : 63, - "timed_out" : false, - "_shards" : { - "total" : 5, - "successful" : 5, - "skipped" : 0, - "failed" : 0 - }, - "hits" : { - "total" : { - "value": 1000, - "relation": "eq" - }, - "max_score" : null, - "hits" : [ { - "_index" : "bank", - "_type" : "_doc", - "_id" : "0", - "sort": [0], - "_score" : null, - "_source" : {"account_number":0,"balance":16623,"firstname":"Bradshaw","lastname":"Mckenzie","age":29,"gender":"F","address":"244 Columbus Place","employer":"Euron","email":"bradshawmckenzie@euron.com","city":"Hobucken","state":"CO"} - }, { - "_index" : "bank", - "_type" : "_doc", - "_id" : "1", - "sort": [1], - "_score" : null, - "_source" : {"account_number":1,"balance":39225,"firstname":"Amber","lastname":"Duke","age":32,"gender":"M","address":"880 Holmes Lane","employer":"Pyrami","email":"amberduke@pyrami.com","city":"Brogan","state":"IL"} - }, ... - ] - } -} --------------------------------------------------- -// TESTRESPONSE[s/"took" : 63/"took" : $body.took/] -// TESTRESPONSE[s/\.\.\./$body.hits.hits.2, $body.hits.hits.3, $body.hits.hits.4, $body.hits.hits.5, $body.hits.hits.6, $body.hits.hits.7, $body.hits.hits.8, $body.hits.hits.9/] +Once you have ingested some data into an {es} index, you can search it +by sending requests to the `_search` endpoint. To access the full suite of +search capabilities, you use the {es} Query DSL to specify the +search criteria in the request body. You specify the name of the index you +want to search in the request URI. -As for the response, we see the following parts: - -* `took` – time in milliseconds for Elasticsearch to execute the search -* `timed_out` – tells us if the search timed out or not -* `_shards` – tells us how many shards were searched, as well as a count of the successful/failed searched shards -* `hits` – search results -* `hits.total` – an object that contains information about the total number of documents matching our search criteria -** `hits.total.value` - the value of the total hit count (must be interpreted in the context of `hits.total.relation`). -** `hits.total.relation` - whether `hits.total.value` is the exact hit count, in which case it is equal to `"eq"` or a - lower bound of the total hit count (greater than or equals), in which case it is equal to `gte`. 
-* `hits.hits` – actual array of search results (defaults to first 10 documents) -* `hits.sort` - sort key for results (missing if sorting by score) -* `hits._score` and `max_score` - ignore these fields for now - -The accuracy of `hits.total` is controlled by the request parameter `track_total_hits`, when set to true -the request will track the total hits accurately (`"relation": "eq"`). It defaults to `10,000` -which means that the total hit count is accurately tracked up to `10,000` documents. -You can force an accurate count by setting `track_total_hits` to true explicitly. -See the <> documentation -for more details. - -Here is the same exact search above using the alternative request body method: +For example, the following request retrieves all documents in the `bank` +index sorted by account number: [source,js] -------------------------------------------------- @@ -812,11 +370,8 @@ GET /bank/_search // CONSOLE // TEST[continued] -The difference here is that instead of passing `q=*` in the URI, we provide a JSON-style query request body to the `_search` API. We'll discuss this JSON query in the next section. - -//// -Hidden response just so we can assert that it is indeed the same but don't have -to clutter the docs with it: +By default, the `hits` section of the response includes the first 10 documents +that match the search criteria: [source,js] -------------------------------------------------- @@ -831,23 +386,23 @@ to clutter the docs with it: }, "hits" : { "total" : { - "value": 1000, - "relation": "eq" + "value": 1000, + "relation": "eq" }, - "max_score": null, + "max_score" : null, "hits" : [ { "_index" : "bank", "_type" : "_doc", "_id" : "0", "sort": [0], - "_score": null, + "_score" : null, "_source" : {"account_number":0,"balance":16623,"firstname":"Bradshaw","lastname":"Mckenzie","age":29,"gender":"F","address":"244 Columbus Place","employer":"Euron","email":"bradshawmckenzie@euron.com","city":"Hobucken","state":"CO"} }, { "_index" : "bank", "_type" : "_doc", "_id" : "1", "sort": [1], - "_score": null, + "_score" : null, "_source" : {"account_number":1,"balance":39225,"firstname":"Amber","lastname":"Duke","age":32,"gender":"M","address":"880 Holmes Lane","employer":"Pyrami","email":"amberduke@pyrami.com","city":"Brogan","state":"IL"} }, ... ] @@ -857,53 +412,31 @@ to clutter the docs with it: // TESTRESPONSE[s/"took" : 63/"took" : $body.took/] // TESTRESPONSE[s/\.\.\./$body.hits.hits.2, $body.hits.hits.3, $body.hits.hits.4, $body.hits.hits.5, $body.hits.hits.6, $body.hits.hits.7, $body.hits.hits.8, $body.hits.hits.9/] -//// - -It is important to understand that once you get your search results back, Elasticsearch is completely done with the request and does not maintain any kind of server-side resources or open cursors into your results. This is in stark contrast to many other platforms such as SQL wherein you may initially get a partial subset of your query results up-front and then you have to continuously go back to the server if you want to fetch (or page through) the rest of the results using some kind of stateful server-side cursor. - -[[getting-started-query-lang]] -=== Introducing the Query Language - -Elasticsearch provides a JSON-style domain-specific language that you can use to execute queries. This is referred to as the {ref}/query-dsl.html[Query DSL]. The query language is quite comprehensive and can be intimidating at first glance but the best way to actually learn it is to start with a few basic examples. 
- -Going back to our last example, we executed this query: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { "match_all": {} } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Dissecting the above, the `query` part tells us what our query definition is and the `match_all` part is simply the type of query that we want to run. The `match_all` query is simply a search for all documents in the specified index. +The response also provides the following information about the search request: -In addition to the `query` parameter, we also can pass other parameters to -influence the search results. In the example in the section above we passed in -`sort`, here we pass in `size`: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { "match_all": {} }, - "size": 1 -} --------------------------------------------------- -// CONSOLE -// TEST[continued] +* `took` – how long it took {es} to run the query, in milliseconds +* `timed_out` – whether or not the search request timed out +* `_shards` – how many shards were searched and a breakdown of how many shards +succeeded, failed, or were skipped. +* `max_score` – the score of the most relevant document found +* `hits.total.value` - how many matching documents were found +* `hits.sort` - the document's sort position (when not sorting by relevance score) +* `hits._score` - the document's relevance score (not applicable when using `match_all`) -Note that if `size` is not specified, it defaults to 10. +Each search request is self-contained: {es} does not maintain any +state information across requests. To page through the search hits, specify +the `from` and `size` parameters in your request. -This example does a `match_all` and returns documents 10 through 19: +For example, the following request gets hits 10 through 19: [source,js] -------------------------------------------------- GET /bank/_search { "query": { "match_all": {} }, + "sort": [ + { "account_number": "asc" } + ], "from": 10, "size": 10 } @@ -911,70 +444,12 @@ GET /bank/_search // CONSOLE // TEST[continued] -The `from` parameter (0-based) specifies which document index to start from and the `size` parameter specifies how many documents to return starting at the from parameter. This feature is useful when implementing paging of search results. Note that if `from` is not specified, it defaults to 0. - -This example does a `match_all` and sorts the results by account balance in descending order and returns the top 10 (default size) documents. - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { "match_all": {} }, - "sort": { "balance": { "order": "desc" } } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[[getting-started-search]] -=== Executing Searches - -Now that we have seen a few of the basic search parameters, let's dig in some more into the Query DSL. Let's first take a look at the returned document fields. By default, the full JSON document is returned as part of all searches. This is referred to as the source (`_source` field in the search hits). If we don't want the entire source document returned, we have the ability to request only a few fields from within source to be returned. 
- -This example shows how to return two fields, `account_number` and `balance` (inside of `_source`), from the search: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { "match_all": {} }, - "_source": ["account_number", "balance"] -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Note that the above example simply reduces the `_source` field. It will still only return one field named `_source` but within it, only the fields `account_number` and `balance` are included. - -If you come from a SQL background, the above is somewhat similar in concept to the `SQL SELECT FROM` field list. - -Now let's move on to the query part. Previously, we've seen how the `match_all` query is used to match all documents. Let's now introduce a new query called the {ref}/query-dsl-match-query.html[`match` query], which can be thought of as a basic fielded search query (i.e. a search done against a specific field or set of fields). - -This example returns the account numbered 20: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { "match": { "account_number": 20 } } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -This example returns all accounts containing the term "mill" in the address: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { "match": { "address": "mill" } } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] +Now that you've seen how to submit a basic search request, you can start to +construct queries that are a bit more interesting than `match_all`. -This example returns all accounts containing the term "mill" or "lane" in the address: +To search for specific terms within a field, you can use a `match` query. +For example, the following request searches the `address` field to find +customers whose addresses contain `mill` or `lane`: [source,js] -------------------------------------------------- @@ -986,7 +461,9 @@ GET /bank/_search // CONSOLE // TEST[continued] -This example is a variant of `match` (`match_phrase`) that returns all accounts containing the phrase "mill lane" in the address: +To perform a phrase search rather than matching individual terms, you use +`match_phrase` instead of `match`. For example, the following request only +matches addresses that contain the phrase `mill lane`: [source,js] -------------------------------------------------- @@ -998,74 +475,13 @@ GET /bank/_search // CONSOLE // TEST[continued] -Let's now introduce the {ref}/query-dsl-bool-query.html[`bool` query]. The `bool` query allows us to compose smaller queries into bigger queries using boolean logic. - -This example composes two `match` queries and returns all accounts containing "mill" and "lane" in the address: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { - "bool": { - "must": [ - { "match": { "address": "mill" } }, - { "match": { "address": "lane" } } - ] - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -In the above example, the `bool must` clause specifies all the queries that must be true for a document to be considered a match. 
- -In contrast, this example composes two `match` queries and returns all accounts containing "mill" or "lane" in the address: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { - "bool": { - "should": [ - { "match": { "address": "mill" } }, - { "match": { "address": "lane" } } - ] - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -In the above example, the `bool should` clause specifies a list of queries either of which must be true for a document to be considered a match. - -This example composes two `match` queries and returns all accounts that contain neither "mill" nor "lane" in the address: - -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "query": { - "bool": { - "must_not": [ - { "match": { "address": "mill" } }, - { "match": { "address": "lane" } } - ] - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -In the above example, the `bool must_not` clause specifies a list of queries none of which must be true for a document to be considered a match. +To construct more complex queries, you can use a `bool` query to combine +multiple query criteria. You can designate criteria as required (must match), +desirable (should match), or undesirable (must not match). -We can combine `must`, `should`, and `must_not` clauses simultaneously inside a `bool` query. Furthermore, we can compose `bool` queries inside any of these `bool` clauses to mimic any complex multi-level boolean logic. - -This example returns all accounts of anybody who is 40 years old but doesn't live in ID(aho): +For example, the following request searches the `bank` index for accounts that +belong to customers who are 40 years old, but excludes anyone who lives in +Idaho (ID): [source,js] -------------------------------------------------- @@ -1086,16 +502,19 @@ GET /bank/_search // CONSOLE // TEST[continued] -[[getting-started-filters]] -=== Executing Filters - -In the previous section, we skipped over a little detail called the document score (`_score` field in the search results). The score is a numeric value that is a relative measure of how well the document matches the search query that we specified. The higher the score, the more relevant the document is, the lower the score, the less relevant the document is. +Each `must`, `should`, and `must_not` element in a Boolean query is referred +to as a query clause. How well a document meets the criteria in each `must` or +`should` clause contributes to the document's _relevance score_. The higher the +score, the better the document matches your search criteria. By default, {es} +returns documents ranked by these relevance scores. -But queries do not always need to produce scores, in particular when they are only used for "filtering" the document set. Elasticsearch detects these situations and automatically optimizes query execution in order not to compute useless scores. +The criteria in a `must_not` clause is treated as a _filter_. It affects whether +or not the document is included in the results, but does not contribute to +how documents are scored. You can also explicitly specify arbitrary filters to +include or exclude documents based on structured data. 
-The {ref}/query-dsl-bool-query.html[`bool` query] that we introduced in the previous section also supports `filter` clauses which allow us to use a query to restrict the documents that will be matched by other clauses, without changing how scores are computed. As an example, let's introduce the {ref}/query-dsl-range-query.html[`range` query], which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. - -This example uses a bool query to return all accounts with balances between 20000 and 30000, inclusive. In other words, we want to find accounts with a balance that is greater than or equal to 20000 and less than or equal to 30000. +For example, the following request uses a range filter to limit the results to +accounts with a balance between $20,000 and $30,000 (inclusive). [source,js] -------------------------------------------------- @@ -1119,16 +538,18 @@ GET /bank/_search // CONSOLE // TEST[continued] -Dissecting the above, the bool query contains a `match_all` query (the query part) and a `range` query (the filter part). We can substitute any other queries into the query and the filter parts. In the above case, the range query makes perfect sense since documents falling into the range all match "equally", i.e., no document is more relevant than another. - -In addition to the `match_all`, `match`, `bool`, and `range` queries, there are a lot of other query types that are available and we won't go into them here. Since we already have a basic understanding of how they work, it shouldn't be too difficult to apply this knowledge in learning and experimenting with the other query types. - [[getting-started-aggregations]] -=== Executing Aggregations +== Analyze results with aggregations -Aggregations provide the ability to group and extract statistics from your data. The easiest way to think about aggregations is by roughly equating it to the SQL GROUP BY and the SQL aggregate functions. In Elasticsearch, you have the ability to execute searches returning hits and at the same time return aggregated results separate from the hits all in one response. This is very powerful and efficient in the sense that you can run queries and multiple aggregations and get the results back of both (or either) operations in one shot avoiding network roundtrips using a concise and simplified API. +{es} aggregations enable you to get meta-information about your search results +and answer questions like, "How many account holders are in Texas?" or +"What's the average balance of accounts in Tennessee?" You can search +documents, filter hits, and use aggregations to analyze the results all in one +request. -To start with, this example groups all the accounts by state, and then returns the top 10 (default) states sorted by count descending (also default): +For example, the following request uses a `terms` aggregation to group +all of the accounts in the `bank` index by state, and returns the ten states +with the most accounts in descending order: [source,js] -------------------------------------------------- @@ -1147,14 +568,10 @@ GET /bank/_search // CONSOLE // TEST[continued] -In SQL, the above aggregation is similar in concept to: - -[source,sh] --------------------------------------------------- -SELECT state, COUNT(*) FROM bank GROUP BY state ORDER BY COUNT(*) DESC LIMIT 10; --------------------------------------------------- - -And the response (partially shown): +The `buckets` in the response are the values of the `state` field. 
The +`doc_count` shows the number of accounts in each state. For example, you +can see that there are 27 accounts in `ID` (Idaho). Because the request +set `size=0`, the response only contains the aggregation results. [source,js] -------------------------------------------------- @@ -1216,12 +633,11 @@ And the response (partially shown): -------------------------------------------------- // TESTRESPONSE[s/"took": 29/"took": $body.took/] -We can see that there are 27 accounts in `ID` (Idaho), followed by 27 accounts -in `TX` (Texas), followed by 25 accounts in `AL` (Alabama), and so forth. -Note that we set `size=0` to not show search hits because we only want to see the aggregation results in the response. - -Building on the previous aggregation, this example calculates the average account balance by state (again only for the top 10 states sorted by count in descending order): +You can combine aggregations to build more complex summaries of your data. For +example, the following request nests an `avg` aggregation within the previous +`group_by_state` aggregation to calculate the average account balances for +each state. [source,js] -------------------------------------------------- @@ -1247,9 +663,8 @@ GET /bank/_search // CONSOLE // TEST[continued] -Notice how we nested the `average_balance` aggregation inside the `group_by_state` aggregation. This is a common pattern for all the aggregations. You can nest aggregations inside aggregations arbitrarily to extract pivoted summarizations that you require from your data. - -Building on the previous aggregation, let's now sort on the average balance in descending order: +Instead of sorting the results by count, you could sort using the result of +the nested aggregation by specifying the order within the `terms` aggregation: [source,js] -------------------------------------------------- @@ -1278,56 +693,29 @@ GET /bank/_search // CONSOLE // TEST[continued] -This example demonstrates how we can group by age brackets (ages 20-29, 30-39, and 40-49), then by gender, and then finally get the average account balance, per age bracket, per gender: +In addition to basic bucketing and metrics aggregations like these, {es} +provides specialized aggregations for operating on multiple fields and +analyzing particular types of data such as dates, IP addresses, and geo +data. You can also feed the results of individual aggregations into pipeline +aggregations for further analysis. -[source,js] --------------------------------------------------- -GET /bank/_search -{ - "size": 0, - "aggs": { - "group_by_age": { - "range": { - "field": "age", - "ranges": [ - { - "from": 20, - "to": 30 - }, - { - "from": 30, - "to": 40 - }, - { - "from": 40, - "to": 50 - } - ] - }, - "aggs": { - "group_by_gender": { - "terms": { - "field": "gender.keyword" - }, - "aggs": { - "average_balance": { - "avg": { - "field": "balance" - } - } - } - } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] +The core analysis capabilities provided by aggregations enable advanced +features such as using machine learning to detect anomalies. + +[[getting-started-next-steps]] +== Where to go from here + +Now that you've set up a cluster, indexed some documents, and run some +searches and aggregations, you might want to: -There are many other aggregations capabilities that we won't go into detail here. The {ref}/search-aggregations.html[aggregations reference guide] is a great starting point if you want to do further experimentation. 
+* {stack-gs}/get-started-elastic-stack.html#install-kibana[Dive in to the Elastic +Stack Tutorial] to install Kibana, Logstash, and Beats and +set up a basic system monitoring solution. -[[getting-started-conclusion]] -== Conclusion +* {kibana-ref}/add-sample-data.html[Load one of the sample data sets into Kibana] +to see how you can use {es} and Kibana together to visualize your data. -Elasticsearch is both a simple and complex product. We've so far learned the basics of what it is, how to look inside of it, and how to work with it using some of the REST APIs. Hopefully this tutorial has given you a better understanding of what Elasticsearch is and more importantly, inspired you to further experiment with the rest of its great features! +* Try out one of the Elastic search solutions: +** https://swiftype.com/documentation/site-search/crawler-quick-start[Site Search] +** https://swiftype.com/documentation/app-search/getting-started[App Search] +** https://swiftype.com/documentation/enterprise-search/getting-started[Enterprise Search] \ No newline at end of file diff --git a/docs/reference/glossary.asciidoc b/docs/reference/glossary.asciidoc index c6b9309fa3240..c307e87f98ebd 100644 --- a/docs/reference/glossary.asciidoc +++ b/docs/reference/glossary.asciidoc @@ -5,201 +5,233 @@ [glossary] [[glossary-analysis]] analysis :: - Analysis is the process of converting <> to - <>. Depending on which analyzer is used, these phrases: - `FOO BAR`, `Foo-Bar`, `foo,bar` will probably all result in the - terms `foo` and `bar`. These terms are what is actually stored in - the index. - + - A full text query (not a <> query) for `FoO:bAR` will - also be analyzed to the terms `foo`,`bar` and will thus match the - terms stored in the index. - + - It is this process of analysis (both at index time and at search time) - that allows Elasticsearch to perform full text queries. - + - Also see <> and <>. +Analysis is the process of converting <> to +<>. Depending on which analyzer is used, these phrases: +`FOO BAR`, `Foo-Bar`, `foo,bar` will probably all result in the +terms `foo` and `bar`. These terms are what is actually stored in +the index. ++ +A full text query (not a <> query) for `FoO:bAR` will +also be analyzed to the terms `foo`,`bar` and will thus match the +terms stored in the index. ++ +It is this process of analysis (both at index time and at search time) +that allows Elasticsearch to perform full text queries. ++ +Also see <> and <>. [[glossary-cluster]] cluster :: - A cluster consists of one or more <> which share the - same cluster name. Each cluster has a single master node which is - chosen automatically by the cluster and which can be replaced if the - current master node fails. +A cluster consists of one or more <> which share the +same cluster name. Each cluster has a single master node which is +chosen automatically by the cluster and which can be replaced if the +current master node fails. -[[glossary-document]] document :: +[[glossary-ccr]] {ccr} (CCR):: - A document is a JSON document which is stored in Elasticsearch. It is - like a row in a table in a relational database. Each document is - stored in an <> and has a <> and an - <>. - + - A document is a JSON object (also known in other languages as a hash / - hashmap / associative array) which contains zero or more - <>, or key-value pairs. - + - The original JSON document that is indexed will be stored in the - <>, which is returned by default when - getting or searching for a document. 
+The {ccr} feature enables you to replicate indices in remote clusters to your +local cluster. For more information, see +{stack-ov}/xpack-ccr.html[{ccr-cap}]. + +[[glossary-ccs]] {ccs} (CCS):: -[[glossary-id]] id :: +The {ccs} feature enables any node to act as a federated client across +multiple clusters. See <>. - The ID of a <> identifies a document. The - `index/id` of a document must be unique. If no ID is provided, - then it will be auto-generated. (also see <>) +[[glossary-document]] document :: + +A document is a JSON document which is stored in Elasticsearch. It is +like a row in a table in a relational database. Each document is +stored in an <> and has a <> and an +<>. ++ +A document is a JSON object (also known in other languages as a hash / +hashmap / associative array) which contains zero or more +<>, or key-value pairs. ++ +The original JSON document that is indexed will be stored in the +<>, which is returned by default when +getting or searching for a document. [[glossary-field]] field :: - A <> contains a list of fields, or key-value - pairs. The value can be a simple (scalar) value (eg a string, integer, - date), or a nested structure like an array or an object. A field is - similar to a column in a table in a relational database. - + - The <> for each field has a field _type_ (not to - be confused with document <>) which indicates the type - of data that can be stored in that field, eg `integer`, `string`, - `object`. The mapping also allows you to define (amongst other things) - how the value for a field should be analyzed. +A <> contains a list of fields, or key-value +pairs. The value can be a simple (scalar) value (eg a string, integer, +date), or a nested structure like an array or an object. A field is +similar to a column in a table in a relational database. ++ +The <> for each field has a field _type_ (not to +be confused with document <>) which indicates the type +of data that can be stored in that field, eg `integer`, `string`, +`object`. The mapping also allows you to define (amongst other things) +how the value for a field should be analyzed. [[glossary-filter]] filter :: - A filter is a non-scoring <>, meaning that it does not score documents. - It is only concerned about answering the question - "Does this document match?". - The answer is always a simple, binary yes or no. This kind of query is said to be made - in a <>, - hence it is called a filter. Filters are simple checks for set inclusion or exclusion. - In most cases, the goal of filtering is to reduce the number of documents that have to be examined. +A filter is a non-scoring <>, meaning that it does not score documents. +It is only concerned about answering the question - "Does this document match?". +The answer is always a simple, binary yes or no. This kind of query is said to be made +in a <>, +hence it is called a filter. Filters are simple checks for set inclusion or exclusion. +In most cases, the goal of filtering is to reduce the number of documents that have to be examined. + +[[glossary-follower-index]] follower index :: + +Follower indices are the target indices for <>. They exist +in your local cluster and replicate <>. + +[[glossary-id]] id :: + +The ID of a <> identifies a document. The +`index/id` of a document must be unique. If no ID is provided, +then it will be auto-generated. (also see <>) [[glossary-index]] index :: - An index is like a _table_ in a relational database. It has a - <> which contains a <>, - which contains the <> in the index. 
- + - An index is a logical namespace which maps to one or more - <> and can have zero or more - <>. +An index is like a _table_ in a relational database. It has a +<> which contains a <>, +which contains the <> in the index. ++ +An index is a logical namespace which maps to one or more +<> and can have zero or more +<>. + +[[glossary-leader-index]] leader index :: + +Leader indices are the source indices for <>. They exist +on remote clusters and are replicated to +<>. [[glossary-mapping]] mapping :: - A mapping is like a _schema definition_ in a relational database. Each - <> has a mapping, which defines a <>, - plus a number of index-wide settings. - + - A mapping can either be defined explicitly, or it will be generated - automatically when a document is indexed. +A mapping is like a _schema definition_ in a relational database. Each +<> has a mapping, which defines a <>, +plus a number of index-wide settings. ++ +A mapping can either be defined explicitly, or it will be generated +automatically when a document is indexed. [[glossary-node]] node :: - A node is a running instance of Elasticsearch which belongs to a - <>. Multiple nodes can be started on a single - server for testing purposes, but usually you should have one node per - server. - + - At startup, a node will use unicast to discover an existing cluster with - the same cluster name and will try to join that cluster. - - [[glossary-primary-shard]] primary shard :: - - Each document is stored in a single primary <>. When - you index a document, it is indexed first on the primary shard, then - on all <> of the primary shard. - + - By default, an <> has one primary shard. You can specify - more primary shards to scale the number of <> - that your index can handle. - + - You cannot change the number of primary shards in an index, once the index is - index is created. However, an index can be split into a new index using the - <>. - + - See also <> +A node is a running instance of Elasticsearch which belongs to a +<>. Multiple nodes can be started on a single +server for testing purposes, but usually you should have one node per +server. ++ +At startup, a node will use unicast to discover an existing cluster with +the same cluster name and will try to join that cluster. + +[[glossary-primary-shard]] primary shard :: + +Each document is stored in a single primary <>. When +you index a document, it is indexed first on the primary shard, then +on all <> of the primary shard. ++ +By default, an <> has one primary shard. You can specify +more primary shards to scale the number of <> +that your index can handle. ++ +You cannot change the number of primary shards in an index, once the index is +created. However, an index can be split into a new index using the +<>. ++ +See also <> [[glossary-query]] query :: - A query is the basic component of a search. A search can be defined by one or more queries - which can be mixed and matched in endless combinations. While <> are - queries that only determine if a document matches, those queries that also calculate how well - the document matches are known as "scoring queries". Those queries assign it a score, which is - later used to sort matched documents. Scoring queries take more resources than <> - and their query results are not cacheable. As a general rule, use query clauses for full-text - search or for any condition that requires scoring, and use filters for everything else. - - [[glossary-replica-shard]] replica shard :: - - Each <> can have zero or more - replicas. 
A replica is a copy of the primary shard, and has two - purposes: - + - 1. increase failover: a replica shard can be promoted to a primary - shard if the primary fails - 2. increase performance: get and search requests can be handled by - primary or replica shards. - + - By default, each primary shard has one replica, but the number of - replicas can be changed dynamically on an existing index. A replica - shard will never be started on the same node as its primary shard. +A query is the basic component of a search. A search can be defined by one or more queries +which can be mixed and matched in endless combinations. While <> are +queries that only determine if a document matches, those queries that also calculate how well +the document matches are known as "scoring queries". Those queries assign it a score, which is +later used to sort matched documents. Scoring queries take more resources than <> +and their query results are not cacheable. As a general rule, use query clauses for full-text +search or for any condition that requires scoring, and use filters for everything else. + +[[glossary-recovery]] recovery :: +The process of syncing a shard copy from a source shard. Upon completion, the recovery process makes the shard copy available for queries. ++ +Recovery automatically occurs anytime a shard moves to a different node in the same cluster, including: + +* Node startup +* Node failure +* Index shard replication +* Snapshot restoration + +[[glossary-replica-shard]] replica shard :: + +Each <> can have zero or more +replicas. A replica is a copy of the primary shard, and has two +purposes: ++ +1. increase failover: a replica shard can be promoted to a primary +shard if the primary fails +2. increase performance: get and search requests can be handled by +primary or replica shards. ++ +By default, each primary shard has one replica, but the number of +replicas can be changed dynamically on an existing index. A replica +shard will never be started on the same node as its primary shard. [[glossary-routing]] routing :: - When you index a document, it is stored on a single - <>. That shard is chosen by hashing - the `routing` value. By default, the `routing` value is derived from - the ID of the document or, if the document has a specified parent - document, from the ID of the parent document (to ensure that child and - parent documents are stored on the same shard). - + - This value can be overridden by specifying a `routing` value at index - time, or a <> in the <>. +When you index a document, it is stored on a single +<>. That shard is chosen by hashing +the `routing` value. By default, the `routing` value is derived from +the ID of the document or, if the document has a specified parent +document, from the ID of the parent document (to ensure that child and +parent documents are stored on the same shard). ++ +This value can be overridden by specifying a `routing` value at index +time, or a <> in the <>. [[glossary-shard]] shard :: - A shard is a single Lucene instance. It is a low-level “worker” unit - which is managed automatically by Elasticsearch. An index is a logical - namespace which points to <> and - <> shards. - + - Other than defining the number of primary and replica shards that an - index should have, you never need to refer to shards directly. - Instead, your code should deal only with an index. 
- + - Elasticsearch distributes shards amongst all <> in the - <>, and can move shards automatically from one - node to another in the case of node failure, or the addition of new - nodes. - - [[glossary-source_field]] source field :: - - By default, the JSON document that you index will be stored in the - `_source` field and will be returned by all get and search requests. - This allows you access to the original object directly from search - results, rather than requiring a second step to retrieve the object - from an ID. +A shard is a single Lucene instance. It is a low-level “worker” unit +which is managed automatically by Elasticsearch. An index is a logical +namespace which points to <> and +<> shards. ++ +Other than defining the number of primary and replica shards that an +index should have, you never need to refer to shards directly. +Instead, your code should deal only with an index. ++ +Elasticsearch distributes shards amongst all <> in the +<>, and can move shards automatically from one +node to another in the case of node failure, or the addition of new +nodes. + +[[glossary-source_field]] source field :: + +By default, the JSON document that you index will be stored in the +`_source` field and will be returned by all get and search requests. +This allows you access to the original object directly from search +results, rather than requiring a second step to retrieve the object +from an ID. [[glossary-term]] term :: - A term is an exact value that is indexed in Elasticsearch. The terms - `foo`, `Foo`, `FOO` are NOT equivalent. Terms (i.e. exact values) can - be searched for using _term_ queries. - + - See also <> and <>. +A term is an exact value that is indexed in Elasticsearch. The terms +`foo`, `Foo`, `FOO` are NOT equivalent. Terms (i.e. exact values) can +be searched for using _term_ queries. ++ +See also <> and <>. [[glossary-text]] text :: - Text (or full text) is ordinary unstructured text, such as this - paragraph. By default, text will be <> into - <>, which is what is actually stored in the index. - + - Text <> need to be analyzed at index time in order to - be searchable as full text, and keywords in full text queries must be - analyzed at search time to produce (and search for) the same terms - that were generated at index time. - + - See also <> and <>. +Text (or full text) is ordinary unstructured text, such as this +paragraph. By default, text will be <> into +<>, which is what is actually stored in the index. ++ +Text <> need to be analyzed at index time in order to +be searchable as full text, and keywords in full text queries must be +analyzed at search time to produce (and search for) the same terms +that were generated at index time. ++ +See also <> and <>. [[glossary-type]] type :: - A type used to represent the _type_ of document, e.g. an `email`, a `user`, or a `tweet`. - Types are deprecated and are in the process of being removed. See <>. +A type used to represent the _type_ of document, e.g. an `email`, a `user`, or a `tweet`. +Types are deprecated and are in the process of being removed. See <>. 
diff --git a/docs/reference/how-to/disk-usage.asciidoc b/docs/reference/how-to/disk-usage.asciidoc index 713e03d188ad9..a475122b99ec2 100644 --- a/docs/reference/how-to/disk-usage.asciidoc +++ b/docs/reference/how-to/disk-usage.asciidoc @@ -91,6 +91,7 @@ PUT index // CONSOLE [float] +[[default-dynamic-string-mapping]] === Don't use default dynamic string mappings The default <> will index string fields @@ -133,11 +134,13 @@ Larger shards are going to be more efficient at storing data. To increase the si Keep in mind that large shard sizes come with drawbacks, such as long full recovery times. [float] +[[disable-source]] === Disable `_source` The <> field stores the original JSON body of the document. If you don’t need access to it you can disable it. However, APIs that needs access to `_source` such as update and reindex won’t work. [float] +[[best-compression]] === Use `best_compression` The `_source` and stored fields can easily take a non negligible amount of disk diff --git a/docs/reference/how-to/indexing-speed.asciidoc b/docs/reference/how-to/indexing-speed.asciidoc index 3661d8ce07d68..f4c224829a456 100644 --- a/docs/reference/how-to/indexing-speed.asciidoc +++ b/docs/reference/how-to/indexing-speed.asciidoc @@ -17,6 +17,7 @@ it is advisable to avoid going beyond a couple tens of megabytes per request even if larger requests seem to perform better. [float] +[[multiple-workers-threads]] === Use multiple workers/threads to send data to Elasticsearch A single thread sending bulk requests is unlikely to be able to max out the @@ -36,12 +37,26 @@ number of workers is. This can be tested by progressively increasing the number of workers until either I/O or CPU is saturated on the cluster. [float] -=== Increase the refresh interval - -The default <> is `1s`, which -forces Elasticsearch to create a new segment every second. -Increasing this value (to say, `30s`) will allow larger segments to flush and -decreases future merge pressure. +=== Unset or increase the refresh interval + +The operation that consists of making changes visible to search - called a +<> - is costly, and calling it often while there is +ongoing indexing activity can hurt indexing speed. + +By default, Elasticsearch runs this operation every second, but only on +indices that have received one search request or more in the last 30 seconds. +This is the optimal configuration if you have no or very little search traffic +(e.g. less than one search request every 5 minutes) and want to optimize for +indexing speed. This behavior aims to automatically optimize bulk indexing in +the default case when no searches are performed. In order to opt out of this +behavior set the refresh interval explicitly. + +On the other hand, if your index experiences regular search requests, this +default behavior means that Elasticsearch will refresh your index every 1 +second. If you can afford to increase the amount of time between when a document +gets indexed and when it becomes visible, increasing the +<> to a larger value, e.g. +`30s`, might help improve indexing speed. [float] === Disable refresh and replicas for initial loads @@ -114,13 +129,6 @@ The default is `10%` which is often plenty: for example, if you give the JVM 10GB of memory, it will give 1GB to the index buffer, which is enough to host two shards that are heavily indexing. -[float] -=== Disable `_field_names` - -The <> introduces some -index-time overhead, so you might want to disable it if you never need to -run `exists` queries. 
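To make the refresh-interval advice above concrete, the setting can be updated dynamically on a live index; the index name below is a placeholder:

[source,js]
--------------------------------------------------
PUT /my-index/_settings
{
  "index" : {
    "refresh_interval" : "30s"
  }
}
--------------------------------------------------

Setting the value to `-1` disables periodic refreshes entirely, in line with the advice to disable refresh for initial bulk loads, and setting it back to `null` restores the default behavior.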
- [float] === Additional optimizations diff --git a/docs/reference/how-to/recipes.asciidoc b/docs/reference/how-to/recipes.asciidoc index 451e192ad6ad2..b46f624aef51d 100644 --- a/docs/reference/how-to/recipes.asciidoc +++ b/docs/reference/how-to/recipes.asciidoc @@ -3,9 +3,9 @@ This section includes a few recipes to help with common problems: -* <> -* <> +* <> +* <> +* <> include::recipes/stemming.asciidoc[] include::recipes/scoring.asciidoc[] - diff --git a/docs/reference/how-to/recipes/scoring.asciidoc b/docs/reference/how-to/recipes/scoring.asciidoc index 09c0e585765d6..25425277839f7 100644 --- a/docs/reference/how-to/recipes/scoring.asciidoc +++ b/docs/reference/how-to/recipes/scoring.asciidoc @@ -60,8 +60,8 @@ request do not have similar index statistics and relevancy could be bad. If you have a small dataset, the easiest way to work around this issue is to index everything into an index that has a single shard -(`index.number_of_shards: 1`). Then index statistics will be the same for all -documents and scores will be consistent. +(`index.number_of_shards: 1`), which is the default. Then index statistics +will be the same for all documents and scores will be consistent. Otherwise the recommended way to work around this issue is to use the <> search type. This will make @@ -78,3 +78,125 @@ queries, beware that gathering statistics alone might not be cheap since all terms have to be looked up in the terms dictionaries in order to look up statistics. +[[static-scoring-signals]] +=== Incorporating static relevance signals into the score + +Many domains have static signals that are known to be correlated with relevance. +For instance https://en.wikipedia.org/wiki/PageRank[PageRank] and url length are +two commonly used features for web search in order to tune the score of web +pages independently of the query. + +There are two main queries that allow combining static score contributions with +textual relevance, eg. as computed with BM25: + - <> + - <> + +For instance imagine that you have a `pagerank` field that you wish to +combine with the BM25 score so that the final score is equal to +`score = bm25_score + pagerank / (10 + pagerank)`. 
+ +With the <> the query would +look like this: + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT index +{ + "mappings": { + "properties": { + "body": { + "type": "text" + }, + "pagerank": { + "type": "long" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST + +////////////////////////// + +[source,js] +-------------------------------------------------- +GET index/_search +{ + "query" : { + "script_score" : { + "query" : { + "match": { "body": "elasticsearch" } + }, + "script" : { + "source" : "_score * saturation(doc['pagerank'].value, 10)" <1> + } + } + } +} +-------------------------------------------------- +// CONSOLE +//TEST[continued] +<1> `pagerank` must be mapped as a <> + +while with the <> it would +look like below: + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT index +{ + "mappings": { + "properties": { + "body": { + "type": "text" + }, + "pagerank": { + "type": "rank_feature" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST + +////////////////////////// + +[source,js] +-------------------------------------------------- +GET _search +{ + "query" : { + "bool" : { + "must": { + "match": { "body": "elasticsearch" } + }, + "should": { + "rank_feature": { + "field": "pagerank", <1> + "saturation": { + "pivot": 10 + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +<1> `pagerank` must be mapped as a <> field + +While both options would return similar scores, there are trade-offs: +<> provides a lot of flexibility, +enabling you to combine the text relevance score with static signals as you +prefer. On the other hand, the <> only +exposes a couple of ways to incorporate static signals into the score. However, +it relies on the <> and +<> fields, which index values in a special way +that allows the <> to skip +over non-competitive documents and get the top matches of a query faster. diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc index e8c213646578c..d7ddda116327e 100644 --- a/docs/reference/how-to/recipes/stemming.asciidoc +++ b/docs/reference/how-to/recipes/stemming.asciidoc @@ -171,7 +171,7 @@ the query need to be matched exactly while other parts should still take stemming into account? Fortunately, the `query_string` and `simple_query_string` queries have a feature -that solve this exact problem: `quote_field_suffix`. This tell Elasticsearch +that solves this exact problem: `quote_field_suffix`. This tells Elasticsearch that the words that appear in between quotes are to be redirected to a different field, see below: diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index b136c953b8f02..38575942bd021 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -161,6 +161,7 @@ GET index/_search // TEST[continued] [float] +[[map-ids-as-keyword]] === Consider mapping identifiers as `keyword` The fact that some data is numeric does not mean it should always be mapped as a @@ -339,8 +340,8 @@ If the machine running Elasticsearch is restarted, the filesystem cache will be empty, so it will take some time before the operating system loads hot regions of the index into memory so that search operations are fast.
You can explicitly tell the operating system which files should be loaded into memory eagerly -depending on the file extension using the <> -setting. +depending on the file extension using the +<> setting. WARNING: Loading data into the filesystem cache eagerly on too many indices or too many files will make search _slower_ if the filesystem cache is not large @@ -354,6 +355,7 @@ conjunctions faster at the cost of slightly slower indexing. Read more about it in the <>. [float] +[[preference-cache-optimization]] === Use `preference` to optimize cache utilization There are multiple caches that can help with search performance, such as the @@ -395,15 +397,6 @@ be able to cope with `max_failures` node failures at once at most, then the right number of replicas for you is `max(max_failures, ceil(num_nodes / num_primaries) - 1)`. -[float] -=== Turn on adaptive replica selection - -When multiple copies of data are present, elasticsearch can use a set of -criteria called <> to select -the best copy of the data based on response time, service time, and queue size -of the node containing each copy of the shard. This can improve query throughput -and reduce latency for search-heavy applications. - === Tune your queries with the Profile API You can also analyse how expensive each component of your queries and @@ -419,3 +412,19 @@ Some caveats to the Profile API are that: - the Profile API as a debugging tool adds significant overhead to search execution and can also have a very verbose output - given the added overhead, the resulting took times are not reliable indicators of actual took time, but can be used comparatively between clauses for relative timing differences - the Profile API is best for exploring possible reasons behind the most costly clauses of a query but isn't intended for accurately measuring absolute timings of each clause + +[[faster-phrase-queries]] +=== Faster phrase queries with `index_phrases` + +The <> field has an <> option that +indexes 2-shingles and is automatically leveraged by query parsers to run phrase +queries that don't have a slop. If your use-case involves running lots of phrase +queries, this can speed up queries significantly. + +[[faster-prefix-queries]] +=== Faster prefix queries with `index_prefixes` + +The <> field has an <> option that +indexes prefixes of all terms and is automatically leveraged by query parsers to +run prefix queries. If your use-case involves running lots of prefix queries, +this can speed up queries significantly. diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc index 66762ead9eb32..26c2cb2d26b75 100644 --- a/docs/reference/ilm/apis/explain.asciidoc +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -227,7 +227,7 @@ information for the step that's being performed on the index. <1> Status of the step that's in progress. If the index is in the ERROR step, something went wrong while executing a -step in the policy and and you will need to take action for the index to proceed +step in the policy and you will need to take action for the index to proceed to the next step. To help you diagnose the problem, the explain response shows the step that failed and the step info provides information about the error. 
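As a reminder of what that looks like in practice, the explain API is requested per index; `my-index-000001` below is just a placeholder name:

[source,js]
--------------------------------------------------
GET my-index-000001/_ilm/explain
--------------------------------------------------

When the index is in the ERROR step, the response identifies the step that failed and includes step info describing the error, which is usually enough to decide on the corrective action.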
diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc index 5e67e26cb160c..af8a4deffcbc1 100644 --- a/docs/reference/ilm/apis/get-status.asciidoc +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -2,6 +2,8 @@ [testenv="basic"] [[ilm-get-status]] === Get {ilm} status API + +[subs="attributes"] ++++ Get {ilm} status ++++ diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc index 92ca2a9806379..c13f5edcf11cc 100644 --- a/docs/reference/ilm/apis/start.asciidoc +++ b/docs/reference/ilm/apis/start.asciidoc @@ -2,6 +2,8 @@ [testenv="basic"] [[ilm-start]] === Start {ilm} API + +[subs="attributes"] ++++ Start {ilm} ++++ diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc index dee1cf4fa3579..1629990c5fd86 100644 --- a/docs/reference/ilm/apis/stop.asciidoc +++ b/docs/reference/ilm/apis/stop.asciidoc @@ -2,6 +2,8 @@ [testenv="basic"] [[ilm-stop]] === Stop {ilm} API + +[subs="attributes"] ++++ Stop {ilm} ++++ diff --git a/docs/reference/ilm/getting-started-ilm.asciidoc b/docs/reference/ilm/getting-started-ilm.asciidoc index f06c95f49c067..3d1935721643b 100644 --- a/docs/reference/ilm/getting-started-ilm.asciidoc +++ b/docs/reference/ilm/getting-started-ilm.asciidoc @@ -15,7 +15,9 @@ our writing index. We wish to roll over the index after it reaches a size of 50 gigabytes, or has been created 30 days ago, and then delete the index after 90 days. -=== Setting up a new policy +[float] +[[ilm-gs-create-policy]] +=== Setting up a policy There are many new features introduced by {ilm-init}, but we will only focus on a few that are needed for our example. For starters, we will use the @@ -64,6 +66,8 @@ the index being written to after it reaches 50 gigabytes, or it is 30 days old. The rollover will occur when either of these conditions is true. The index will be deleted 90 days after it is rolled over. +[float] +[[ilm-gs-apply-policy]] === Applying a policy to our index There are <> to associate a @@ -135,6 +139,8 @@ index being the index that is being written to at a time. Rollover swaps the write index to be the new index created from rollover, and sets the alias to be read-only for the source index. +[float] +[[ilm-gs-check-progress]] === Checking progress Now that we have an index managed by our policy, how do we tell what is going diff --git a/docs/reference/ilm/ilm-with-existing-indices.asciidoc b/docs/reference/ilm/ilm-with-existing-indices.asciidoc new file mode 100644 index 0000000000000..60aff62b714c2 --- /dev/null +++ b/docs/reference/ilm/ilm-with-existing-indices.asciidoc @@ -0,0 +1,416 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-with-existing-indices]] +== Using {ilm-init} with existing indices + +While it is recommended to use {ilm-init} to manage the index lifecycle from +start to finish, it may be useful to use {ilm-init} with existing indices, +particularly when transitioning from an alternative method of managing the index +lifecycle such as Curator, or when migrating from daily indices to +rollover-based indices. Such use cases are fully supported, but there are some +configuration differences from when {ilm-init} can manage the complete index +lifecycle. + +This section describes strategies to leverage {ilm-init} for existing periodic +indices when migrating to fully {ilm-init}-managed indices, which can be done in +a few different ways, each providing different tradeoffs.
As an example, we'll +walk through a use case of a very simple logging index with just a field for the +log message and a timestamp. + +First, we need to create a template for these indices: + +[source,js] +----------------------- +PUT _template/mylogs_template +{ + "index_patterns": [ + "mylogs-*" + ], + "settings": { + "number_of_shards": 1, + "number_of_replicas": 1 + }, + "mappings": { + "properties": { + "message": { + "type": "text" + }, + "@timestamp": { + "type": "date" + } + } + } +} +----------------------- +// CONSOLE +// TEST + +And we'll ingest a few documents to create a few daily indices: + +[source,js] +----------------------- +POST mylogs-pre-ilm-2019.06.24/_doc +{ + "@timestamp": "2019-06-24T10:34:00", + "message": "this is one log message" +} +----------------------- +// CONSOLE +// TEST[continued] + +[source,js] +----------------------- +POST mylogs-pre-ilm-2019.06.25/_doc +{ + "@timestamp": "2019-06-25T17:42:00", + "message": "this is another log message" +} +----------------------- +// CONSOLE +// TEST[continued] + +Now that we have these indices, we'll look at a few different ways of migrating +these indices to ILM. + +[[ilm-with-existing-periodic-indices]] +=== Managing existing periodic indices with {ilm-init} + +NOTE: The examples in this section assume daily indices as set up in +<>. + +The simplest way to manage existing indices while transitioning to fully +{ilm-init}-managed indices is to allow all new indices to be fully managed by +{ilm-init} before attaching {ilm-init} policies to existing indices. To do this, +all new documents should be directed to {ilm-init}-managed indices - if you are +using Beats or Logstash data shippers, upgrading all of those shippers to +version 7.0.0 or higher will take care of that part for you. If you are not +using Beats or Logstash, you may need to set up ILM for new indices yourself as +demonstrated in the <>. + +NOTE: If you are using Beats through Logstash, you may need to change your +Logstash output configuration and invoke the Beats setup to use ILM for new +data. + +Once all new documents are being written to fully {ilm-init}-managed indices, it +is easy to add an {ilm-init} policy to existing indices. However, there are two +things to keep in mind when doing this, and a trick that makes those two things +much easier to handle. + +The two biggest things to keep in mind are: + +1. Existing periodic indices shouldn't use policies with rollover, because +rollover is used to manage where new data goes. Since existing indices should no +longer be receiving new documents, there is no point to using rollover for them. + +2. {ilm-init} policies attached to existing indices will compare the `min_age` +for each phase to the original creation date of the index, and so might proceed +through multiple phases immediately. + +The first one is the most important, because it makes it difficult to use the +same policy for new and existing periodic indices. But that's easy to solve +with one simple trick: Create a second policy for existing indices, in addition +to the one for new indices. {ilm-init} policies are cheap to create, so don't be +afraid to have more than one. Modifying a policy designed for new indices to be +used on existing indices is generally very simple: just remove the `rollover` +action. 
+ +For example, if you created a policy for your new indices with each phase +like so: +[source,js] +----------------------- +PUT _ilm/policy/mylogs_policy +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover": { + "max_size": "25GB" + } + } + }, + "warm": { + "min_age": "1d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "cold": { + "min_age": "7d", + "actions": { + "freeze": {} + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} +----------------------- +// CONSOLE +// TEST[continued] + +You can create a policy for pre-existing indices by removing the `rollover` +action, and in this case, the `hot` phase is now empty so we can remove that +too: + +[source,js] +----------------------- +PUT _ilm/policy/mylogs_policy_existing +{ + "policy": { + "phases": { + "warm": { + "min_age": "1d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "cold": { + "min_age": "7d", + "actions": { + "freeze": {} + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} +----------------------- +// CONSOLE +// TEST[continued] + +Creating a separate policy for existing indices will also allow using different +`min_age` values. You may want to use higher values to prevent many indices from +running through the policy at once, which may be important if your policy +includes potentially resource-intensive operations like force merge. + +You can configure the lifecycle for many indices at once by using wildcards in +the index name when calling the <> +to set the policy name, but be careful that you don't include any indices that +you don't want to change the policy for: + +[source,js] +----------------------- +PUT mylogs-pre-ilm*/_settings <1> +{ + "index": { + "lifecycle": { + "name": "mylogs_policy_existing" + } + } +} +----------------------- +// CONSOLE +// TEST[continued] + +<1> This pattern will match all indices with names that start with +`mylogs-pre-ilm` + +Once all pre-{ilm-init} indices have aged out and been deleted, the policy for +older periodic indices can be deleted. + +[[ilm-reindexing-into-rollover]] +=== Reindexing via {ilm-init} + +NOTE: The examples in this section assume daily indices as set up in +<>. + +In some cases, it may be useful to reindex data into {ilm-init}-managed indices. +This is more complex than simply attaching policies to existing indices as +described in <>, and +requires pausing indexing during the reindexing process. However, this technique +may be useful in cases where periodic indices were created with very small +amounts of data leading to excessive shard counts, or for indices which grow +steadily over time, but have not been broken up into time-series indices leading +to shards which are much too large, situations that cause significant +performance problems. + +Before getting started with reindexing data, the new index structure should be +set up. For this section, we'll be using the same setup described in +<>. + +First, we'll set up a policy with rollover, and can include any additional +phases required. 
For simplicity, we'll just use rollover: + +[source,js] +----------------------- +PUT _ilm/policy/sample_policy +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover": { + "max_age": "7d", + "max_size": "50G" + } + } + } + } + } +} +----------------------- +// CONSOLE +// TEST[continued] + +And now we'll update the index template for our indices to include the relevant +{ilm-init} settings: + +[source,js] +----------------------- +PUT _template/mylogs_template +{ + "index_patterns": [ + "ilm-mylogs-*" <1> + ], + "settings": { + "number_of_shards": 1, + "number_of_replicas": 1, + "index": { + "lifecycle": { + "name": "mylogs_condensed_policy", <2> + "rollover_alias": "mylogs" <3> + } + } + }, + "mappings": { + "properties": { + "message": { + "type": "text" + }, + "@timestamp": { + "type": "date" + } + } + } +} +----------------------- +// CONSOLE +// TEST[continued] +<1> The new index pattern has a prefix compared to the old one, this will + make it easier to reindex later +<2> The name of the policy we defined above +<3> The name of the alias we'll use to write to and query + +And create the first index with the alias specified in the `rollover_alias` +setting in the index template: + +[source,js] +----------------------- +PUT ilm-mylogs-000001 +{ + "aliases": { + "mylogs": { + "is_write_index": true + } + } +} +----------------------- +// CONSOLE +// TEST[continued] + +All new documents should be indexed via the `mylogs` alias at this point. Adding +new data to the old indices during the reindexing process can cause data to be +added to the old indices, but not be reindexed into the new indices. + +NOTE: If you do not want to mix new data and old data in the new ILM-managed +indices, indexing of new data should be paused entirely while the reindex +completes. Mixing old and new data within one index is safe, but keep in mind +that the indices with mixed data should be retained in their entirety until you +are ready to delete both the old and new data. + +By default, {ilm-init} only checks rollover conditions every 10 minutes. Under +normal indexing load, this usually works well, but during reindexing, indices +can grow very, very quickly. We'll need to set the poll interval to something +shorter to ensure that the new indices don't grow too large while waiting for +the rollover check: + +[source,js] +----------------------- +PUT _cluster/settings +{ + "transient": { + "indices.lifecycle.poll_interval": "1m" <1> + } +} +----------------------- +// CONSOLE +// TEST[skip:don't want to overwrite this setting for other tests] +<1> This tells ILM to check for rollover conditions every minute + +We're now ready to reindex our data using the <>. If +you have a timestamp or date field in your documents, as in this example, it may +be useful to specify that the documents should be sorted by that field - this +will mean that all documents in `ilm-mylogs-000001` come before all documents in +`ilm-mylogs-000002`, and so on. However, if this is not a requirement, omitting +the sort will allow the data to be reindexed more quickly. + +IMPORTANT: If your data uses document IDs generated by means other than +Elasticsearch's automatic ID generation, you may need to do additional +processing to ensure that the document IDs don't conflict during the reindex, as +documents will retain their original IDs. One way to do this is to use a +<> in the reindex call to append the original index name +to the document ID. 
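One possible shape for that script-based approach is sketched below, reusing the placeholder index names from this section; the ID format chosen here is purely illustrative:

[source,js]
-----------------------
POST _reindex
{
  "source": {
    "index": "mylogs-*"
  },
  "dest": {
    "index": "mylogs",
    "op_type": "create"
  },
  "script": {
    "lang": "painless",
    "source": "ctx._id = ctx._index + '_' + ctx._id"
  }
}
-----------------------

The reindex request that follows performs the same copy without this step; add the script only if document IDs can collide across the source indices.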
+ +[source,js] +----------------------- +POST _reindex +{ + "source": { + "index": "mylogs-*", <1> + "sort": { "@timestamp": "desc" } + }, + "dest": { + "index": "mylogs", <2> + "op_type": "create" <3> + } +} +----------------------- +// CONSOLE +// TEST[continued] +<1> This index pattern matches our existing indices. Using the prefix for + the new indices makes using this index pattern much easier. +<2> The alias set up above +<3> This option will cause the reindex to abort if it encounters multiple + documents with the same ID. This is optional, but recommended to prevent + accidentally overwriting documents if two documents from different indices + have the same ID. + +Once this completes, indexing new data can be resumed, as long as all new +documents are indexed into the alias used above. All data, existing and new, can +be queried using that alias as well. We should also be sure to set the +{ilm-init} poll interval back to its default value, because keeping it set too +low can cause unnecessary load on the current master node: + +[source,js] +----------------------- +PUT _cluster/settings +{ + "transient": { + "indices.lifecycle.poll_interval": null + } +} + +----------------------- +// CONSOLE +// TEST[skip:don't want to overwrite this setting for other tests] + +All of the reindexed data should now be accessible via the alias set up above, +in this case `mylogs`. Once you have verified that all the data has been +reindexed and is available in the new indices, the existing indices can be +safely removed. \ No newline at end of file diff --git a/docs/reference/ilm/index.asciidoc b/docs/reference/ilm/index.asciidoc index b906f9ade4447..757c403608fa5 100644 --- a/docs/reference/ilm/index.asciidoc +++ b/docs/reference/ilm/index.asciidoc @@ -73,3 +73,5 @@ include::error-handling.asciidoc[] include::ilm-and-snapshots.asciidoc[] include::start-stop-ilm.asciidoc[] + +include::ilm-with-existing-indices.asciidoc[] diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc index e16b414504a64..5a6f42413b097 100644 --- a/docs/reference/ilm/policy-definitions.asciidoc +++ b/docs/reference/ilm/policy-definitions.asciidoc @@ -66,8 +66,11 @@ index is rolled over, then `min_age` is the time elapsed from the time the index is rolled over. The intention here is to execute following phases and actions relative to when data was written last to a rolled over index. -The previous phase's actions must complete before {ilm} will check `min_age` -and transition into the next phase. +The previous phase's actions must complete before {ilm} will check `min_age` and +transition into the next phase. By default, {ilm} checks for indices that meet +policy criteria, like `min_age`, every 10 minutes. You can use the +`indices.lifecycle.poll_interval` cluster setting to control how often this +check occurs. === Phase Execution @@ -84,22 +87,26 @@ executing. The below list shows the actions which are available in each phase. +NOTE: The order that configured actions are performed in within each phase is +determined by automatically by {ilm-init}, and cannot be changed by changing the +policy definition. + * Hot - <> - - <> - <> + - <> * Warm - <> - - <> + - <> - <> - - <> + - <> - <> - - <> + - <> * Cold - <> + - <> - <> - <> - - <> * Delete - <> @@ -116,9 +123,9 @@ configuring allocation rules is optional. When configuring allocation rules, setting number of replicas is optional. 
Although this action can be treated as two separate index settings updates, both can be configured at once. -Read more about index replicas <>. -Read more about shard allocation filtering in -the <>. +For more information about how {es} uses replicas for scaling, see +<>. See <> for more information about +controlling where Elasticsearch allocates shards of a particular index. [[ilm-allocate-options]] .Allocate Options @@ -361,12 +368,12 @@ follower index into a regular index as if <> had been used instead of rolling over. For example, if an index to be managed has an alias `my_data`. The managed -index "my_index" must be the write index for the alias. For more information, read +index "my_index-000001" must be the write index for the alias. For more information, read <>. [source,js] -------------------------------------------------- -PUT my_index +PUT my_index-000001 { "settings": { "index.lifecycle.name": "my_policy", @@ -390,7 +397,7 @@ existing index meets one of the rollover conditions. [options="header"] |====== | Name | Required | Default | Description -| `max_size` | no | - | max index storage size. +| `max_size` | no | - | max primary shard index storage size. See <> for formatting | `max_docs` | no | - | max number of documents an diff --git a/docs/reference/ilm/start-stop-ilm.asciidoc b/docs/reference/ilm/start-stop-ilm.asciidoc index e5366f028a9c7..22ca0ae48fd98 100644 --- a/docs/reference/ilm/start-stop-ilm.asciidoc +++ b/docs/reference/ilm/start-stop-ilm.asciidoc @@ -68,8 +68,9 @@ If the request does not encounter errors, you receive the following result: The operating modes of ILM: - +[[ilm-operating-modes]] .ILM Operating Modes +[options="header"] |=== |Name |Description |RUNNING |Normal operation where all policies are executed as normal diff --git a/docs/reference/ilm/using-policies-rollover.asciidoc b/docs/reference/ilm/using-policies-rollover.asciidoc index dbabbd3333635..11be1bed85a16 100644 --- a/docs/reference/ilm/using-policies-rollover.asciidoc +++ b/docs/reference/ilm/using-policies-rollover.asciidoc @@ -28,11 +28,13 @@ new index. The rollover action takes the following parameters: +[[rollover-action-params]] .`rollover` Action Parameters +[options="header"] |=== |Name |Description -|max_size |The maximum estimated size the index is allowed to grow -to. Defaults to `null`. Optional. +|max_size |The maximum estimated size the primary shard of the index is allowed +to grow to. Defaults to `null`. Optional. |max_docs |The maximum number of document the index should contain. Defaults to `null`. Optional. |max_age |The maximum age of the index. Defaults to `null`. Optional. 
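Putting those parameters together, a rollover action inside a policy might look like the following; the policy name and the threshold values are arbitrary examples:

[source,js]
--------------------------------------------------
PUT _ilm/policy/my_rollover_policy
{
  "policy": {
    "phases": {
      "hot": {
        "actions": {
          "rollover": {
            "max_size": "50gb",
            "max_docs": 100000000,
            "max_age": "30d"
          }
        }
      }
    }
  }
}
--------------------------------------------------

The index is rolled over as soon as any one of the configured conditions is met.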
diff --git a/docs/reference/images/ccs/ccs-dont-min-roundtrip-shard-results.png b/docs/reference/images/ccs/ccs-dont-min-roundtrip-shard-results.png new file mode 100644 index 0000000000000..318a63d3a9d49 Binary files /dev/null and b/docs/reference/images/ccs/ccs-dont-min-roundtrip-shard-results.png differ diff --git a/docs/reference/images/ccs/ccs-dont-min-roundtrip-shard-search.png b/docs/reference/images/ccs/ccs-dont-min-roundtrip-shard-search.png new file mode 100644 index 0000000000000..1f2233a8bc21d Binary files /dev/null and b/docs/reference/images/ccs/ccs-dont-min-roundtrip-shard-search.png differ diff --git a/docs/reference/images/ccs/ccs-min-roundtrip-client-request.png b/docs/reference/images/ccs/ccs-min-roundtrip-client-request.png new file mode 100644 index 0000000000000..bf023da054bd0 Binary files /dev/null and b/docs/reference/images/ccs/ccs-min-roundtrip-client-request.png differ diff --git a/docs/reference/images/ccs/ccs-min-roundtrip-client-response.png b/docs/reference/images/ccs/ccs-min-roundtrip-client-response.png new file mode 100644 index 0000000000000..d310de52c2484 Binary files /dev/null and b/docs/reference/images/ccs/ccs-min-roundtrip-client-response.png differ diff --git a/docs/reference/images/ccs/ccs-min-roundtrip-cluster-results.png b/docs/reference/images/ccs/ccs-min-roundtrip-cluster-results.png new file mode 100644 index 0000000000000..e4b64f7ec25ee Binary files /dev/null and b/docs/reference/images/ccs/ccs-min-roundtrip-cluster-results.png differ diff --git a/docs/reference/images/ccs/ccs-min-roundtrip-cluster-search.png b/docs/reference/images/ccs/ccs-min-roundtrip-cluster-search.png new file mode 100644 index 0000000000000..d34d0d00f1b19 Binary files /dev/null and b/docs/reference/images/ccs/ccs-min-roundtrip-cluster-search.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-1-new-conn.png b/docs/reference/images/sql/client-apps/dbeaver-1-new-conn.png index 2307f03932663..bf7f1c63135af 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-1-new-conn.png and b/docs/reference/images/sql/client-apps/dbeaver-1-new-conn.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-2-conn-es.png b/docs/reference/images/sql/client-apps/dbeaver-2-conn-es.png index 1ca209a57e555..f63df0987c167 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-2-conn-es.png and b/docs/reference/images/sql/client-apps/dbeaver-2-conn-es.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-3-conn-props.png b/docs/reference/images/sql/client-apps/dbeaver-3-conn-props.png index 7561e94bdd991..825ce1b6357fb 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-3-conn-props.png and b/docs/reference/images/sql/client-apps/dbeaver-3-conn-props.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-4-driver-ver.png b/docs/reference/images/sql/client-apps/dbeaver-4-driver-ver.png index 62cef87a7ae9d..bcad2a75d801e 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-4-driver-ver.png and b/docs/reference/images/sql/client-apps/dbeaver-4-driver-ver.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-5-test-conn.png b/docs/reference/images/sql/client-apps/dbeaver-5-test-conn.png index 70f2a1dd4dc2f..c76ae19937a08 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-5-test-conn.png and b/docs/reference/images/sql/client-apps/dbeaver-5-test-conn.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-6-data.png 
b/docs/reference/images/sql/client-apps/dbeaver-6-data.png index 5d33441fe3b8c..053042b791116 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-6-data.png and b/docs/reference/images/sql/client-apps/dbeaver-6-data.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png b/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png index b0ff89cc9d75a..cde4d9cc7cf26 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png and b/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-2-driver.png b/docs/reference/images/sql/client-apps/dbvis-2-driver.png index b0f3a2927c968..cae3824547bc3 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-2-driver.png and b/docs/reference/images/sql/client-apps/dbvis-2-driver.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png b/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png index 7f89cf84a8e62..332895a2c8a8b 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png and b/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png b/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png index 2027949c401a7..d854dc826b1e1 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png and b/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-5-data.png b/docs/reference/images/sql/client-apps/dbvis-5-data.png index fb5ce8b86aa74..c67336568edc0 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-5-data.png and b/docs/reference/images/sql/client-apps/dbvis-5-data.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-3-add-driver.png b/docs/reference/images/sql/client-apps/squirell-3-add-driver.png index 9a9c2c2634e3c..29f06b7033d72 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-3-add-driver.png and b/docs/reference/images/sql/client-apps/squirell-3-add-driver.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-4-driver-list.png b/docs/reference/images/sql/client-apps/squirell-4-driver-list.png index 35f389747c970..a269e29d672ea 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-4-driver-list.png and b/docs/reference/images/sql/client-apps/squirell-4-driver-list.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-5-add-alias.png b/docs/reference/images/sql/client-apps/squirell-5-add-alias.png index d5587060d2eaa..1fc8e9ad60191 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-5-add-alias.png and b/docs/reference/images/sql/client-apps/squirell-5-add-alias.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-7-data.png b/docs/reference/images/sql/client-apps/squirell-7-data.png index 760ade7c670fb..70837963b74b5 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-7-data.png and b/docs/reference/images/sql/client-apps/squirell-7-data.png differ diff --git a/docs/reference/images/sql/client-apps/workbench-2-add-driver.png b/docs/reference/images/sql/client-apps/workbench-2-add-driver.png index 03e740f400ae1..659cfd0c40760 100644 Binary files a/docs/reference/images/sql/client-apps/workbench-2-add-driver.png and b/docs/reference/images/sql/client-apps/workbench-2-add-driver.png differ diff --git 
a/docs/reference/images/sql/client-apps/workbench-3-connection.png b/docs/reference/images/sql/client-apps/workbench-3-connection.png index 32643375e3de9..9262ef0f533a2 100644 Binary files a/docs/reference/images/sql/client-apps/workbench-3-connection.png and b/docs/reference/images/sql/client-apps/workbench-3-connection.png differ diff --git a/docs/reference/images/sql/client-apps/workbench-4-data.png b/docs/reference/images/sql/client-apps/workbench-4-data.png index 602f09d06e46f..7b8251fc9588a 100644 Binary files a/docs/reference/images/sql/client-apps/workbench-4-data.png and b/docs/reference/images/sql/client-apps/workbench-4-data.png differ diff --git a/docs/reference/images/sql/rest/console-triple-quotes.png b/docs/reference/images/sql/rest/console-triple-quotes.png new file mode 100644 index 0000000000000..4a13acb986114 Binary files /dev/null and b/docs/reference/images/sql/rest/console-triple-quotes.png differ diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 1442f0e3e558e..306af992fc124 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -36,7 +36,7 @@ specific index module: `index.number_of_shards`:: - The number of primary shards that an index should have. Defaults to 5. + The number of primary shards that an index should have. Defaults to 1. This setting can only be set at index creation time. It cannot be changed on a closed index. Note: the number of shards are limited to `1024` per index. This limitation is a safety limit to prevent accidental creation of indices @@ -45,26 +45,17 @@ specific index module: part of the cluster. `index.shard.check_on_startup`:: -+ --- -Whether or not shards should be checked for corruption before opening. When -corruption is detected, it will prevent the shard from being opened. Accepts: - -`false`:: - - (default) Don't check for corruption when opening a shard. - -`checksum`:: - - Check for physical corruption. - -`true`:: - - Check for both physical and logical corruption. This is much more - expensive in terms of CPU and memory usage. -WARNING: Expert only. Checking shards may take a lot of time on large indices. --- +Whether or not shards should be checked for corruption before opening. When +corruption is detected, it will prevent the shard from being opened. +Accepts: +`false`::: (default) Don't check for corruption when opening a shard. +`checksum`::: Check for physical corruption. +`true`::: Check for both physical and logical corruption. This is much more +expensive in terms of CPU and memory usage. ++ +WARNING: Expert only. Checking shards may take a lot of time on large +indices. [[index-codec]] `index.codec`:: @@ -83,6 +74,11 @@ WARNING: Expert only. Checking shards may take a lot of time on large indices. than the `index.number_of_shards` unless the `index.number_of_shards` value is also 1. See <> for more details about how this setting is used. +[[load-fixed-bitset-filters-eagerly]] `index.load_fixed_bitset_filters_eagerly`:: + + Indicates whether <> are pre-loaded for + nested queries. Possible values are `true` (default) and `false`. + [float] [[dynamic-index-settings]] === Dynamic index settings @@ -179,8 +175,8 @@ specific index module: `index.blocks.write`:: - Set to `true` to disable data write operations against the index. Unlike `read_only`, - this setting does not affect metadata. For instance, you can close an index with a `write` + Set to `true` to disable data write operations against the index. 
Unlike `read_only`, + this setting does not affect metadata. For instance, you can close an index with a `write` block, but not an index with a `read_only` block. `index.blocks.metadata`:: @@ -203,11 +199,13 @@ specific index module: This setting is only applicable when highlighting is requested on a text that was indexed without offsets or term vectors. Defaults to `1000000`. +[[index-max-terms-count]] `index.max_terms_count`:: The maximum number of terms that can be used in Terms Query. Defaults to `65536`. +[[index-max-regex-length]] `index.max_regex_length`:: The maximum length of regex that can be used in Regexp Query. @@ -280,6 +278,13 @@ Other index settings are available in index modules: Control over the transaction log and background flush operations. +[float] +[[x-pack-index-settings]] +=== [xpack]#{xpack} index settings# + +<>:: + + Specify the lifecycle policy and rollover alias for an index. -- include::index-modules/analysis.asciidoc[] diff --git a/docs/reference/index-modules/allocation/filtering.asciidoc b/docs/reference/index-modules/allocation/filtering.asciidoc index 9e7a67946a997..0ae331d0e446d 100644 --- a/docs/reference/index-modules/allocation/filtering.asciidoc +++ b/docs/reference/index-modules/allocation/filtering.asciidoc @@ -49,6 +49,7 @@ settings support three types of filters: `include`, `exclude`, and `require`. For example, to tell {es} to allocate shards from the `test` index to either `big` or `medium` nodes, use `index.routing.allocation.include`: + +-- [source,js] ------------------------ PUT test/_settings @@ -58,11 +59,11 @@ PUT test/_settings ------------------------ // CONSOLE // TEST[s/^/PUT test\n/] -+ + If you specify multiple filters, all conditions must be satisfied for shards to be relocated. For example, to move the `test` index to `big` nodes in `rack1`, you could specify: -+ + [source,js] ------------------------ PUT test/_settings @@ -73,6 +74,7 @@ PUT test/_settings ------------------------ // CONSOLE // TEST[s/^/PUT test\n/] +-- [float] [[index-allocation-settings]] diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index 014923d463cbd..ee6cf3958375b 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -92,22 +92,14 @@ from randomness] framework. This similarity has the following options: [horizontal] `basic_model`:: - Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelG.html[`be`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelD.html[`d`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelG.html[`g`], + Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelG.html[`g`], {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIF.html[`if`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIn.html[`in`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIne.html[`ine`] and - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelP.html[`p`]. - -`be`, `d` and `p` should be avoided in practice as they might return scores that -are equal to 0 or infinite with terms that do not meet the expected random -distribution. + {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIn.html[`in`] and + {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIne.html[`ine`]. 
`after_effect`:: - Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffect.NoAfterEffect.html[`no`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffectB.html[`b`] and - {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffectL.html[`l`]. + Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffectB.html[`b`] and + {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffectB.html[`l`]. `normalization`:: Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/Normalization.NoNormalization.html[`no`], diff --git a/docs/reference/index-modules/slowlog.asciidoc b/docs/reference/index-modules/slowlog.asciidoc index 235256bdce7c0..a96c8fe995b75 100644 --- a/docs/reference/index-modules/slowlog.asciidoc +++ b/docs/reference/index-modules/slowlog.asciidoc @@ -26,7 +26,26 @@ index.search.slowlog.threshold.fetch.trace: 200ms index.search.slowlog.level: info -------------------------------------------------- -All of the above settings are _dynamic_ and are set per-index. +All of the above settings are _dynamic_ and can be set for each index using the +<> API. For example: + +[source,js] +-------------------------------------------------- +PUT /twitter/_settings +{ + "index.search.slowlog.threshold.query.warn": "10s", + "index.search.slowlog.threshold.query.info": "5s", + "index.search.slowlog.threshold.query.debug": "2s", + "index.search.slowlog.threshold.query.trace": "500ms", + "index.search.slowlog.threshold.fetch.warn": "1s", + "index.search.slowlog.threshold.fetch.info": "800ms", + "index.search.slowlog.threshold.fetch.debug": "500ms", + "index.search.slowlog.threshold.fetch.trace": "200ms", + "index.search.slowlog.level": "info" +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] By default, none are enabled (set to `-1`). Levels (`warn`, `info`, `debug`, `trace`) allow to control under which logging level the log @@ -83,7 +102,23 @@ index.indexing.slowlog.level: info index.indexing.slowlog.source: 1000 -------------------------------------------------- -All of the above settings are _dynamic_ and are set per-index. +All of the above settings are _dynamic_ and can be set for each index using the +<> API. For example: + +[source,js] +-------------------------------------------------- +PUT /twitter/_settings +{ + "index.indexing.slowlog.threshold.index.warn": "10s", + "index.indexing.slowlog.threshold.index.info": "5s", + "index.indexing.slowlog.threshold.index.debug": "2s", + "index.indexing.slowlog.threshold.index.trace": "500ms", + "index.indexing.slowlog.level": "info", + "index.indexing.slowlog.source": "1000" +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] By default Elasticsearch will log the first 1000 characters of the _source in the slowlog. You can change that with `index.indexing.slowlog.source`. Setting diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index 8c1b99a42f2a6..c737b4ddc151f 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -84,7 +84,8 @@ setting is useful, for example, if you are in an environment where you can not control the ability to create a lot of memory maps so you need disable the ability to use memory-mapping. 
-=== Pre-loading data into the file system cache +[[preload-data-to-file-system-cache]] +=== Preloading data into the file system cache NOTE: This is an expert setting, the details of which may change in the future. diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 208829481514c..0e3c0d1d9cab3 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -10,23 +10,18 @@ include::../Versions.asciidoc[] +include::intro.asciidoc[] + include::getting-started.asciidoc[] include::setup.asciidoc[] include::setup/setup-xes.asciidoc[] -include::monitoring/configuring-monitoring.asciidoc[] - -include::{xes-repo-dir}/security/configuring-es.asciidoc[] - include::setup/setup-xclient.asciidoc[] -include::settings/configuring-xes.asciidoc[] - include::setup/bootstrap-checks-xes.asciidoc[] -:edit_url: include::upgrade.asciidoc[] include::api-conventions.asciidoc[] @@ -45,6 +40,8 @@ include::cluster.asciidoc[] include::query-dsl.asciidoc[] +include::scripting.asciidoc[] + include::mapping.asciidoc[] include::analysis.asciidoc[] @@ -65,11 +62,16 @@ include::rollup/index.asciidoc[] include::frozen-indices.asciidoc[] +include::administering.asciidoc[] + include::rest-api/index.asciidoc[] +include::{xes-repo-dir}/security/index.asciidoc[] + +include::{xes-repo-dir}/watcher/index.asciidoc[] + include::commands/index.asciidoc[] -:edit_url: include::how-to.asciidoc[] include::testing.asciidoc[] diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc index 5044e19278da9..408cd0c5484cf 100644 --- a/docs/reference/indices/aliases.asciidoc +++ b/docs/reference/indices/aliases.asciidoc @@ -269,12 +269,18 @@ POST /_aliases "alias" : "alias1", "is_write_index" : true } + }, + { + "add" : { + "index" : "test2", + "alias" : "alias1" + } } ] } -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT test\n/] +// TEST[s/^/PUT test\nPUT test2\n/] In this example, we associate the alias `alias1` to both `test` and `test2`, where `test` will be the index chosen for writing to. 
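To make the write-index behaviour concrete, here is a minimal sketch (the document body is illustrative): a write that goes through `alias1` lands in `test`, the index marked with `is_write_index: true` above.

[source,js]
--------------------------------------------------
PUT /alias1/_doc/1
{
  "message": "hello" <1>
}
--------------------------------------------------
<1> Because `test` is the write index for `alias1`, this document is stored in `test`, not in `test2`.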
@@ -311,13 +317,13 @@ POST /_aliases "add" : { "index" : "test", "alias" : "alias1", - "is_write_index" : true + "is_write_index" : false } }, { "add" : { "index" : "test2", "alias" : "alias1", - "is_write_index" : false + "is_write_index" : true } } ] diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index ea8667aa1b713..a03d2bb248dc4 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -103,8 +103,7 @@ which returns something similar to: "max_seq_no" : "-1", "sync_id" : "AVvFY-071siAOuFGEO9P", <1> "max_unsafe_auto_id_timestamp" : "-1", - "min_retained_seq_no" : "0", - "retention_leases" : "primary_term:1;version:1;id:replica-0;retaining_seq_no:0;timestamp:1547235588;source:replica" + "min_retained_seq_no" : "0" }, "num_docs" : 0 } @@ -119,7 +118,6 @@ which returns something similar to: // TESTRESPONSE[s/"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA"/"translog_uuid": $body.indices.twitter.shards.0.0.commit.user_data.translog_uuid/] // TESTRESPONSE[s/"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ"/"history_uuid": $body.indices.twitter.shards.0.0.commit.user_data.history_uuid/] // TESTRESPONSE[s/"sync_id" : "AVvFY-071siAOuFGEO9P"/"sync_id": $body.indices.twitter.shards.0.0.commit.user_data.sync_id/] -// TESTRESPONSE[s/"retention_leases" : "primary_term:1;version:1;id:replica-0;retaining_seq_no:0;timestamp:1547235588;source:replica"/"retention_leases": $body.indices.twitter.shards.0.0.commit.user_data.retention_leases/] <1> the `sync id` marker [float] diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index 1730c95e0dd24..b5037d0a94233 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -1,8 +1,14 @@ [[indices-rollover-index]] == Rollover Index -The rollover index API rolls an alias over to a new index when the existing -index is considered to be too large or too old. +The rollover index API rolls an <> to a new index when +the existing index meets a condition you provide. You can use this API to retire +an index that becomes too large or too old. + +NOTE: To roll over an index, a condition must be met *when you call the API*. +{es} does not monitor the index after you receive an API response. To +automatically roll over indices when a condition is met, you can use {es}'s +<>. The API accepts a single alias name and a list of `conditions`. The alias must point to a write index for a Rollover request to be valid. There are two ways this can be achieved, and depending on the configuration, the @@ -18,6 +24,17 @@ from the original (rolled-over) index. In this scenario, the write index will have its rollover alias' `is_write_index` set to `false`, while the newly created index will now have the rollover alias pointing to it as the write index with `is_write_index` as `true`. +The available conditions are: + +[[index-rollover-conditions]] +.`conditions` parameters +[options="header"] +|=== +| Name | Description +| max_age | The maximum age of the index +| max_docs | The maximum number of documents the index should contain. 
This does not add documents multiple times for replicas +| max_size | The maximum estimated size of the primary shard of the index +|=== [source,js] -------------------------------------------------- diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 24a67208f7289..aa73d3d1350bb 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -17,7 +17,10 @@ Shrinking works as follows: * Then it hard-links segments from the source index into the target index. (If the file system doesn't support hard-linking, then all segments are copied - into the new index, which is a much more time consuming process.) + into the new index, which is a much more time consuming process. Also if using + multiple data paths, shards on different data paths require a full copy of + segment files if they are not on the same disk since hardlinks don’t work across + disks) * Finally, it recovers the target index as though it were a closed index which had just been re-opened. diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index ade0a8075d582..45c8da3bfbd57 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -23,7 +23,7 @@ shards in the original index. The default is designed to allow you to split by factors of 2 up to a maximum of 1024 shards. However, the original number of primary shards must taken into account. For instance, an index created with 5 primary shards could be split into 10, 20, 40, 80, 160, 320, or a -maximum of 740 shards (with a single split action or multiple split actions). +maximum of 640 shards (with a single split action or multiple split actions). If the original index contains one primary shard (or a multi-shard index has been <> down to a single primary shard), then the @@ -50,6 +50,7 @@ Splitting works as follows: had just been re-opened. [float] +[[incremental-resharding]] === Why doesn't Elasticsearch support incremental resharding? Going from `N` shards to `N+1` shards, aka. incremental resharding, is indeed a @@ -92,7 +93,7 @@ PUT my_source_index "index.number_of_shards" : 1 } } -------------------------------------------------- +-------------------------------------------------- // CONSOLE In order to split an index, the index must be marked as read-only, diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 578bf35cb2446..d127e260075cf 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -271,28 +271,6 @@ POST test/_doc/1?pipeline=drop_guests_network // CONSOLE // TEST[continued] -//// -Hidden example assertion: -[source,js] --------------------------------------------------- -GET test/_doc/1 --------------------------------------------------- -// CONSOLE -// TEST[continued] -// TEST[catch:missing] - -[source,js] --------------------------------------------------- -{ - "_index": "test", - "_type": "_doc", - "_id": "1", - "found": false -} --------------------------------------------------- -// TESTRESPONSE -//// - Thanks to the `?.` operator the following document will not throw an error. If the pipeline used a `.` the following document would throw a NullPointerException since the `network` object is not part of the source document. 
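As a rough sketch of the conditional being described (assuming, as the pipeline name suggests, that `drop_guests_network` drops documents whose `network.name` is `Guest`), the null safe operator would appear in the `drop` processor's `if` clause like this:

[source,js]
--------------------------------------------------
PUT _ingest/pipeline/drop_guests_network
{
  "processors": [
    {
      "drop": {
        "if": "ctx.network?.name == 'Guest'" <1>
      }
    }
  ]
}
--------------------------------------------------
<1> `ctx.network?.name` evaluates to `null` instead of throwing a NullPointerException when the `network` object is missing, so such documents are simply kept.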
@@ -392,28 +370,6 @@ POST test/_doc/3?pipeline=drop_guests_network // CONSOLE // TEST[continued] -//// -Hidden example assertion: -[source,js] --------------------------------------------------- -GET test/_doc/3 --------------------------------------------------- -// CONSOLE -// TEST[continued] -// TEST[catch:missing] - -[source,js] --------------------------------------------------- -{ - "_index": "test", - "_type": "_doc", - "_id": "3", - "found": false -} --------------------------------------------------- -// TESTRESPONSE -//// - The `?.` operators works well for use in the `if` conditional because the {painless}/painless-operators-reference.html#null-safe-operator[null safe operator] returns null if the object is null and `==` is null safe (as well as many other @@ -511,28 +467,6 @@ POST test/_doc/1?pipeline=not_prod_dropper The document is <> since `prod` (case insensitive) is not found in the tags. -//// -Hidden example assertion: -[source,js] --------------------------------------------------- -GET test/_doc/1 --------------------------------------------------- -// CONSOLE -// TEST[continued] -// TEST[catch:missing] - -[source,js] --------------------------------------------------- -{ - "_index": "test", - "_type": "_doc", - "_id": "1", - "found": false -} --------------------------------------------------- -// TESTRESPONSE -//// - The following document is indexed (i.e. not dropped) since `prod` (case insensitive) is found in the tags. @@ -933,6 +867,7 @@ include::processors/gsub.asciidoc[] include::processors/join.asciidoc[] include::processors/json.asciidoc[] include::processors/kv.asciidoc[] +include::processors/lowercase.asciidoc[] include::processors/pipeline.asciidoc[] include::processors/remove.asciidoc[] include::processors/rename.asciidoc[] diff --git a/docs/reference/ingest/processors/date-index-name.asciidoc b/docs/reference/ingest/processors/date-index-name.asciidoc index 6dd54dab056e8..783ecc9b2b1b0 100644 --- a/docs/reference/ingest/processors/date-index-name.asciidoc +++ b/docs/reference/ingest/processors/date-index-name.asciidoc @@ -137,9 +137,9 @@ understands this to mean `2016-04-01` as is explained in the <>. | `date_rounding` | yes | - | How to round the date when formatting the date into the index name. Valid values are: `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second). Supports <>. -| `date_formats` | no | yyyy-MM-dd'T'HH:mm:ss.SSSZ | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. +| `date_formats` | no | yyyy-MM-dd+++'T'+++HH:mm:ss.SSSXX | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. | `timezone` | no | UTC | The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names. | `locale` | no | ENGLISH | The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days. -| `index_name_format` | no | yyyy-MM-dd | The format to be used when printing the parsed date into the index name. An valid Joda pattern is expected here. Supports <>. +| `index_name_format` | no | yyyy-MM-dd | The format to be used when printing the parsed date into the index name. A valid java time pattern is expected here. Supports <>. 
include::common-options.asciidoc[] |====== diff --git a/docs/reference/ingest/processors/date.asciidoc b/docs/reference/ingest/processors/date.asciidoc index 17cb367afadaa..d797dffd8d435 100644 --- a/docs/reference/ingest/processors/date.asciidoc +++ b/docs/reference/ingest/processors/date.asciidoc @@ -14,7 +14,7 @@ in the same order they were defined as part of the processor definition. | Name | Required | Default | Description | `field` | yes | - | The field to get the date from. | `target_field` | no | @timestamp | The field that will hold the parsed date. -| `formats` | yes | - | An array of the expected date formats. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. +| `formats` | yes | - | An array of the expected date formats. Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. | `timezone` | no | UTC | The timezone to use when parsing the date. Supports <>. | `locale` | no | ENGLISH | The locale to use when parsing the date, relevant when parsing month names or week days. Supports <>. include::common-options.asciidoc[] diff --git a/docs/reference/ingest/processors/dissect.asciidoc b/docs/reference/ingest/processors/dissect.asciidoc index 0c04e7ed07396..f2a7a42b9d668 100644 --- a/docs/reference/ingest/processors/dissect.asciidoc +++ b/docs/reference/ingest/processors/dissect.asciidoc @@ -78,12 +78,14 @@ include::common-options.asciidoc[] } -------------------------------------------------- // NOTCONSOLE + [[dissect-key-modifiers]] ==== Dissect key modifiers Key modifiers can change the default behavior for dissection. Key modifiers may be found on the left or right of the `%{keyname}` always inside the `%{` and `}`. For example `%{+keyname ->}` has the append and right padding modifiers. +[[dissect-key-modifiers-table]] .Dissect Key Modifiers [options="header"] |====== @@ -132,6 +134,7 @@ Right padding modifier with empty key example * level = WARN |====== +[[append-modifier]] ===== Append modifier (`+`) [[dissect-modifier-append-key]] Dissect supports appending two or more results together for the output. @@ -146,6 +149,7 @@ Append modifier example * name = john jacob jingleheimer schmidt |====== +[[append-order-modifier]] ===== Append with order modifier (`+` and `/n`) [[dissect-modifier-append-key-with-order]] Dissect supports appending two or more results together for the output. @@ -160,6 +164,7 @@ Append with order modifier example * name = schmidt,john,jingleheimer,jacob |====== +[[named-skip-key]] ===== Named skip key (`?`) [[dissect-modifier-named-skip-key]] Dissect supports ignoring matches in the final result. This can be done with an empty key `%{}`, but for readability @@ -170,10 +175,11 @@ Named skip key modifier example | *Pattern* | `%{clientip} %{?ident} %{?auth} [%{@timestamp}]` | *Input* | 1.2.3.4 - - [30/Apr/1998:22:00:52 +0000] | *Result* a| -* ip = 1.2.3.4 +* clientip = 1.2.3.4 * @timestamp = 30/Apr/1998:22:00:52 +0000 |====== +[[reference-keys]] ===== Reference keys (`*` and `&`) [[dissect-modifier-reference-keys]] Dissect support using parsed values as the key/value pairings for the structured content. Imagine a system that diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index f38e62806bb9d..7ce7b430ef610 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -10,7 +10,7 @@ under the CCA-ShareAlike 4.0 license. 
For more details see, http://dev.maxmind.c The `geoip` processor can run with other GeoIP2 databases from Maxmind. The files must be copied into the `ingest-geoip` config directory, and the `database_file` option should be used to specify the filename of the custom database. Custom database files must be stored -uncompressed. The `ingest-geoip` config directory is located at `$ES_HOME/config/ingest-geoip`. +uncompressed. The `ingest-geoip` config directory is located at `$ES_CONFIG/ingest-geoip`. [[using-ingest-geoip]] ==== Using the `geoip` Processor in a Pipeline @@ -27,7 +27,7 @@ uncompressed. The `ingest-geoip` config directory is located at `$ES_HOME/config | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document |====== -*Depends on what is available in `database_field`: +*Depends on what is available in `database_file`: * If the GeoLite2 City database is used, then the following fields may be added under the `target_field`: `ip`, `country_iso_code`, `country_name`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, `latitude`, `longitude` diff --git a/docs/reference/ingest/processors/script.asciidoc b/docs/reference/ingest/processors/script.asciidoc index 4a1ab5306d040..9e0dbe60b8d8b 100644 --- a/docs/reference/ingest/processors/script.asciidoc +++ b/docs/reference/ingest/processors/script.asciidoc @@ -101,8 +101,3 @@ The response from the above index request: In the above response, you can see that our document was actually indexed into `my_index` instead of `any_index`. This type of manipulation is often convenient in pipelines that have various branches of transformation, and depending on the progress made, indexed into different indices. - -[[set-processor]] -=== Set Processor -Sets one field and associates it with the specified value. If the field already exists, -its value will be replaced with the provided one. diff --git a/docs/reference/ingest/processors/set.asciidoc b/docs/reference/ingest/processors/set.asciidoc index 564594a05b0d6..c964182ef6573 100644 --- a/docs/reference/ingest/processors/set.asciidoc +++ b/docs/reference/ingest/processors/set.asciidoc @@ -1,3 +1,8 @@ +[[set-processor]] +=== Set Processor +Sets one field and associates it with the specified value. If the field already exists, +its value will be replaced with the provided one. + [[set-options]] .Set Options [options="header"] @@ -12,10 +17,69 @@ include::common-options.asciidoc[] [source,js] -------------------------------------------------- { + "description" : "sets the value of count to 1" "set": { - "field": "host.os.name", - "value": "{{os}}" + "field": "count", + "value": 1 } } -------------------------------------------------- // NOTCONSOLE + +This processor can also be used to copy data from one field to another. 
For example: + +[source,js] +-------------------------------------------------- +PUT _ingest/pipeline/set_os +{ + "description": "sets the value of host.os.name from the field os", + "processors": [ + { + "set": { + "field": "host.os.name", + "value": "{{os}}" + } + } + ] +} + +POST _ingest/pipeline/set_os/_simulate +{ + "docs": [ + { + "_source": { + "os": "Ubuntu" + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +Result: +[source,js] +-------------------------------------------------- +{ + "docs" : [ + { + "doc" : { + "_index" : "_index", + "_type" : "_doc", + "_id" : "_id", + "_source" : { + "host" : { + "os" : { + "name" : "Ubuntu" + } + }, + "os" : "Ubuntu" + }, + "_ingest" : { + "timestamp" : "2019-03-11T21:54:37.909224Z" + } + } + } + ] +} +-------------------------------------------------- +// TESTRESPONSE[s/2019-03-11T21:54:37.909224Z/$body.docs.0.doc._ingest.timestamp/] diff --git a/docs/reference/ingest/processors/user-agent.asciidoc b/docs/reference/ingest/processors/user-agent.asciidoc index 942ba9f148799..152989e956de9 100644 --- a/docs/reference/ingest/processors/user-agent.asciidoc +++ b/docs/reference/ingest/processors/user-agent.asciidoc @@ -19,6 +19,7 @@ The ingest-user-agent module ships by default with the regexes.yaml made availab | `regex_file` | no | - | The name of the file in the `config/ingest-user-agent` directory containing the regular expressions for parsing the user agent string. Both the directory and the file have to be created before starting Elasticsearch. If not specified, ingest-user-agent will use the regexes.yaml from uap-core it ships with (see below). | `properties` | no | [`name`, `major`, `minor`, `patch`, `build`, `os`, `os_name`, `os_major`, `os_minor`, `device`] | Controls what properties are added to `target_field`. | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +| `ecs` | no | `true` | Whether to return the output in Elastic Common Schema format. NOTE: This setting is deprecated and will be removed in a future version. |====== Here is an example that adds the user agent details to the `user_agent` field based on the `agent` field: diff --git a/docs/reference/intro.asciidoc b/docs/reference/intro.asciidoc new file mode 100644 index 0000000000000..c16f1bd78a1d5 --- /dev/null +++ b/docs/reference/intro.asciidoc @@ -0,0 +1,270 @@ +[[elasticsearch-intro]] += Elasticsearch introduction +[partintro] +-- +_**You know, for search (and analysis)**_ + +{es} is the distributed search and analytics engine at the heart of +the {stack}. {ls} and {beats} facilitate collecting, aggregating, and +enriching your data and storing it in {es}. {kib} enables you to +interactively explore, visualize, and share insights into your data and manage +and monitor the stack. {es} is where the indexing, search, and analysis +magic happen. + +{es} provides real-time search and analytics for all types of data. Whether you +have structured or unstructured text, numerical data, or geospatial data, +{es} can efficiently store and index it in a way that supports fast searches. +You can go far beyond simple data retrieval and aggregate information to discover +trends and patterns in your data. And as your data and query volume grows, the +distributed nature of {es} enables your deployment to grow seamlessly right +along with it. 
+ +While not _every_ problem is a search problem, {es} offers speed and flexibility +to handle data in a wide variety of use cases: + +* Add a search box to an app or website +* Store and analyze logs, metrics, and security event data +* Use machine learning to automatically model the behavior of your data in real + time +* Automate business workflows using {es} as a storage engine +* Manage, integrate, and analyze spatial information using {es} as a geographic + information system (GIS) +* Store and process genetic data using {es} as a bioinformatics research tool + +We’re continually amazed by the novel ways people use search. But whether +your use case is similar to one of these, or you're using {es} to tackle a new +problem, the way you work with your data, documents, and indices in {es} is +the same. +-- + +[[documents-indices]] +== Data in: documents and indices + +{es} is a distributed document store. Instead of storing information as rows of +columnar data, {es} stores complex data structures that have been serialized +as JSON documents. When you have multiple {es} nodes in a cluster, stored +documents are distributed across the cluster and can be accessed immediately +from any node. + +When a document is stored, it is indexed and fully searchable in near +real-time--within 1 second. {es} uses a data structure called an +inverted index that supports very fast full-text searches. An inverted index +lists every unique word that appears in any document and identifies all of the +documents each word occurs in. + +An index can be thought of as an optimized collection of documents and each +document is a collection of fields, which are the key-value pairs that contain +your data. By default, {es} indexes all data in every field and each indexed +field has a dedicated, optimized data structure. For example, text fields are +stored in inverted indices, and numeric and geo fields are stored in BKD trees. +The ability to use the per-field data structures to assemble and return search +results is what makes {es} so fast. + +{es} also has the ability to be schema-less, which means that documents can be +indexed without explicitly specifying how to handle each of the different fields +that might occur in a document. When dynamic mapping is enabled, {es} +automatically detects and adds new fields to the index. This default +behavior makes it easy to index and explore your data--just start +indexing documents and {es} will detect and map booleans, floating point and +integer values, dates, and strings to the appropriate {es} datatypes. + +Ultimately, however, you know more about your data and how you want to use it +than {es} can. You can define rules to control dynamic mapping and explicitly +define mappings to take full control of how fields are stored and indexed. + +Defining your own mappings enables you to: + +* Distinguish between full-text string fields and exact value string fields +* Perform language-specific text analysis +* Optimize fields for partial matching +* Use custom date formats +* Use data types such as `geo_point` and `geo_shape` that cannot be automatically +detected + +It’s often useful to index the same field in different ways for different +purposes. For example, you might want to index a string field as both a text +field for full-text search and as a keyword field for sorting or aggregating +your data. Or, you might choose to use more than one language analyzer to +process the contents of a string field that contains user input. 
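A minimal sketch of that multi-purpose approach: one string field mapped as `text` for full-text search, with a `keyword` sub-field for sorting and aggregations (index and field names are illustrative):

[source,js]
--------------------------------------------------
PUT /products
{
  "mappings": {
    "properties": {
      "description": {
        "type": "text", <1>
        "fields": {
          "raw": { "type": "keyword" } <2>
        }
      }
    }
  }
}
--------------------------------------------------
<1> Analyzed for full-text search.
<2> The `description.raw` sub-field keeps the exact original value for sorting and aggregations.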
+ +The analysis chain that is applied to a full-text field during indexing is also +used at search time. When you query a full-text field, the query text undergoes +the same analysis before the terms are looked up in the index. + +[[search-analyze]] +== Information out: search and analyze + +While you can use {es} as a document store and retrieve documents and their +metadata, the real power comes from being able to easily access the full suite +of search capabilities built on the Apache Lucene search engine library. + +{es} provides a simple, coherent REST API for managing your cluster and indexing +and searching your data. For testing purposes, you can easily submit requests +directly from the command line or through the Developer Console in {kib}. From +your applications, you can use the +https://www.elastic.co/guide/en/elasticsearch/client/index.html[{es} client] +for your language of choice: Java, JavaScript, Go, .NET, PHP, Perl, Python +or Ruby. + +[float] +[[search-data]] +=== Searching your data + +The {es} REST APIs support structured queries, full text queries, and complex +queries that combine the two. Structured queries are +similar to the types of queries you can construct in SQL. For example, you +could search the `gender` and `age` fields in your `employee` index and sort the +matches by the `hire_date` field. Full-text queries find all documents that +match the query string and return them sorted by _relevance_—how good a +match they are for your search terms. + +In addition to searching for individual terms, you can perform phrase searches, +similarity searches, and prefix searches, and get autocomplete suggestions. + +Have geospatial or other numerical data that you want to search? {es} indexes +non-textual data in optimized data structures that support +high-performance geo and numerical queries. + +You can access all of these search capabilities using {es}'s +comprehensive JSON-style query language (<>). You can also +construct <> to search and aggregate data +natively inside {es}, and JDBC and ODBC drivers enable a broad range of +third-party applications to interact with {es} via SQL. + +[float] +[[analyze-data]] +=== Analyzing your data + +{es} aggregations enable you to build complex summaries of your data and gain +insight into key metrics, patterns, and trends. Instead of just finding the +proverbial “needle in a haystack”, aggregations enable you to answer questions +like: + +* How many needles are in the haystack? +* What is the average length of the needles? +* What is the median length of the needles, broken down by manufacturer? +* How many needles were added to the haystack in each of the last six months? + +You can also use aggregations to answer more subtle questions, such as: + +* What are your most popular needle manufacturers? +* Are there any unusual or anomalous clumps of needles? + +Because aggregations leverage the same data-structures used for search, they are +also very fast. This enables you to analyze and visualize your data in real time. +Your reports and dashboards update as your data changes so you can take action +based on the latest information. + +What’s more, aggregations operate alongside search requests. You can search +documents, filter results, and perform analytics at the same time, on the same +data, in a single request. 
And because aggregations are calculated in the +context of a particular search, you’re not just displaying a count of all +size 70 needles, you’re displaying a count of the size 70 needles +that match your users' search criteria--for example, all size 70 _non-stick +embroidery_ needles. + +[float] +[[more-features]] +==== But wait, there’s more + +Want to automate the analysis of your time-series data? You can use +{stack-ov}/ml-overview.html[machine learning] features to create accurate +baselines of normal behavior in your data and identify anomalous patterns. With +machine learning, you can detect: + +* Anomalies related to temporal deviations in values, counts, or frequencies +* Statistical rarity +* Unusual behaviors for a member of a population + +And the best part? You can do this without having to specify algorithms, models, +or other data science-related configurations. + +[[scalability]] +== Scalability and resilience: clusters, nodes, and shards +++++ +Scalability and resilience +++++ + +{es} is built to be always available and to scale with your needs. It does this +by being distributed by nature. You can add servers (nodes) to a cluster to +increase capacity and {es} automatically distributes your data and query load +across all of the available nodes. No need to overhaul your application, {es} +knows how to balance multi-node clusters to provide scale and high availability. +The more nodes, the merrier. + +How does this work? Under the covers, an {es} index is really just a logical +grouping of one or more physical shards, where each shard is actually a +self-contained index. By distributing the documents in an index across multiple +shards, and distributing those shards across multiple nodes, {es} can ensure +redundancy, which both protects against hardware failures and increases +query capacity as nodes are added to a cluster. As the cluster grows (or shrinks), +{es} automatically migrates shards to rebalance the cluster. + +There are two types of shards: primaries and replicas. Each document in an index +belongs to one primary shard. A replica shard is a copy of a primary shard. +Replicas provide redundant copies of your data to protect against hardware +failure and increase capacity to serve read requests +like searching or retrieving a document. + +The number of primary shards in an index is fixed at the time that an index is +created, but the number of replica shards can be changed at any time, without +interrupting indexing or query operations. + +[float] +[[it-depends]] +=== It depends... + +There are a number of performance considerations and trade offs with respect +to shard size and the number of primary shards configured for an index. The more +shards, the more overhead there is simply in maintaining those indices. The +larger the shard size, the longer it takes to move shards around when {es} +needs to rebalance a cluster. + +Querying lots of small shards makes the processing per shard faster, but more +queries means more overhead, so querying a smaller +number of larger shards might be faster. In short...it depends. + +As a starting point: + +* Aim to keep the average shard size between a few GB and a few tens of GB. For + use cases with time-based data, it is common to see shards in the 20GB to 40GB + range. + +* Avoid the gazillion shards problem. The number of shards a node can hold is + proportional to the available heap space. As a general rule, the number of + shards per GB of heap space should be less than 20. 
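To illustrate the earlier point that replica counts, unlike primary shard counts, can be adjusted on a live index, a minimal sketch (the index name is illustrative):

[source,js]
--------------------------------------------------
PUT /my_index/_settings
{
  "index.number_of_replicas": 2 <1>
}
--------------------------------------------------
<1> Takes effect immediately; indexing and search continue uninterrupted while the additional replica copies are allocated.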
+ +The best way to determine the optimal configuration for your use case is +through https://www.elastic.co/elasticon/conf/2016/sf/quantitative-cluster-sizing[ +testing with your own data and queries]. + +[float] +[[disaster-ccr]] +=== In case of disaster + +For performance reasons, the nodes within a cluster need to be on the same +network. Balancing shards in a cluster across nodes in different data centers +simply takes too long. But high-availability architectures demand that you avoid +putting all of your eggs in one basket. In the event of a major outage in one +location, servers in another location need to be able to take over. Seamlessly. +The answer? {ccr-cap} (CCR). + +CCR provides a way to automatically synchronize indices from your primary cluster +to a secondary remote cluster that can serve as a hot backup. If the primary +cluster fails, the secondary cluster can take over. You can also use CCR to +create secondary clusters to serve read requests in geo-proximity to your users. + +{ccr-cap} is active-passive. The index on the primary cluster is +the active leader index and handles all write requests. Indices replicated to +secondary clusters are read-only followers. + +[float] +[[admin]] +=== Care and feeding + +As with any enterprise system, you need tools to secure, manage, and +monitor your {es} clusters. Security, monitoring, and administrative features +that are integrated into {es} enable you to use {kibana-ref}/introduction.html[{kib}] +as a control center for managing a cluster. Features like <> and <> +help you intelligently manage your data over time. diff --git a/docs/reference/licensing/update-license.asciidoc b/docs/reference/licensing/update-license.asciidoc index 0dfe9b2da888a..04798c03947ba 100644 --- a/docs/reference/licensing/update-license.asciidoc +++ b/docs/reference/licensing/update-license.asciidoc @@ -79,20 +79,20 @@ POST /_license NOTE: These values are invalid; you must substitute the appropriate content from your license file. -You can alternatively use a `curl` command, for example: +You can also install your license file using a `curl` command. Be sure to add +`@` before the license file path to instruct curl to treat it as an input file. -[source,js] [source,shell] ------------------------------------------------------------ curl -XPUT -u 'http://:/_license' -H "Content-Type: application/json" -d @license.json ------------------------------------------------------------ // NOTCONSOLE -On Windows machine, use the following command: +On Windows, use the following command: [source,shell] ------------------------------------------------------------ -gc .\license.json | Invoke-WebRequest -uri http://:/_license -Credential elastic -Method Put -ContentType "application/json" +Invoke-WebRequest -uri http://:/_xpack/license -Credential elastic -Method Put -ContentType "application/json" -InFile .\license.json ------------------------------------------------------------ In these examples, diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc index b6a7052f69a0f..1d47de23dfb7f 100644 --- a/docs/reference/mapping.asciidoc +++ b/docs/reference/mapping.asciidoc @@ -5,7 +5,7 @@ -- Mapping is the process of defining how a document, and the fields it contains, -are stored and indexed. For instance, use mappings to define: +are stored and indexed. For instance, use mappings to define: * which string fields should be treated as full text fields. * which fields contain numbers, dates, or geolocations. 
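To ground that list, a minimal sketch of an explicit mapping covering those cases, with a full-text string, a date, and a geolocation (index and field names are illustrative):

[source,js]
--------------------------------------------------
PUT /checkins
{
  "mappings": {
    "properties": {
      "message":  { "type": "text" }, <1>
      "created":  { "type": "date" }, <2>
      "location": { "type": "geo_point" } <3>
    }
  }
}
--------------------------------------------------
<1> Treated as full text and analyzed at index time.
<2> Parsed as a date using the default date formats.
<3> Stored as a geolocation, enabling geo queries and aggregations.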
@@ -20,7 +20,7 @@ are stored and indexed. For instance, use mappings to define: Each index has one _mapping type_ which determines how the document will be indexed. -deprecated[6.0.0,See <>]. +deprecated::[6.0.0,See <>] A mapping type has: @@ -87,15 +87,11 @@ causing a mapping explosion: `2`, etc. The default is `20`. `index.mapping.nested_fields.limit`:: - The maximum number of `nested` fields in an index, defaults to `50`. - Indexing 1 document with 100 nested fields actually indexes 101 documents - as each nested document is indexed as a separate hidden document. + The maximum number of distinct `nested` mappings in an index, defaults to `50`. `index.mapping.nested_objects.limit`:: - The maximum number of `nested` json objects within a single document across - all nested fields, defaults to 10000. Indexing one document with an array of - 100 objects within a nested field, will actually create 101 documents, as - each nested object will be indexed as a separate hidden document. + The maximum number of `nested` JSON objects within a single document across + all nested types, defaults to 10000. [float] diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index 8598eab412e79..c5109ab9c9fc3 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -198,14 +198,37 @@ PUT my_index PUT my_index/_doc/1 { "name": { - "first": "Alice", - "middle": "Mary", - "last": "White" + "first": "John", + "middle": "Winston", + "last": "Lennon" } } -------------------------------------------------- // CONSOLE +Note that the `path_match` and `path_unmatch` parameters match on object paths +in addition to leaf fields. As an example, indexing the following document will +result in an error because the `path_match` setting also matches the object +field `name.title`, which can't be mapped as text: + +[source,js] +-------------------------------------------------- +PUT my_index/_doc/2 +{ + "name": { + "first": "Paul", + "last": "McCartney", + "title": { + "value": "Sir", + "category": "order of chivalry" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] +// TEST[catch:bad_request] + [[template-variables]] ==== `{name}` and `{dynamic_type}` @@ -286,6 +309,7 @@ PUT my_index -------------------------------------------------- // CONSOLE +[[text-only-mappings-strings]] ===== `text`-only mappings for strings On the contrary to the previous example, if the only thing that you care about diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc index c455c55f5ea7f..1ae4ab4c8fc89 100644 --- a/docs/reference/mapping/fields/field-names-field.asciidoc +++ b/docs/reference/mapping/fields/field-names-field.asciidoc @@ -11,6 +11,7 @@ Now the `_field_names` field only indexes the names of fields that have or `norm` enabled the <> query will still be available but will not use the `_field_names` field. +[[disable-field-names]] ==== Disabling `_field_names` Disabling `_field_names` is often not necessary because it no longer diff --git a/docs/reference/mapping/fields/id-field.asciidoc b/docs/reference/mapping/fields/id-field.asciidoc index 0f4ed15196962..5ccf1cc8ec75c 100644 --- a/docs/reference/mapping/fields/id-field.asciidoc +++ b/docs/reference/mapping/fields/id-field.asciidoc @@ -39,3 +39,9 @@ but doing so is discouraged as it requires to load a lot of data in memory. 
In case sorting or aggregating on the `_id` field is required, it is advised to duplicate the content of the `_id` field in another field that has `doc_values` enabled. + + +[NOTE] +================================================== +`_id` is limited to 512 bytes in size and larger values will be rejected. +================================================== diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index 757fc0fa5b662..c9fd2cf186909 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -6,6 +6,7 @@ at index time. The `_source` field itself is not indexed (and thus is not searchable), but it is stored so that it can be returned when executing _fetch_ requests, like <> or <>. +[[disable-source-field]] ==== Disabling the `_source` field Though very handy to have around, the source field does incur storage overhead diff --git a/docs/reference/mapping/params/boost.asciidoc b/docs/reference/mapping/params/boost.asciidoc index 7da03a66ac44e..6dfe564ed7c58 100644 --- a/docs/reference/mapping/params/boost.asciidoc +++ b/docs/reference/mapping/params/boost.asciidoc @@ -64,7 +64,7 @@ POST _search // CONSOLE -deprecated[5.0.0, index time boost is deprecated. Instead, the field mapping boost is applied at query time. For indices created before 5.0.0 the boost will still be applied at index time.] +deprecated[5.0.0, "Index time boost is deprecated. Instead, the field mapping boost is applied at query time. For indices created before 5.0.0, the boost will still be applied at index time."] [WARNING] .Why index time boosting is a bad idea ================================================== diff --git a/docs/reference/mapping/params/coerce.asciidoc b/docs/reference/mapping/params/coerce.asciidoc index 55f31262351fd..be5b2a648c600 100644 --- a/docs/reference/mapping/params/coerce.asciidoc +++ b/docs/reference/mapping/params/coerce.asciidoc @@ -47,8 +47,7 @@ PUT my_index/_doc/2 <1> The `number_one` field will contain the integer `10`. <2> This document will be rejected because coercion is disabled. -TIP: The `coerce` setting is allowed to have different settings for fields of -the same name in the same index. Its value can be updated on existing fields +TIP: The `coerce` setting value can be updated on existing fields using the <>. [[coerce-setting]] diff --git a/docs/reference/mapping/params/eager-global-ordinals.asciidoc b/docs/reference/mapping/params/eager-global-ordinals.asciidoc index 8973be951129c..a182a4776d9e2 100644 --- a/docs/reference/mapping/params/eager-global-ordinals.asciidoc +++ b/docs/reference/mapping/params/eager-global-ordinals.asciidoc @@ -1,37 +1,52 @@ [[eager-global-ordinals]] === `eager_global_ordinals` -Global ordinals is a data-structure on top of doc values, that maintains an -incremental numbering for each unique term in a lexicographic order. Each -term has a unique number and the number of term 'A' is lower than the -number of term 'B'. Global ordinals are only supported with -<> and <> fields. In `keyword` fields, they -are available by default but `text` fields can only use them when `fielddata`, -with all of its associated baggage, is enabled. - -Doc values (and fielddata) also have ordinals, which is a unique numbering for -all terms in a particular segment and field. 
Global ordinals just build on top -of this, by providing a mapping between the segment ordinals and the global -ordinals, the latter being unique across the entire shard. Given that global -ordinals for a specific field are tied to _all the segments of a shard_, they -need to be entirely rebuilt whenever a once new segment becomes visible. - -Global ordinals are used for features that use segment ordinals, such as -the <>, -to improve the execution time. A terms aggregation relies purely on global -ordinals to perform the aggregation at the shard level, then converts global -ordinals to the real term only for the final reduce phase, which combines -results from different shards. - -The loading time of global ordinals depends on the number of terms in a field, -but in general it is low, since it source field data has already been loaded. -The memory overhead of global ordinals is a small because it is very -efficiently compressed. - -By default, global ordinals are loaded at search-time, which is the right -trade-off if you are optimizing for indexing speed. However, if you are more -interested in search speed, it could be interesting to set -`eager_global_ordinals: true` on fields that you plan to use in terms +==== What are global ordinals? + +To support aggregations and other operations that require looking up field +values on a per-document basis, Elasticsearch uses a data structure called +<>. Term-based field types such as `keyword` store +their doc values using an ordinal mapping for a more compact representation. +This mapping works by assigning each term an incremental integer or 'ordinal' +based on its lexicographic order. The field's doc values store only the +ordinals for each document instead of the original terms, with a separate +lookup structure to convert between ordinals and terms. + +When used during aggregations, ordinals can greatly improve performance. As an +example, the `terms` aggregation relies only on ordinals to collect documents +into buckets at the shard-level, then converts the ordinals back to their +original term values when combining results across shards. + +Each index segment defines its own ordinal mapping, but aggregations collect +data across an entire shard. So to be able to use ordinals for shard-level +operations like aggregations, Elasticsearch creates a unified mapping called +'global ordinals'. The global ordinal mapping is built on top of segment +ordinals, and works by maintaining a map from global ordinal to the local +ordinal for each segment. + +Global ordinals are used if a search contains any of the following components: + +* Certain bucket aggregations on `keyword`, `ip`, and `flattened` fields. This +includes `terms` aggregations as mentioned above, as well as `composite`, +`diversified_sampler`, and `significant_terms`. +* Bucket aggregations on `text` fields that require <> +to be enabled. +* Operations on parent and child documents from a `join` field, including +`has_child` queries and `parent` aggregations. + +NOTE: The global ordinal mapping is an on-heap data structure. When measuring +memory usage, Elasticsearch counts the memory from global ordinals as +'fielddata'. Global ordinals memory is included in the +<>, and is returned +under `fielddata` in the <> response. + +==== Loading global ordinals + +The global ordinal mapping must be built before ordinals can be used during a +search. By default, the mapping is loaded during search on the first time that +global ordinals are needed. 
This is is the right approach if you are optimizing +for indexing speed, but if search performance is a priority, it's recommended +to eagerly load global ordinals eagerly on fields that will be used in aggregations: [source,js] @@ -49,13 +64,14 @@ PUT my_index/_mapping // CONSOLE // TEST[s/^/PUT my_index\n/] -This will shift the cost from search-time to refresh-time. Elasticsearch will -make sure that global ordinals are built before publishing updates to the -content of the index. +When `eager_global_ordinals` is enabled, global ordinals are built when a shard +is <> -- Elasticsearch always loads them before +exposing changes to the content of the index. This shifts the cost of building +global ordinals from search to index-time. Elasticsearch will also eagerly +build global ordinals when creating a new copy of a shard, as can occur when +increasing the number of replicas or relocating a shard onto a new node. -If you ever decide that you do not need to run `terms` aggregations on this -field anymore, then you can disable eager loading of global ordinals at any -time: +Eager loading can be disabled at any time by updating the `eager_global_ordinals` setting: [source,js] ------------ @@ -72,3 +88,32 @@ PUT my_index/_mapping // CONSOLE // TEST[continued] +IMPORTANT: On a <>, global ordinals are discarded +after each search and rebuilt again when they're requested. This means that +`eager_global_ordinals` should not be used on frozen indices: it would +cause global ordinals to be reloaded on every search. Instead, the index should +be force-merged to a single segment before being frozen. This avoids building +global ordinals altogether (more details can be found in the next section). + +==== Avoiding global ordinal loading + +Usually, global ordinals do not present a large overhead in terms of their +loading time and memory usage. However, loading global ordinals can be +expensive on indices with large shards, or if the fields contain a large +number of unique term values. Because global ordinals provide a unified mapping +for all segments on the shard, they also need to be rebuilt entirely when a new +segment becomes visible. + +In some cases it is possible to avoid global ordinal loading altogether: + +* The `terms`, `sampler`, and `significant_terms` aggregations support a +parameter +<> +that helps control how buckets are collected. It defaults to `global_ordinals`, +but can be set to `map` to instead use the term values directly. +* If a shard has been <> down to a single +segment, then its segment ordinals are already 'global' to the shard. In this +case, Elasticsearch does not need to build a global ordinal mapping and there +is no additional overhead from using global ordinals. Note that for performance +reasons you should only force-merge an index to which you will never write to +again. diff --git a/docs/reference/mapping/params/enabled.asciidoc b/docs/reference/mapping/params/enabled.asciidoc index 06b76ddeae006..7193c6aa9f6e3 100644 --- a/docs/reference/mapping/params/enabled.asciidoc +++ b/docs/reference/mapping/params/enabled.asciidoc @@ -7,11 +7,11 @@ you are using Elasticsearch as a web session store. You may want to index the session ID and last update time, but you don't need to query or run aggregations on the session data itself. -The `enabled` setting, which can be applied only to the mapping type and to -<> fields, causes Elasticsearch to skip parsing of the -contents of the field entirely. 
The JSON can still be retrieved from the -<> field, but it is not searchable or stored -in any other way: +The `enabled` setting, which can be applied only to the top-level mapping +definition and to <> fields, causes Elasticsearch to skip +parsing of the contents of the field entirely. The JSON can still be retrieved +from the <> field, but it is not searchable or +stored in any other way: [source,js] -------------------------------------------------- @@ -26,6 +26,7 @@ PUT my_index "type": "date" }, "session_data": { <1> + "type": "object", "enabled": false } } @@ -55,7 +56,7 @@ PUT my_index/_doc/session_2 <2> Any arbitrary data can be passed to the `session_data` field as it will be entirely ignored. <3> The `session_data` will also ignore values that are not JSON objects. -The entire mapping type may be disabled as well, in which case the document is +The entire mapping may be disabled as well, in which case the document is stored in the <> field, which means it can be retrieved, but none of its contents are indexed in any way: @@ -84,10 +85,34 @@ GET my_index/_doc/session_1 <2> GET my_index/_mapping <3> -------------------------------------------------- // CONSOLE -<1> The entire mapping type is disabled. +<1> The entire mapping is disabled. <2> The document can be retrieved. <3> Checking the mapping reveals that no fields have been added. TIP: The `enabled` setting can be updated on existing fields using the <>. +Note that because Elasticsearch completely skips parsing the field +contents, it is possible to add non-object data to a disabled field: +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "properties": { + "session_data": { + "type": "object", + "enabled": false + } + } + } +} + +PUT my_index/_doc/session_1 +{ + "session_data": "foo bar" <1> +} +-------------------------------------------------- +// CONSOLE + +<1> The document is added successfully, even though `session_data` contains non-object data. \ No newline at end of file diff --git a/docs/reference/mapping/params/fielddata.asciidoc b/docs/reference/mapping/params/fielddata.asciidoc index 42f02b7ee28ea..723eea3d0fc4b 100644 --- a/docs/reference/mapping/params/fielddata.asciidoc +++ b/docs/reference/mapping/params/fielddata.asciidoc @@ -19,6 +19,7 @@ reading the entire inverted index for each segment from disk, inverting the term ↔︎ document relationship, and storing the result in memory, in the JVM heap. +[[fielddata-disabled-text-fields]] ==== Fielddata is disabled on `text` fields by default Fielddata can consume a *lot* of heap space, especially when loading high @@ -30,12 +31,10 @@ why fielddata is disabled by default. If you try to sort, aggregate, or access values from a script on a `text` field, you will see this exception: -[quote] --- +[literal] Fielddata is disabled on text fields by default. Set `fielddata=true` on [`your_field_name`] in order to load fielddata in memory by uninverting the inverted index. Note that this can however use significant memory. --- [[before-enabling-fielddata]] ==== Before enabling fielddata @@ -75,6 +74,7 @@ PUT my_index <1> Use the `my_field` field for searches. <2> Use the `my_field.keyword` field for aggregations, sorting, or in scripts. 
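A short sketch of the approach in the example above: aggregate on the `my_field.keyword` sub-field rather than enabling fielddata on the `text` field.

[source,js]
--------------------------------------------------
GET my_index/_search
{
  "aggs": {
    "top_values": {
      "terms": { "field": "my_field.keyword" } <1>
    }
  }
}
--------------------------------------------------
<1> The `keyword` sub-field is backed by doc values, so no fielddata needs to be loaded onto the heap.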
+[[enable-fielddata-text-fields]] ==== Enabling fielddata on `text` fields You can enable fielddata on an existing `text` field using the diff --git a/docs/reference/mapping/params/format.asciidoc b/docs/reference/mapping/params/format.asciidoc index 2be1bdf12d891..8e79a217a1a49 100644 --- a/docs/reference/mapping/params/format.asciidoc +++ b/docs/reference/mapping/params/format.asciidoc @@ -33,7 +33,7 @@ down to the nearest day. ==== Custom date formats Completely customizable date formats are supported. The syntax for these is explained -http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[in the Joda docs]. +https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html[DateTimeFormatter docs]. [[built-in-date-formats]] ==== Built In Formats @@ -69,7 +69,6 @@ The following tables lists all the defaults ISO formats supported: A generic ISO datetime parser where the date is mandatory and the time is optional. - http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateOptionalTimeParser--[Full details here]. `basic_date`:: diff --git a/docs/reference/mapping/params/ignore-malformed.asciidoc b/docs/reference/mapping/params/ignore-malformed.asciidoc index 30aa6c4e8bc0d..d84a7290eb7c9 100644 --- a/docs/reference/mapping/params/ignore-malformed.asciidoc +++ b/docs/reference/mapping/params/ignore-malformed.asciidoc @@ -46,8 +46,7 @@ PUT my_index/_doc/2 <1> This document will have the `text` field indexed, but not the `number_one` field. <2> This document will be rejected because `number_two` does not allow malformed values. -TIP: The `ignore_malformed` setting is allowed to have different settings for -fields of the same name in the same index. Its value can be updated on +TIP: The `ignore_malformed` setting value can be updated on existing fields using the <>. @@ -91,3 +90,17 @@ become meaningless. Elasticsearch makes it easy to check how many documents have malformed fields by using `exist` or `term` queries on the special <> field. +[[json-object-limits]] +==== Limits for JSON Objects +You can't use `ignore_malformed` with the following datatypes: + +* <> +* <> +* <> + +You also can't use `ignore_malformed` to ignore JSON objects submitted to fields +of the wrong datatype. A JSON object is any data surrounded by curly brackets +`"{}"` and includes data mapped to the nested, object, and range datatypes. + +If you submit a JSON object to an unsupported field, {es} will return an error +and reject the entire document regardless of the `ignore_malformed` setting. \ No newline at end of file diff --git a/docs/reference/mapping/params/multi-fields.asciidoc b/docs/reference/mapping/params/multi-fields.asciidoc index ee1bc02c7fd8d..448f7fd2e81ec 100644 --- a/docs/reference/mapping/params/multi-fields.asciidoc +++ b/docs/reference/mapping/params/multi-fields.asciidoc @@ -60,8 +60,7 @@ GET my_index/_search NOTE: Multi-fields do not change the original `_source` field. -TIP: The `fields` setting is allowed to have different settings for fields of -the same name in the same index. New multi-fields can be added to existing +TIP: New multi-fields can be added to existing fields using the <>. 
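A minimal sketch of that TIP: adding a new `keyword` multi-field to an existing `text` field through the update mapping endpoint (names are illustrative; the field's existing parameters are repeated in the request):

[source,js]
--------------------------------------------------
PUT my_index/_mapping
{
  "properties": {
    "city": {
      "type": "text",
      "fields": {
        "raw": { "type": "keyword" } <1>
      }
    }
  }
}
--------------------------------------------------
<1> The new `city.raw` multi-field is only populated for documents indexed after the mapping change; older documents must be reindexed or updated to pick it up.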
==== Multi-fields with multiple analyzers diff --git a/docs/reference/mapping/params/normalizer.asciidoc b/docs/reference/mapping/params/normalizer.asciidoc index da0298abda228..0f8c09552f4c6 100644 --- a/docs/reference/mapping/params/normalizer.asciidoc +++ b/docs/reference/mapping/params/normalizer.asciidoc @@ -7,7 +7,7 @@ produces a single token. The `normalizer` is applied prior to indexing the keyword, as well as at search-time when the `keyword` field is searched via a query parser such as -the <> query or via a term level query +the <> query or via a term-level query such as the <> query. [source,js] diff --git a/docs/reference/mapping/params/norms.asciidoc b/docs/reference/mapping/params/norms.asciidoc index 8a7be4baef8c5..6a250d296a2dc 100644 --- a/docs/reference/mapping/params/norms.asciidoc +++ b/docs/reference/mapping/params/norms.asciidoc @@ -11,11 +11,10 @@ don't need scoring on a specific field, you should disable norms on that field. In particular, this is the case for fields that are used solely for filtering or aggregations. -TIP: The `norms` setting must have the same setting for fields of the -same name in the same index. Norms can be disabled on existing fields using +TIP: Norms can be disabled on existing fields using the <>. -Norms can be disabled (but not reenabled) after the fact, using the +Norms can be disabled (but not reenabled after the fact), using the <> like so: [source,js] diff --git a/docs/reference/mapping/params/term-vector.asciidoc b/docs/reference/mapping/params/term-vector.asciidoc index ff05539522efc..7a97955c1734f 100644 --- a/docs/reference/mapping/params/term-vector.asciidoc +++ b/docs/reference/mapping/params/term-vector.asciidoc @@ -8,6 +8,8 @@ Term vectors contain information about the terms produced by the * the position (or order) of each term. * the start and end character offsets mapping the term to its origin in the original string. +* payloads (if they are available) — user-defined binary data + associated with each term position. These term vectors can be stored so that they can be retrieved for a particular document. @@ -20,9 +22,11 @@ The `term_vector` setting accepts: `with_positions`:: Terms and positions are stored. `with_offsets`:: Terms and character offsets are stored. `with_positions_offsets`:: Terms, positions, and character offsets are stored. +`with_positions_payloads`:: Terms, positions, and payloads are stored. +`with_positions_offsets_payloads`:: Terms, positions, offsets and payloads are stored. -The fast vector highlighter requires `with_positions_offsets`. The term -vectors API can retrieve whatever is stored. +The fast vector highlighter requires `with_positions_offsets`. +<> can retrieve whatever is stored. WARNING: Setting `with_positions_offsets` will double the size of a field's index. diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index db8b3a3d17c53..3af730723a1db 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -1,11 +1,11 @@ [[removal-of-types]] == Removal of mapping types -IMPORTANT: Indices created in Elasticsearch 6.0.0 or later may only contain a -single <>. Indices created in 5.x with multiple -mapping types will continue to function as before in Elasticsearch 6.x. -Types will be deprecated in APIs in Elasticsearch 7.0.0, and completely -removed in 8.0.0. +IMPORTANT: Indices created in Elasticsearch 7.0.0 or later no longer accept a +`_default_` mapping. 
Indices created in 6.x will continue to function as before +in Elasticsearch 6.x. Types are deprecated in APIs in 7.0, with breaking changes +to the index creation, put mapping, get mapping, put template, get template and +get field mappings APIs. [float] === What are mapping types? @@ -216,6 +216,7 @@ GET twitter/_search <1> The explicit `type` field takes the place of the implicit `_type` field. [float] +[[parent-child-mapping-types]] ==== Parent/Child without mapping types Previously, a parent-child relationship was represented by making one mapping @@ -257,22 +258,24 @@ Elasticsearch 6.x:: * The `_default_` mapping type is deprecated. -* In 6.7, the index creation, index template, and mapping APIs support a query +* In 6.8, the index creation, index template, and mapping APIs support a query string parameter (`include_type_name`) which indicates whether requests and - responses should include a type name. It defaults to `true`, and not setting - `include_type_name=false` will result in a deprecation warning. Indices which - don't have an explicit type will use the dummy type name `_doc`. + responses should include a type name. It defaults to `true`, and should be set + to an explicit value to prepare to upgrade to 7.0. Not setting `include_type_name` + will result in a deprecation warning. Indices which don't have an explicit type will + use the dummy type name `_doc`. Elasticsearch 7.x:: * Specifying types in requests is deprecated. For instance, indexing a document no longer requires a document `type`. The new index APIs are `PUT {index}/_doc/{id}` in case of explicit ids and `POST {index}/_doc` - for auto-generated ids. + for auto-generated ids. Note that in 7.0, `_doc` is a permanent part of the + path, and represents the endpoint name rather than the document type. * The `include_type_name` parameter in the index creation, index template, - and mapping APIs will default to `false`. Setting the parameter will result - in a deprecation warning. + and mapping APIs will default to `false`. Setting the parameter at all will + result in a deprecation warning. * The `_default_` mapping type is removed. @@ -349,7 +352,8 @@ POST _reindex "type": "user" }, "dest": { - "index": "users" + "index": "users", + "type": "_doc" } } @@ -360,7 +364,8 @@ POST _reindex "type": "tweet" }, "dest": { - "index": "tweets" + "index": "tweets", + "type": "_doc" } } ---- @@ -430,7 +435,7 @@ In Elasticsearch 7.0, each API will support typeless requests, and specifying a type will produce a deprecation warning. NOTE: Typeless APIs work even if the target index contains a custom type. -For example, if an index has the the custom type name `my_type`, we can add +For example, if an index has the custom type name `my_type`, we can add documents to it using typeless `index` calls, and load documents with typeless `get` calls. @@ -438,12 +443,16 @@ documents to it using typeless `index` calls, and load documents with typeless ==== Indices APIs Index creation, index template, and mapping APIs support a new `include_type_name` -url parameter that specifies whether mapping definitions in requests and responses -should contain the type name. The parameter defaults to `true` in version 6.7 to +URL parameter that specifies whether mapping definitions in requests and responses +should contain the type name. The parameter defaults to `true` in version 6.8 to match the pre-7.0 behavior of using type names in mappings. It defaults to `false` in version 7.0 and will be removed in version 8.0. 
-See some examples of interactions with Elasticsearch with this option provided: +It should be set explicitly in 6.8 to prepare to upgrade to 7.0. To avoid deprecation +warnings in 6.8, the parameter can be set to either `true` or `false`. In 7.0, setting +`include_type_name` at all will result in a deprecation warning. + +See some examples of interactions with Elasticsearch with this option set to `false`: [source,js] -------------------------------------------------- @@ -548,6 +557,10 @@ GET index/_doc/1 // CONSOLE // TEST[continued] +NOTE: In 7.0, `_doc` represents the endpoint name instead of the document type. +The `_doc` component is a permanent part of the path for the document `index`, +`get`, and `delete` APIs going forward, and will not be removed in 8.0. + For API paths that contain both a type and endpoint name like `_update`, in 7.0 the endpoint will immediately follow the index name: @@ -631,8 +644,9 @@ GET index/_doc/1 [float] ==== Index templates -It is recommended to make index templates typeless before upgrading to 7.0 by -re-adding them with `include_type_name` set to `false`. +It is recommended to make index templates typeless by re-adding them with +`include_type_name` set to `false`. Under the hood, typeless templates will use +the dummy type `_doc` when creating indices. In case typeless templates are used with typed index creation calls or typed templates are used with typeless index creation calls, the template will still @@ -705,12 +719,12 @@ indices. [float] ==== Mixed-version clusters -In a cluster composed of both 6.7 and 7.0 nodes, the parameter +In a cluster composed of both 6.8 and 7.0 nodes, the parameter `include_type_name` should be specified in indices APIs like index creation. This is because the parameter has a different default between -6.7 and 7.0, so the same mapping definition will not be valid for both +6.8 and 7.0, so the same mapping definition will not be valid for both node versions. Typeless document APIs such as `bulk` and `update` are only available as of -7.0, and will not work with 6.7 nodes. This also holds true for the typeless +7.0, and will not work with 6.8 nodes. This also holds true for the typeless versions of queries that perform document lookups, such as `terms`. diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index 76b832a529fb4..7e7f60f4012df 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -17,8 +17,6 @@ string:: <> and <> [float] === Complex datatypes - -<>:: Array support does not require a dedicated `type` <>:: `object` for single JSON objects <>:: `nested` for arrays of JSON objects @@ -53,6 +51,13 @@ string:: <> and <> <>:: Record sparse vectors of float values. +[float] +[[types-array-handling]] +=== Arrays +In {es}, arrays do not require a dedicated field datatype. Any field can contain +zero or more values by default, however, all values in the array must be of the +same datatype. See <>. 
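+
+For example (using a hypothetical index and field), the same field can hold a
+single value in one document and an array of values in another:
+
+[source,js]
+--------------------------------------------------
+PUT my_index/_doc/1
+{
+  "tags": "elasticsearch"
+}
+
+PUT my_index/_doc/2
+{
+  "tags": [ "elasticsearch", "logstash", "kibana" ]
+}
+--------------------------------------------------
+// NOTCONSOLE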
+ [float] === Multi-fields @@ -73,20 +78,22 @@ include::types/array.asciidoc[] include::types/binary.asciidoc[] -include::types/range.asciidoc[] - include::types/boolean.asciidoc[] include::types/date.asciidoc[] include::types/date_nanos.asciidoc[] +include::types/dense-vector.asciidoc[] + include::types/geo-point.asciidoc[] include::types/geo-shape.asciidoc[] include::types/ip.asciidoc[] +include::types/parent-join.asciidoc[] + include::types/keyword.asciidoc[] include::types/nested.asciidoc[] @@ -95,18 +102,16 @@ include::types/numeric.asciidoc[] include::types/object.asciidoc[] -include::types/text.asciidoc[] - -include::types/token-count.asciidoc[] - include::types/percolator.asciidoc[] -include::types/parent-join.asciidoc[] +include::types/range.asciidoc[] include::types/rank-feature.asciidoc[] include::types/rank-features.asciidoc[] -include::types/dense-vector.asciidoc[] - include::types/sparse-vector.asciidoc[] + +include::types/text.asciidoc[] + +include::types/token-count.asciidoc[] diff --git a/docs/reference/mapping/types/alias.asciidoc b/docs/reference/mapping/types/alias.asciidoc index 318124d71333e..89229ce0bb9eb 100644 --- a/docs/reference/mapping/types/alias.asciidoc +++ b/docs/reference/mapping/types/alias.asciidoc @@ -1,5 +1,8 @@ [[alias]] === Alias datatype +++++ +Alias +++++ An `alias` mapping defines an alternate name for a field in the index. The alias can be used in place of the target field in <> requests, @@ -16,7 +19,7 @@ PUT trips }, "route_length_miles": { "type": "alias", - "path": "distance" // <1> + "path": "distance" <1> }, "transit_mode": { "type": "keyword" @@ -68,6 +71,10 @@ There are a few restrictions on the target of an alias: Additionally, a field alias can only have one target. This means that it is not possible to use a field alias to query over multiple target fields in a single clause. +An alias can be changed to refer to a new target through a mappings update. A known limitation is that +if any stored percolator queries contain the field alias, they will still refer to its original target. +More information can be found in the <>. + [[unsupported-apis]] ==== Unsupported APIs diff --git a/docs/reference/mapping/types/array.asciidoc b/docs/reference/mapping/types/array.asciidoc index 385c61ebcd753..c7e3e273619be 100644 --- a/docs/reference/mapping/types/array.asciidoc +++ b/docs/reference/mapping/types/array.asciidoc @@ -1,9 +1,9 @@ [[array]] -=== Array datatype +=== Arrays -In Elasticsearch, there is no dedicated `array` type. Any field can contain -zero or more values by default, however, all values in the array must be of -the same datatype. For instance: +In Elasticsearch, there is no dedicated `array` datatype. Any field can contain +zero or more values by default, however, all values in the array must be of the +same datatype. For instance: * an array of strings: [ `"one"`, `"two"` ] * an array of integers: [ `1`, `2` ] @@ -81,6 +81,7 @@ GET my_index/_search <3> The second document contains no arrays, but can be indexed into the same fields. <4> The query looks for `elasticsearch` in the `tags` field, and matches both documents. 
+[[multi-value-fields-inverted-index]] .Multi-value fields and the inverted index **************************************************** diff --git a/docs/reference/mapping/types/binary.asciidoc b/docs/reference/mapping/types/binary.asciidoc index 22e107dab565d..41478d1965a61 100644 --- a/docs/reference/mapping/types/binary.asciidoc +++ b/docs/reference/mapping/types/binary.asciidoc @@ -1,5 +1,8 @@ [[binary]] === Binary datatype +++++ +Binary +++++ The `binary` type accepts a binary value as a https://en.wikipedia.org/wiki/Base64[Base64] encoded string. The field is not diff --git a/docs/reference/mapping/types/boolean.asciidoc b/docs/reference/mapping/types/boolean.asciidoc index 962022060b65b..790b5013d1af0 100644 --- a/docs/reference/mapping/types/boolean.asciidoc +++ b/docs/reference/mapping/types/boolean.asciidoc @@ -1,5 +1,8 @@ [[boolean]] === Boolean datatype +++++ +Boolean +++++ Boolean fields accept JSON `true` and `false` values, but can also accept strings which are interpreted as either true or false: diff --git a/docs/reference/mapping/types/date.asciidoc b/docs/reference/mapping/types/date.asciidoc index 94aadb46fb2b6..3a5c480584613 100644 --- a/docs/reference/mapping/types/date.asciidoc +++ b/docs/reference/mapping/types/date.asciidoc @@ -1,5 +1,8 @@ [[date]] === Date datatype +++++ +Date +++++ JSON doesn't have a date datatype, so dates in Elasticsearch can either be: diff --git a/docs/reference/mapping/types/date_nanos.asciidoc b/docs/reference/mapping/types/date_nanos.asciidoc index 45d53a19b72f1..5fb84068bd23e 100644 --- a/docs/reference/mapping/types/date_nanos.asciidoc +++ b/docs/reference/mapping/types/date_nanos.asciidoc @@ -1,5 +1,8 @@ [[date_nanos]] -=== date_nanos datatype +=== Date nanoseconds datatype +++++ +Date nanoseconds +++++ This datatype is an addition to the `date` datatype. However there is an important distinction between the two. The existing `date` datatype stores diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index b97566361a05c..9cd7fa18d3dbb 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -1,5 +1,8 @@ [[dense-vector]] === Dense vector datatype +++++ +Dense vector +++++ experimental[] diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 51e137fbc33b6..dc9c275ba5329 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -1,5 +1,8 @@ [[geo-point]] === Geo-point datatype +++++ +Geo-point +++++ Fields of type `geo_point` accept latitude-longitude pairs, which can be used: diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index a46b8a3f8a87c..5b6ba4fffc04e 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -1,5 +1,8 @@ [[geo-shape]] -=== Geo-Shape datatype +=== Geo-shape datatype +++++ +Geo-shape +++++ The `geo_shape` datatype facilitates the indexing of and searching with arbitrary geo shapes such as rectangles and polygons. It should be @@ -323,6 +326,7 @@ POST /example/_doc // CONSOLE [float] +[[linestring]] ===== http://geojson.org/geojson-spec.html#id3[LineString] A `linestring` defined by an array of two or more positions. By @@ -357,6 +361,7 @@ The above `linestring` would draw a straight line starting at the White House to the US Capitol Building. 
[float] +[[polygon]] ===== http://www.geojson.org/geojson-spec.html#id4[Polygon] A polygon is defined by a list of a list of points. The first and last @@ -473,6 +478,7 @@ POST /example/_doc // CONSOLE [float] +[[multipoint]] ===== http://www.geojson.org/geojson-spec.html#id5[MultiPoint] The following is an example of a list of geojson points: @@ -503,6 +509,7 @@ POST /example/_doc // CONSOLE [float] +[[multilinestring]] ===== http://www.geojson.org/geojson-spec.html#id6[MultiLineString] The following is an example of a list of geojson linestrings: @@ -535,6 +542,7 @@ POST /example/_doc // CONSOLE [float] +[[multipolygon]] ===== http://www.geojson.org/geojson-spec.html#id7[MultiPolygon] The following is an example of a list of geojson polygons (second polygon contains a hole): @@ -567,6 +575,7 @@ POST /example/_doc // CONSOLE [float] +[[geometry_collection]] ===== http://geojson.org/geojson-spec.html#geometrycollection[Geometry Collection] The following is an example of a collection of geojson geometry objects: @@ -609,7 +618,7 @@ POST /example/_doc Elasticsearch supports an `envelope` type, which consists of coordinates for upper left and lower right points of the shape to represent a -bounding rectangle in the format [[minLon, maxLat],[maxLon, minLat]]: +bounding rectangle in the format `[[minLon, maxLat], [maxLon, minLat]]`: [source,js] -------------------------------------------------- diff --git a/docs/reference/mapping/types/ip.asciidoc b/docs/reference/mapping/types/ip.asciidoc index 7f3f5f57d7077..a1a56cf69fd07 100644 --- a/docs/reference/mapping/types/ip.asciidoc +++ b/docs/reference/mapping/types/ip.asciidoc @@ -1,5 +1,8 @@ [[ip]] === IP datatype +++++ +IP +++++ An `ip` field can index/store either https://en.wikipedia.org/wiki/IPv4[IPv4] or https://en.wikipedia.org/wiki/IPv6[IPv6] addresses. @@ -69,6 +72,7 @@ The following parameters are accepted by `ip` fields: the <> field. Accepts `true` or `false` (default). +[[query-ip-fields]] ==== Querying `ip` fields The most common way to query ip addresses is to use the diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index 8ac0983dc9550..61a603b4f2d1f 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -1,5 +1,8 @@ [[keyword]] === Keyword datatype +++++ +Keyword +++++ A field to index structured content such as email addresses, hostnames, status codes, zip codes or tags. diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index f420e680c8590..7adfe392485e9 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -1,5 +1,8 @@ [[nested]] === Nested datatype +++++ +Nested +++++ The `nested` type is a specialised version of the <> datatype that allows arrays of objects to be indexed in a way that they can be queried @@ -65,6 +68,7 @@ GET my_index/_search // CONSOLE // TEST[continued] +[[nested-fields-array-objects]] ==== Using `nested` fields for arrays of objects If you need to index arrays of objects and to maintain the independence of @@ -158,6 +162,22 @@ Nested documents can be: * sorted with <>. * retrieved and highlighted with <>. +[IMPORTANT] +============================================= + +Because nested documents are indexed as separate documents, they can only be +accessed within the scope of the `nested` query, the +`nested`/`reverse_nested` aggregations, or <>. 
+ +For instance, if a string field within a nested document has +<> set to `offsets` to allow use of the postings +during the highlighting, these offsets will not be available during the main highlighting +phase. Instead, highlighting needs to be performed via +<>. The same consideration applies when loading +fields during a search through <> +or <>. + +============================================= [[nested-params]] ==== Parameters for `nested` fields @@ -177,35 +197,32 @@ The following parameters are accepted by `nested` fields: may be added to an existing nested object. -[IMPORTANT] -============================================= - -Because nested documents are indexed as separate documents, they can only be -accessed within the scope of the `nested` query, the -`nested`/`reverse_nested` aggregations, or <>. +[float] +=== Limits on `nested` mappings and objects -For instance, if a string field within a nested document has -<> set to `offsets` to allow use of the postings -during the highlighting, these offsets will not be available during the main highlighting -phase. Instead, highlighting needs to be performed via -<>. +As described earlier, each nested object is indexed as a separate document under the hood. +Continuing with the example above, if we indexed a single document containing 100 `user` objects, +then 101 Lucene documents would be created -- one for the parent document, and one for each +nested object. Because of the expense associated with `nested` mappings, Elasticsearch puts +settings in place to guard against performance problems: -============================================= +`index.mapping.nested_fields.limit`:: + The `nested` type should only be used in special cases, when arrays of objects need to be + queried independently of each other. To safeguard against poorly designed mappings, this setting + limits the number of unique `nested` types per index. In our example, the `user` mapping would + count as only 1 towards this limit. Defaults to 50. -==== Limiting the number of `nested` fields +`index.mapping.nested_objects.limit`:: -Indexing a document with 100 nested fields actually indexes 101 documents as each nested -document is indexed as a separate document. To safeguard against ill-defined mappings -the number of nested fields that can be defined per index has been limited to 50. See -<>. + This setting limits the number of nested objects that a single document may contain across all + `nested` types, in order to prevent out of memory errors when a document contains too many nested + objects. To illustrate how the setting works, say we added another `nested` type called `comments` + to our example mapping above. Then for each document, the combined number of `user` and `comment` + objects it contains must be below the limit. Defaults to 10000. +Additional background on these settings, including information on their default values, can be found +in <>. -==== Limiting the number of `nested` json objects -Indexing a document with an array of 100 objects within a nested field, will actually -create 101 documents, as each nested object will be indexed as a separate document. -To prevent out of memory errors when a single document contains too many nested json -objects, the number of nested json objects that a single document may contain across all fields -has been limited to 10000. See <>. 
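+
+As a rough sketch of how the limits described above can be adjusted (the index
+name and field names here are made up), both are regular index settings and
+can be provided at index creation time, for example:
+
+[source,js]
+--------------------------------------------------
+PUT my_index
+{
+  "settings": {
+    "index.mapping.nested_fields.limit": 100
+  },
+  "mappings": {
+    "properties": {
+      "user": { "type": "nested" },
+      "comments": { "type": "nested" }
+    }
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE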
diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index f2977957ff463..7298b54873b90 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -1,5 +1,8 @@ [[number]] === Numeric datatypes +++++ +Numeric +++++ The following numeric types are supported: diff --git a/docs/reference/mapping/types/object.asciidoc b/docs/reference/mapping/types/object.asciidoc index f5b9a9df85617..e127415c6181c 100644 --- a/docs/reference/mapping/types/object.asciidoc +++ b/docs/reference/mapping/types/object.asciidoc @@ -1,5 +1,8 @@ [[object]] === Object datatype +++++ +Object +++++ JSON documents are hierarchical in nature: the document may contain inner objects which, in turn, may contain inner objects themselves: diff --git a/docs/reference/mapping/types/parent-join.asciidoc b/docs/reference/mapping/types/parent-join.asciidoc index 39bcaa96d7764..14c7b7b275891 100644 --- a/docs/reference/mapping/types/parent-join.asciidoc +++ b/docs/reference/mapping/types/parent-join.asciidoc @@ -1,5 +1,8 @@ [[parent-join]] -=== `join` datatype +=== Join datatype +++++ +Join +++++ The `join` datatype is a special field that creates parent/child relation within documents of the same index. diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc index 0096746d2df35..cdc10bcaa036a 100644 --- a/docs/reference/mapping/types/percolator.asciidoc +++ b/docs/reference/mapping/types/percolator.asciidoc @@ -1,5 +1,8 @@ [[percolator]] === Percolator type +++++ +Percolator +++++ The `percolator` field type parses a json structure into a native query and stores that query, so that the <> @@ -720,6 +723,7 @@ fail. ==== Limitations [float] +[[parent-child]] ===== Parent/child Because the `percolate` query is processing one document at a time, it doesn't support queries and filters that run @@ -743,3 +747,11 @@ The script inside a `script` query can only access doc values fields. The `perco into an in-memory index. This in-memory index doesn't support stored fields and because of that the `_source` field and other stored fields are not stored. This is the reason why in the `script` query the `_source` and other stored fields aren't available. + +[float] +===== Field aliases + +Percolator queries that contain <> may not always behave as expected. In particular, if a +percolator query is registered that contains a field alias, and then that alias is updated in the mappings to refer +to a different field, the stored query will still refer to the original target field. To pick up the change to +the field alias, the percolator query must be explicitly reindexed. diff --git a/docs/reference/mapping/types/range.asciidoc b/docs/reference/mapping/types/range.asciidoc index 79c9e6629c696..91bbbd0d6d044 100644 --- a/docs/reference/mapping/types/range.asciidoc +++ b/docs/reference/mapping/types/range.asciidoc @@ -1,5 +1,8 @@ [[range]] === Range datatypes +++++ +Range +++++ The following range types are supported: diff --git a/docs/reference/mapping/types/rank-feature.asciidoc b/docs/reference/mapping/types/rank-feature.asciidoc index 780a68216f49e..d066d0452d353 100644 --- a/docs/reference/mapping/types/rank-feature.asciidoc +++ b/docs/reference/mapping/types/rank-feature.asciidoc @@ -1,5 +1,8 @@ [[rank-feature]] === Rank feature datatype +++++ +Rank feature +++++ A `rank_feature` field can index numbers so that they can later be used to boost documents in queries with a <> query. 
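+
+A minimal mapping sketch (the index name `my_index` and the field name
+`pagerank` are hypothetical) might look like this:
+
+[source,js]
+--------------------------------------------------
+PUT my_index
+{
+  "mappings": {
+    "properties": {
+      "pagerank": {
+        "type": "rank_feature"
+      }
+    }
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE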
diff --git a/docs/reference/mapping/types/rank-features.asciidoc b/docs/reference/mapping/types/rank-features.asciidoc index 9bc960b7f8351..b80db43651dd7 100644 --- a/docs/reference/mapping/types/rank-features.asciidoc +++ b/docs/reference/mapping/types/rank-features.asciidoc @@ -1,5 +1,8 @@ [[rank-features]] === Rank features datatype +++++ +Rank features +++++ A `rank_features` field can index numeric feature vectors, so that they can later be used to boost documents in queries with a diff --git a/docs/reference/mapping/types/sparse-vector.asciidoc b/docs/reference/mapping/types/sparse-vector.asciidoc index 38561789b5d3f..3c9f088ab5819 100644 --- a/docs/reference/mapping/types/sparse-vector.asciidoc +++ b/docs/reference/mapping/types/sparse-vector.asciidoc @@ -1,5 +1,8 @@ [[sparse-vector]] === Sparse vector datatype +++++ +Sparse vector +++++ experimental[] diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index ee972918988ad..46477972d5cf9 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -1,5 +1,8 @@ [[text]] === Text datatype +++++ +Text +++++ A field to index full-text values, such as the body of an email or the description of a product. These fields are `analyzed`, that is they are passed through an @@ -30,6 +33,8 @@ PUT my_index -------------------------------- // CONSOLE +[[text-multi-fields]] +==== Use a field as both text and keyword Sometimes it is useful to have both a full text (`text`) and a keyword (`keyword`) version of the same field: one for full text search and the other for aggregations and sorting. This can be achieved with diff --git a/docs/reference/mapping/types/token-count.asciidoc b/docs/reference/mapping/types/token-count.asciidoc index d574c25e93d19..a435be1e54d51 100644 --- a/docs/reference/mapping/types/token-count.asciidoc +++ b/docs/reference/mapping/types/token-count.asciidoc @@ -1,5 +1,8 @@ [[token-count]] === Token count datatype +++++ +Token count +++++ A field of type `token_count` is really an <> field which accepts string values, analyzes them, then indexes the number of tokens in the diff --git a/docs/reference/migration/apis/assistance.asciidoc b/docs/reference/migration/apis/assistance.asciidoc deleted file mode 100644 index 3a220644c16de..0000000000000 --- a/docs/reference/migration/apis/assistance.asciidoc +++ /dev/null @@ -1,95 +0,0 @@ -[role="xpack"] -[testenv="basic"] -[[migration-api-assistance]] -=== Migration assistance API -++++ -Migration assistance -++++ - -The Migration Assistance API analyzes existing indices in the cluster and -returns the information about indices that require some changes before the -cluster can be upgraded to the next major version. - -[float] -==== Request - -`GET /_migration/assistance` + - -`GET /_migration/assistance/` - -//==== Description - -[float] -==== Path Parameters - -`index_name`:: - (string) Identifier for the index. It can be an index name or a wildcard - expression. 
- -//==== Query Parameters - -//==== Authorization - -[float] -==== Examples - -To see a list of indices that needs to be upgraded or reindexed, submit a GET -request to the `/_migration/assistance` endpoint: - -[source,js] --------------------------------------------------- -GET /_migration/assistance --------------------------------------------------- -// CONSOLE -// TEST[skip:cannot create an old index in docs test] - -A successful call returns a list of indices that need to be updated or reindexed: - -[source,js] --------------------------------------------------- -{ - "indices" : { - ".watches" : { - "action_required" : "upgrade" - }, - ".security" : { - "action_required" : "upgrade" - }, - "my_old_index": { - "action_required" : "reindex" - }, - "my_other_old_index": { - "action_required" : "reindex" - } - } -} --------------------------------------------------- -// NOTCONSOLE - -To check a particular index or set of indices, specify this index name or mask -as the last part of the `/_migration/assistance/index_name` endpoint: - -[source,js] --------------------------------------------------- -GET /_migration/assistance/my_* --------------------------------------------------- -// CONSOLE -// TEST[skip:cannot create an old index in docs test] - -A successful call returns a list of indices that needs to be updated or reindexed -and match the index specified on the endpoint: - -[source,js] --------------------------------------------------- -{ - "indices" : { - "my_old_index": { - "action_required" : "reindex" - }, - "my_other_old_index": { - "action_required" : "reindex" - } - } -} --------------------------------------------------- -// NOTCONSOLE diff --git a/docs/reference/migration/apis/deprecation.asciidoc b/docs/reference/migration/apis/deprecation.asciidoc index 88de3f5d6e3fa..7ae840a37ea49 100644 --- a/docs/reference/migration/apis/deprecation.asciidoc +++ b/docs/reference/migration/apis/deprecation.asciidoc @@ -6,6 +6,11 @@ Deprecation info ++++ +IMPORTANT: Use this API to check for deprecated configuration before performing +a major version upgrade. You should run it on the the last minor version of the +major version you are upgrading from, as earlier minor versions may not include +all deprecations. + The deprecation API is to be used to retrieve information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. diff --git a/docs/reference/migration/apis/upgrade.asciidoc b/docs/reference/migration/apis/upgrade.asciidoc deleted file mode 100644 index 3545a4441df3e..0000000000000 --- a/docs/reference/migration/apis/upgrade.asciidoc +++ /dev/null @@ -1,142 +0,0 @@ -[role="xpack"] -[testenv="basic"] -[[migration-api-upgrade]] -=== Migration upgrade API -++++ -Migration upgrade -++++ - -The Migration Upgrade API performs the upgrade of internal indices to make them -compatible with the next major version. - -[float] -==== Request - -`POST /_migration/upgrade/` - -[float] -==== Description - -Indices must be upgraded one at a time. - -[float] -==== Path Parameters - -`index_name`:: - (string) Identifier for the index. - -`wait_for_completion`:: - (boolean) Defines whether the upgrade call blocks until the upgrade process is - finished. The default value is `true`. If set to `false`, the upgrade can be - performed asynchronously. 
- -//==== Query Parameters - -//==== Authorization - -[float] -==== Examples - -The following example submits a POST request to the -`/_migration/upgrade/` endpoint: - -[source,js] --------------------------------------------------- -POST /_migration/upgrade/.watches --------------------------------------------------- -// CONSOLE -// TEST[skip:cannot create an old index in docs test] - -A successful call returns the statistics about the upgrade process: - -[source,js] --------------------------------------------------- -{ - "took" : 127, - "timed_out" : false, - "total" : 4, - "updated" : 0, - "created" : 4, - "deleted" : 0, - "batches" : 1, - "version_conflicts" : 0, - "noops" : 0, - "retries" : { - "bulk" : 0, - "search" : 0 - }, - "throttled_millis" : 0, - "failures" : [ ] -} --------------------------------------------------- -// NOTCONSOLE - -The following example upgrades a large index asynchronously by specifying the -`wait_for_completion` parameter: - -[source,js] --------------------------------------------------- -POST /_migration/upgrade/.watches?wait_for_completion=false --------------------------------------------------- -// CONSOLE -// TEST[skip:cannot create an old index in docs test] - -This call should return the id of the upgrade process task: - -[source,js] --------------------------------------------------- -{ - "task" : "PFvgv7T6TGumRyFF3vqTFg:1137" -} --------------------------------------------------- -// NOTCONSOLE - -The status of the running or finished upgrade requests can be obtained by using -the <>: - -[source,js] --------------------------------------------------- -GET _tasks/PFvgv7T6TGumRyFF3vqTFg:1137?detailed=true --------------------------------------------------- -// CONSOLE -// TEST[skip:cannot create an old index in docs test] - -[source,js] --------------------------------------------------- -{ - "completed" : true, <1> - "task" : { - "node" : "PFvgv7T6TGumRyFF3vqTFg", - "id" : 1137, - "type" : "transport", - "action" : "cluster:admin/xpack/upgrade", - "description" : "", - "start_time_in_millis" : 1500650625413, - "running_time_in_nanos" : 947456819, - "cancellable" : true - }, - "response" : { <2> - "took" : 212, - "timed_out" : false, - "total" : 4, - "updated" : 0, - "created" : 4, - "deleted" : 0, - "batches" : 1, - "version_conflicts" : 0, - "noops" : 0, - "retries" : { - "bulk" : 0, - "search" : 0 - }, - "throttled_millis" : 0, - "failures" : [ ] - } -} --------------------------------------------------- -// NOTCONSOLE - -<1> If the `completed` field value is `true`, the upgrade request has finished. -If it is `false`, the request is still running. - -<2> The `response` field contains the status of the upgrade request. diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc index aa74068419df0..fce0cae65cfc3 100644 --- a/docs/reference/migration/index.asciidoc +++ b/docs/reference/migration/index.asciidoc @@ -3,23 +3,16 @@ [partintro] -- -This section discusses the changes that you need to be aware of when migrating -your application from one version of Elasticsearch to another. +This section discusses the changes that you need to be aware of to migrate +your application to {version}. For more information about what's new in this +release, see the <> and <>. -As a general rule: +* <> +* <> -* Migration between minor versions -- e.g. `7.x` to `7.y` -- can be - performed by <>. - -* Migration between consecutive major versions -- e.g. `6.x` to `7.x` -- - requires a <>. 
- -* Migration between non-consecutive major versions -- e.g. `5.x` to `7.x` -- - is not supported. - -For more information, see <>. - -See also <> and <>. +For information about how to upgrade your cluster, see <>. -- + +include::migrate_7_1.asciidoc[] include::migrate_7_0.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index 25c2e3eef440d..fa800b08535c9 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -13,8 +13,11 @@ See also <> and <>. * <> * <> * <> +* <> * <> +* <> * <> +* <> * <> * <> * <> @@ -27,9 +30,14 @@ See also <> and <>. * <> * <> * <> +* <> +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] [float] -=== Indices created before 7.0 +==== Indices created before 7.0 Elasticsearch 7.0 can read indices created in version 6.0 or above. An Elasticsearch 7.0 node will not start in the presence of indices created in a @@ -44,13 +52,18 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x. ========================================= +// end::notable-breaking-changes[] + include::migrate_7_0/aggregations.asciidoc[] include::migrate_7_0/analysis.asciidoc[] include::migrate_7_0/cluster.asciidoc[] include::migrate_7_0/discovery.asciidoc[] +include::migrate_7_0/ingest.asciidoc[] include::migrate_7_0/indices.asciidoc[] include::migrate_7_0/mappings.asciidoc[] +include::migrate_7_0/ml.asciidoc[] include::migrate_7_0/search.asciidoc[] +include::migrate_7_0/suggesters.asciidoc[] include::migrate_7_0/packaging.asciidoc[] include::migrate_7_0/plugins.asciidoc[] include::migrate_7_0/api.asciidoc[] @@ -62,3 +75,4 @@ include::migrate_7_0/restclient.asciidoc[] include::migrate_7_0/low_level_restclient.asciidoc[] include::migrate_7_0/logging.asciidoc[] include::migrate_7_0/node.asciidoc[] +include::migrate_7_0/java_time.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0/aggregations.asciidoc b/docs/reference/migration/migrate_7_0/aggregations.asciidoc index 2b8c2ed9cb783..f8a38495b6ec0 100644 --- a/docs/reference/migration/migrate_7_0/aggregations.asciidoc +++ b/docs/reference/migration/migrate_7_0/aggregations.asciidoc @@ -2,25 +2,39 @@ [[breaking_70_aggregations_changes]] === Aggregations changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + + [float] +[[removed-global-ordinals-hash-and-global-ordinals-low-cardinality-terms-agg]] ==== Deprecated `global_ordinals_hash` and `global_ordinals_low_cardinality` execution hints for terms aggregations have been removed These `execution_hint` are removed and should be replaced by `global_ordinals`. +//tag::notable-breaking-changes[] [float] +[[search-max-buckets-cluster-setting]] ==== `search.max_buckets` in the cluster setting The dynamic cluster setting named `search.max_buckets` now defaults to 10,000 (instead of unlimited in the previous version). Requests that try to return more than the limit will fail with an exception. +//end::notable-breaking-changes[] [float] +[[missing-option-removed-composite-agg]] ==== `missing` option of the `composite` aggregation has been removed The `missing` option of the `composite` aggregation, deprecated in 6.x, has been removed. `missing_bucket` should be used instead. 
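+
+For example, a `composite` source that previously relied on `missing` can use
+`missing_bucket` instead (the index and field names below are hypothetical):
+
+[source,js]
+--------------------------------------------------
+GET my_index/_search
+{
+  "size": 0,
+  "aggs": {
+    "my_buckets": {
+      "composite": {
+        "sources": [
+          {
+            "product": {
+              "terms": {
+                "field": "product",
+                "missing_bucket": true
+              }
+            }
+          }
+        ]
+      }
+    }
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+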
[float] +[[replace-params-agg-with-state-context-variable]] ==== Replaced `params._agg` with `state` context variable in scripted metric aggregations The object used to share aggregation state between the scripts in a Scripted Metric @@ -28,7 +42,25 @@ Aggregation is now a variable called `state` available in the script context, ra being provided via the `params` object as `params._agg`. [float] +[[reduce-script-combine-script-params-mandatory]] ==== Make metric aggregation script parameters `reduce_script` and `combine_script` mandatory The metric aggregation has been changed to require these two script parameters to ensure users are explicitly defining how their data is processed. + +[float] +[[percentiles-percentile-ranks-return-null-instead-nan]] +==== `percentiles` and `percentile_ranks` now return `null` instead of `NaN` + +The `percentiles` and `percentile_ranks` aggregations used to return `NaN` in +the response if they were applied to an empty set of values. Because `NaN` is +not officially supported by JSON, it has been replaced with `null`. + +[float] +[[stats-extended-stats-return-zero-instead-null]] +==== `stats` and `extended_stats` now return 0 instead of `null` for zero docs + +When the `stats` and `extended_stats` aggregations collected zero docs (`doc_count: 0`), +their value would be `null`. This was in contrast with the `sum` aggregation which +would return `0`. The `stats` and `extended_stats` aggs are now consistent with +`sum` and also return zero. diff --git a/docs/reference/migration/migrate_7_0/analysis.asciidoc b/docs/reference/migration/migrate_7_0/analysis.asciidoc index 36ad41be09aa1..c586953378ea2 100644 --- a/docs/reference/migration/migrate_7_0/analysis.asciidoc +++ b/docs/reference/migration/migrate_7_0/analysis.asciidoc @@ -2,7 +2,15 @@ [[breaking_70_analysis_changes]] === Analysis changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] +[[limit-number-of-tokens-produced-by-analyze]] ==== Limiting the number of tokens produced by _analyze To safeguard against out of memory errors, the number of tokens that can be produced @@ -20,6 +28,7 @@ limited to 1000000. This default limit can be changed for a particular index with the index setting `index.highlight.max_analyzed_offset`. [float] +[[delimited-payload-filter-renaming]] ==== `delimited_payload_filter` renaming The `delimited_payload_filter` was deprecated and renamed to `delimited_payload` in 6.2. @@ -28,6 +37,7 @@ name in new indices created in 7.0 will throw an error. Use the new name `delimi instead. [float] +[[standard-filter-removed]] ==== `standard` filter has been removed The `standard` token filter has been removed because it doesn't change anything in the stream. @@ -38,4 +48,14 @@ The `standard` token filter has been removed because it doesn't change anything The `standard_html_strip` analyzer has been deprecated, and should be replaced with a combination of the `standard` tokenizer and `html_strip` char_filter. Indexes created using this analyzer will still be readable in elasticsearch 7.0, -but it will not be possible to create new indexes using it. \ No newline at end of file +but it will not be possible to create new indexes using it. 
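+
+A rough replacement sketch (the index and analyzer names here are hypothetical)
+combines the `standard` tokenizer with the `html_strip` character filter;
+additional token filters such as `lowercase` may be needed to match the old
+behaviour:
+
+[source,js]
+--------------------------------------------------
+PUT my_index
+{
+  "settings": {
+    "analysis": {
+      "analyzer": {
+        "my_html_analyzer": {
+          "type": "custom",
+          "tokenizer": "standard",
+          "char_filter": [ "html_strip" ],
+          "filter": [ "lowercase" ]
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE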
+ +[float] +[[deprecated-ngram-edgengram-token-filter-cannot-be-used]] +==== The deprecated `nGram` and `edgeNGram` token filter cannot be used on new indices + +The `nGram` and `edgeNGram` token filter names have been deprecated in an earlier 6.x version. +Indexes created using these token filters will still be readable in elasticsearch 7.0 but indexing +documents using those filter names will issue a deprecation warning. Using the deprecated names on +new indices starting with version 7.0.0 on will be prohibited and throw an error when indexing +or analyzing documents. Both names should be replaced by `ngram` or `edge_ngram` respectively. diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc index 6c1d03760f904..de15860564eb5 100644 --- a/docs/reference/migration/migrate_7_0/api.asciidoc +++ b/docs/reference/migration/migrate_7_0/api.asciidoc @@ -2,6 +2,11 @@ [[breaking_70_api_changes]] === API changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + [float] ==== Internal Versioning is no longer supported for optimistic concurrency control @@ -14,9 +19,12 @@ while handling a write operation, it may expose a version that will then be reus new primary. Due to that issue, internal versioning can no longer be used and is replaced by a new -method based on sequence numbers. See <> for more details. +method based on sequence numbers. See +{ref}/optimistic-concurrency-control.html[Optimistic concurrency control] for +more details. Note that the `external` versioning type is still fully supported. +//end::notable-breaking-changes[] [float] ==== Camel case and underscore parameters deprecated in 6.x have been removed @@ -68,10 +76,33 @@ pools. Note that `core` and `max` will be populated for scaling thread pools, and `size` will be populated for fixed thread pools. [float] -==== The parameter `fields` deprecated in 6.x has been removed from Bulk request -and Update request. The Update API returns `400 - Bad request` if request contains +[[fields-param-removed-bulk-update-request]] +==== The parameter `fields` deprecated in 6.x has been removed from Bulk request +and Update request. The Update API returns `400 - Bad request` if request contains unknown parameters (instead of ignored in the previous version). +[float] +==== PUT Document with Version error message changed when document is missing + +If you attempt to `PUT` a document with versioning (e.g. `PUT /test/_doc/1?version=4`) +but the document does not exist, a cryptic message is returned: + +[source,text] +---------- +version conflict, current version [-1] is different than the one provided [4] +---------- + +Now if the document is missing a more helpful message is returned: + +[source,text] +---------- +document does not exist (expected version [4]) +---------- + +Although exceptions messages are liable to change and not generally subject to +backwards compatibility, the nature of this message might mean clients are relying +on parsing the version numbers and so the format change might impact some users. + [float] [[remove-suggest-metric]] ==== Remove support for `suggest` metric/index metric in indices stats and nodes stats APIs @@ -88,6 +119,7 @@ body. Specifying `fields` in the request body as opposed to a parameter was depr in 6.4.0, and is now unsupported in 7.0.0. 
[float] +[[copy-settings-deprecated-shrink-split-apis]] ==== `copy_settings` is deprecated on shrink and split APIs Versions of Elasticsearch prior to 6.4.0 did not copy index settings on shrink @@ -113,6 +145,7 @@ current user was not authorized for any alias. An empty response with status 200 - OK is now returned instead at all times. [float] +[[user-object-removed-put-user-api]] ==== Put User API response no longer has `user` object The Put User API response was changed in 6.5.0 to add the `created` field @@ -120,6 +153,7 @@ outside of the user object where it previously had been. In 7.0.0 the user object has been removed in favor of the top level `created` field. [float] +[[source-include-exclude-params-removed]] ==== Source filtering url parameters `_source_include` and `_source_exclude` have been removed The deprecated in 6.x url parameters are now removed. Use `_source_includes` and `_source_excludes` instead. @@ -138,6 +172,7 @@ removed. [float] +[[deprecated-termvector-endpoint-removed]] ==== Deprecated `_termvector` endpoint removed The `_termvector` endpoint was deprecated in 2.0 and has now been removed. @@ -154,3 +189,46 @@ However, the monitoring APIs were the only exception to this rule. This exceptio has been forfeited and index monitoring privileges have to be granted explicitly, using the `allow_restricted_indices` flag on the permission (as any other index privilege). + +[float] +[[remove-get-support-cache-clear-api]] +==== Removed support for `GET` on the `_cache/clear` API + +The `_cache/clear` API no longer supports the `GET` HTTP verb. It must be called +with `POST`. + +[float] +==== Cluster state size metrics removed from Cluster State API Response + +The `compressed_size` / `compressed_size_in_bytes` fields were removed from +the Cluster State API response. The calculation of the size was expensive and had +dubious value, so the field was removed from the response. + +[float] +==== Migration Assistance API has been removed + +The Migration Assistance API has been functionally replaced by the +Deprecation Info API, and the Migration Upgrade API is not used for the +transition from ES 6.x to 7.x, and does not need to be kept around to +repair indices that were not properly upgraded before upgrading the +cluster, as was the case in 6. + +[float] +==== Changes to thread pool naming in Node and Cat APIs +The `thread_pool` information returned from the Nodes and Cat APIs has been +standardized to use the same terminology as the thread pool configurations. +This means the response will align with the configuration instead of being +the same across all the thread pools, regardless of type. + +[float] +==== Return 200 when cluster has valid read-only blocks +If the cluster was configured with `no_master_block: write` and lost its master, +it would return a `503` status code from a main request (`GET /`) even though +there are viable read-only nodes available. The cluster now returns 200 status +in this situation. + +[float] +==== Clearing indices cache is now POST-only +Clearing the cache indices could previously be done via GET and POST. As GET should +only support read only non state-changing operations, this is no longer allowed. +Only POST can be used to clear the cache. 
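+
+For example (`my_index` is a placeholder index name):
+
+[source,js]
+--------------------------------------------------
+POST /my_index/_cache/clear
+--------------------------------------------------
+// NOTCONSOLE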
diff --git a/docs/reference/migration/migrate_7_0/cluster.asciidoc b/docs/reference/migration/migrate_7_0/cluster.asciidoc index bfe7d5df2d094..260d5f766964c 100644 --- a/docs/reference/migration/migrate_7_0/cluster.asciidoc +++ b/docs/reference/migration/migrate_7_0/cluster.asciidoc @@ -2,26 +2,41 @@ [[breaking_70_cluster_changes]] === Cluster changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] +[[_literal_literal_is_no_longer_allowed_in_cluster_name]] ==== `:` is no longer allowed in cluster name Due to cross-cluster search using `:` to separate a cluster and index name, cluster names may no longer contain `:`. [float] +[[new-default-wait-for-active-shards-param]] ==== New default for `wait_for_active_shards` parameter of the open index command The default value for the `wait_for_active_shards` parameter of the open index API is changed from 0 to 1, which means that the command will now by default wait for all primary shards of the opened index to be allocated. +//tag::notable-breaking-changes[] [float] +[[shard-preferences-removed]] ==== Shard preferences `_primary`, `_primary_first`, `_replica`, and `_replica_first` are removed These shard preferences are removed in favour of the `_prefer_nodes` and `_only_nodes` preferences. +//end::notable-breaking-changes[] +//tag::notable-breaking-changes[] [float] ==== Cluster-wide shard soft limit Clusters now have soft limits on the total number of open shards in the cluster based on the number of nodes and the `cluster.max_shards_per_node` cluster setting, to prevent accidental operations that would destabilize the cluster. -More information can be found in the <>. +More information can be found in the +{ref}/misc-cluster.html[documentation for that setting]. +//end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_7_0/discovery.asciidoc b/docs/reference/migration/migrate_7_0/discovery.asciidoc index 56449625246cd..9e799ba3b6108 100644 --- a/docs/reference/migration/migrate_7_0/discovery.asciidoc +++ b/docs/reference/migration/migrate_7_0/discovery.asciidoc @@ -2,19 +2,25 @@ [[breaking_70_discovery_changes]] === Discovery changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + [float] ==== Cluster bootstrapping is required if discovery is configured The first time a cluster is started, `cluster.initial_master_nodes` must be set to perform cluster bootstrapping. It should contain the names of the master-eligible nodes in the initial cluster and be defined on every -master-eligible node in the cluster. See <> for an example, and the -<> describes this setting in more detail. +master-eligible node in the cluster. See +{ref}/discovery-settings.html[the discovery settings summary] for an example, and the +{ref}/modules-discovery-bootstrap-cluster.html[cluster bootstrapping reference +documentation] describes this setting in more detail. The `discovery.zen.minimum_master_nodes` setting is permitted, but ignored, on 7.x nodes. +//end::notable-breaking-changes[] [float] ==== Removing master-eligible nodes sometimes requires voting exclusions @@ -28,6 +34,7 @@ such as data-only nodes or coordinating-only nodes, voting exclusions are not required. Likewise, if you add nodes to the cluster, voting exclusions are not required. 
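+
+As a sketch of this workflow (the node name `node-1` is a placeholder, and the
+exact request syntax should be checked against the voting configuration
+exclusions documentation for your version), a master-eligible node can be
+excluded before it is shut down and the exclusions cleared afterwards:
+
+[source,js]
+--------------------------------------------------
+POST /_cluster/voting_config_exclusions/node-1
+
+DELETE /_cluster/voting_config_exclusions
+--------------------------------------------------
+// NOTCONSOLE
+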
+//tag::notable-breaking-changes[] [float] ==== Discovery configuration is required in production @@ -38,11 +45,43 @@ file: - `discovery.seed_hosts` - `discovery.seed_providers` - `cluster.initial_master_nodes` +- `discovery.zen.ping.unicast.hosts` +- `discovery.zen.hosts_provider` + +The first three settings in this list are only available in versions 7.0 and +above. If you are preparing to upgrade from an earlier version, you must set +`discovery.zen.ping.unicast.hosts` or `discovery.zen.hosts_provider`. +//end::notable-breaking-changes[] [float] +[[new-name-no-master-block-setting]] ==== New name for `no_master_block` setting The `discovery.zen.no_master_block` setting is now known as `cluster.no_master_block`. Any value set for `discovery.zen.no_master_block` is now ignored. You should remove this setting and, if needed, set `cluster.no_master_block` appropriately after the upgrade. + +[float] +==== Reduced default timeouts for fault detection + +By default the <> subsystem +now considers a node to be faulty if it fails to respond to 3 consecutive +pings, each of which times out after 10 seconds. Thus a node that is +unresponsive for longer than 30 seconds is liable to be removed from the +cluster. Previously the default timeout for each ping was 30 seconds, so that +an unresponsive node might be kept in the cluster for over 90 seconds. + +[float] +==== Master-ineligible nodes are ignored by discovery + +In earlier versions it was possible to use master-ineligible nodes during the +discovery process, either as seed nodes or to transfer discovery gossip +indirectly between the master-eligible nodes. Clusters that relied on +master-ineligible nodes like this were fragile and unable to automatically +recover from some kinds of failure. Discovery now involves only the +master-eligible nodes in the cluster so that it is not possible to rely on +master-ineligible nodes like this. You should configure +<> to provide the addresses of all the master-eligible nodes in +your cluster. diff --git a/docs/reference/migration/migrate_7_0/indices.asciidoc b/docs/reference/migration/migrate_7_0/indices.asciidoc index f832ae7feb6d3..d9ad47776a98b 100644 --- a/docs/reference/migration/migrate_7_0/indices.asciidoc +++ b/docs/reference/migration/migrate_7_0/indices.asciidoc @@ -2,24 +2,33 @@ [[breaking_70_indices_changes]] === Indices changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + [float] ==== Index creation no longer defaults to five shards Previous versions of Elasticsearch defaulted to creating five shards per index. Starting with 7.0.0, the default is now one shard per index. +//end::notable-breaking-changes[] [float] +[[_literal_literal_is_no_longer_allowed_in_index_name]] ==== `:` is no longer allowed in index name Due to cross-cluster search using `:` to separate a cluster and index name, index names may no longer contain `:`. [float] +[[index-unassigned-node-left-delayed-timeout-no-longer-negative]] ==== `index.unassigned.node_left.delayed_timeout` may no longer be negative Negative values were interpreted as zero in earlier versions but are no longer accepted. [float] +[[flush-force-merge-no-longer-refresh]] ==== `_flush` and `_force_merge` will no longer refresh In previous versions issuing a `_flush` or `_force_merge` (with `flush=true`) @@ -38,7 +47,7 @@ exceeded a error is thrown only for new indices. For existing pre-7.0 indices, a warning is logged. 
[float] -==== Limit to the difference between max_size and min_size in ShingleTokenFilter +==== Limit to the difference between max_shingle_size and min_shingle_size in ShingleTokenFilter To safeguard against creating too many tokens, the difference between `max_shingle_size` and `min_shingle_size` in `ShingleTokenFilter` has been limited to 3. This default @@ -77,6 +86,7 @@ The following previously deprecated url parameter have been removed: * `field_data` - use `fielddata` instead [float] +[[network-breaker-inflight-requests-overhead-increased-to-2]] ==== `network.breaker.inflight_requests.overhead` increased to 2 Previously the in flight requests circuit breaker considered only the raw byte representation. @@ -100,11 +110,13 @@ there is less need for fielddata. Therefore, the default value of the setting heap size. [float] +[[fix-value-for-index-shard-check-on-startup-removed]] ==== `fix` value for `index.shard.check_on_startup` is removed Deprecated option value `fix` for setting `index.shard.check_on_startup` is not supported. [float] +[[elasticsearch-translog-removed]] ==== `elasticsearch-translog` is removed -Use the `elasticsearch-shard` tool to remove corrupted translog data. \ No newline at end of file +Use the `elasticsearch-shard` tool to remove corrupted translog data. diff --git a/docs/reference/migration/migrate_7_0/ingest.asciidoc b/docs/reference/migration/migrate_7_0/ingest.asciidoc new file mode 100644 index 0000000000000..a8c9b8084d651 --- /dev/null +++ b/docs/reference/migration/migrate_7_0/ingest.asciidoc @@ -0,0 +1,27 @@ +[float] +[[breaking_70_ingest_changes]] +=== API changes + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +[float] +==== Ingest configuration exception information is now transmitted in metadata field + +Previously, some ingest configuration exception information about ingest processors +was sent to the client in the HTTP headers, which is inconsistent with how +exceptions are conveyed in other parts of Elasticsearch. + +Configuration exception information is now conveyed as a field in the response +body. +//end::notable-breaking-changes[] +[float] +==== Ingest plugin special handling has been removed +There was some special handling for installing and removing the `ingest-geoip` and +`ingest-user-agent` plugins after they were converted to modules. This special handling +was done to minimize breaking users in a minor release, and would exit with a status code +zero to avoid breaking automation. + +This special handling has now been removed. diff --git a/docs/reference/migration/migrate_7_0/java.asciidoc b/docs/reference/migration/migrate_7_0/java.asciidoc index f34b1c6ca9906..e1533ad8c53b3 100644 --- a/docs/reference/migration/migrate_7_0/java.asciidoc +++ b/docs/reference/migration/migrate_7_0/java.asciidoc @@ -2,7 +2,15 @@ [[breaking_70_java_changes]] === Java API changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] +[[isshardsacked-removed]] ==== `isShardsAcked` deprecated in `6.2` has been removed `isShardsAcked` has been replaced by `isShardsAcknowledged` in @@ -10,6 +18,7 @@ `CreateIndexClusterStateUpdateResponse`. 
[float] +[[prepareexecute-removed-client-api]] ==== `prepareExecute` removed from the client api The `prepareExecute` method which created a request builder has been @@ -29,18 +38,21 @@ was moved to `org.elasticsearch.search.aggregations.PipelineAggregationBuilders` [float] +[[retry-withbackoff-methods-removed]] ==== `Retry.withBackoff` methods with `Settings` removed The variants of `Retry.withBackoff` that included `Settings` have been removed because `Settings` is no longer needed. [float] +[[client-termvector-removed]] ==== Deprecated method `Client#termVector` removed The client method `termVector`, deprecated in 2.0, has been removed. The method `termVectors` (plural) should be used instead. [float] +[[abstractlifecyclecomponent-constructor-removed]] ==== Deprecated constructor `AbstractLifecycleComponent(Settings settings)` removed The constructor `AbstractLifecycleComponent(Settings settings)`, deprecated in 6.7 diff --git a/docs/reference/migration/migrate_7_0/java_time.asciidoc b/docs/reference/migration/migrate_7_0/java_time.asciidoc new file mode 100644 index 0000000000000..1c62c288f7bfd --- /dev/null +++ b/docs/reference/migration/migrate_7_0/java_time.asciidoc @@ -0,0 +1,126 @@ +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +[float] +[[breaking_70_java_time_changes]] +==== Replacing Joda-Time with java time + +Since Java 8 there is a dedicated `java.time` package, which is superior to +the Joda-Time library, that has been used so far in Elasticsearch. One of +the biggest advantages is the ability to be able to store dates in a higher +resolution than milliseconds for greater precision. Also this will allow us +to remove the Joda-Time dependency in the future. + +The mappings, aggregations and search code switched from Joda-Time to +java time. +//end::notable-breaking-changes[] + +[float] +==== Joda based date formatters are replaced with java ones + +With the release of Elasticsearch 6.7 a backwards compatibility layer was +introduced, that checked if you are using a Joda-Time based formatter, that is +supported differently in java time. A log message was emitted, and you could +create the proper java time based formatter prefixed with an `8`. + +With Elasticsearch 7.0 all formatters are now java based, which means you will +get exceptions when using deprecated formatters without checking the +deprecation log in 6.7. In the worst case you may even end up with different +dates. + +An example deprecation message looks like this, that is returned, when you +try to use a date formatter that includes a lower case `Y` + +[source,text] +---------- +Use of 'Y' (year-of-era) will change to 'y' in the next major version of +Elasticsearch. Prefix your date format with '8' to use the new specifier. +---------- + +So, instead of using `YYYY.MM.dd` you should use `8yyyy.MM.dd`. + +You can find more information about available formatting strings in the +https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html[DateTimeFormatter javadocs]. + +[float] +==== Date formats behavioural change + +The `epoch_millis` and `epoch_second` formatters no longer support +scientific notation. + +If you are using the century of era formatter in a date (`C`), this will no +longer be supported. + +The year-of-era formatting character is a `Y` in Joda-Time, but a lowercase +`y` in java time. 
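+
+For example, a date mapping that used the Joda-Time pattern `YYYY-MM-dd` should
+now declare `yyyy-MM-dd`; a minimal sketch, with a hypothetical index and field
+name:
+
+[source,js]
+--------------------------------------------------
+PUT my-index
+{
+  "mappings": {
+    "properties": {
+      "created_at": {
+        "type": "date",
+        "format": "yyyy-MM-dd"
+      }
+    }
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE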
+ +The week-based-year formatting character is a lowercase `x` in Joda-Time, +but an upper-case `Y` in java time. + +[float] +==== Using time zones in the Java client + +Timezones have to be specified as java time based zone objects. This means, +instead of using a `org.joda.time.DateTimeZone` the use of +`java.time.ZoneId` is required. + +Examples of possible uses are the `QueryStringQueryBuilder`, the +`RangeQueryBuilder` or the `DateHistogramAggregationBuilder`, each of them +allow for an optional timezone for that part of the search request. + +[float] +==== Parsing aggregation buckets in the Java client + +The date based aggregation buckets in responses used to be of +type `JodaTime`. Due to migrating to java-time, the buckets are now of +type `ZonedDateTime`. As the client is returning untyped objects here, you +may run into class cast exceptions only when running the code, but not at +compile time, ensure you have proper test coverage for this in your +own code. + +[float] +[[parsing-gtm0-timezeone-jdk8-not-supported]] +==== Parsing `GMT0` timezone with JDK8 is not supported + +When you are running Elasticsearch 7 with Java 8, you are not able to parse +the timezone `GMT0` properly anymore. The reason for this is a bug in the +JDK, which has not been fixed for JDK8. You can read more in the +https://bugs.openjdk.java.net/browse/JDK-8138664[official issue] + +[float] +==== Scripting with dates should use java time based methods + +If dates are used in scripting, a backwards compatibility layer has been added +that emulates the Joda-Time methods, but logs a deprecation message as well +to use the java time methods. + +The following methods will be removed in future versions of Elasticsearch +and should be replaced. + +* `getDayOfWeek()` will be an enum instead of an int, if you need to use + an int, use `getDayOfWeekEnum().getValue()` +* `getMillis()` should be replaced with `toInstant().toEpochMilli()` +* `getCenturyOfEra()` should be replaced with `get(ChronoField.YEAR_OF_ERA) / 100` +* `getEra()` should be replaced with `get(ChronoField.ERA)` +* `getHourOfDay()` should be replaced with `getHour()` +* `getMillisOfDay()` should be replaced with `get(ChronoField.MILLI_OF_DAY)` +* `getMillisOfSecond()` should be replaced with `get(ChronoField.MILLI_OF_SECOND)` +* `getMinuteOfDay()` should be replaced with `get(ChronoField.MINUTE_OF_DAY)` +* `getMinuteOfHour()` should be replaced with `getMinute()` +* `getMonthOfYear()` should be replaced with `getMonthValue()` +* `getSecondOfDay()` should be replaced with `get(ChronoField.SECOND_OF_DAY)` +* `getSecondOfMinute()` should be replaced with `getSecond()` +* `getWeekOfWeekyear()` should be replaced with `get(WeekFields.ISO.weekOfWeekBasedYear())` +* `getWeekyear()` should be replaced with `get(WeekFields.ISO.weekBasedYear())` +* `getYearOfCentury()` should be replaced with `get(ChronoField.YEAR_OF_ERA) % 100` +* `getYearOfEra()` should be replaced with `get(ChronoField.YEAR_OF_ERA)` +* `toString(String)` should be replaced with a `DateTimeFormatter` +* `toString(String,Locale)` should be replaced with a `DateTimeFormatter` + +[float] +==== Negative epoch timestamps are no longer supported + +With the switch to java time, support for negative timestamps has been removed. +For dates before 1970, use a date format containing a year. 
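+
+A minimal sketch of this, with hypothetical index, field, and date values: the
+field declares an explicit year-bearing format and the document supplies a date
+string rather than a negative epoch value.
+
+[source,js]
+--------------------------------------------------
+PUT old-events
+{
+  "mappings": {
+    "properties": {
+      "event_date": { "type": "date", "format": "yyyy-MM-dd" }
+    }
+  }
+}
+
+PUT old-events/_doc/1
+{
+  "event_date": "1965-03-21"
+}
+--------------------------------------------------
+// NOTCONSOLE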
diff --git a/docs/reference/migration/migrate_7_0/logging.asciidoc b/docs/reference/migration/migrate_7_0/logging.asciidoc index 1329def9a1878..a2be4dd1760f5 100644 --- a/docs/reference/migration/migrate_7_0/logging.asciidoc +++ b/docs/reference/migration/migrate_7_0/logging.asciidoc @@ -2,7 +2,15 @@ [[breaking_70_logging_changes]] === Logging changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] +[[new-json-format-log-directory]] ==== New JSON format log files in `log` directory Elasticsearch now will produce additional log files in JSON format. They will be stored in `*.json` suffix files. @@ -21,6 +29,7 @@ Following files should be expected now in log directory: Note: You can configure which of these files are written by editing `log4j2.properties`. [float] +[[log-files-ending-log-deprecated]] ==== Log files ending with `*.log` deprecated Log files with the `.log` file extension using the old pattern layout format are now considered deprecated and the newly added JSON log file format with diff --git a/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc b/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc index 0820c7f01cc70..e0255196d6cb1 100644 --- a/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc +++ b/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc @@ -2,6 +2,21 @@ [[breaking_70_low_level_restclient_changes]] === Low-level REST client changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + +[float] +[[maxretrytimeout-removed]] +==== Support for `maxRetryTimeout` removed from RestClient + +`RestClient` and `RestClientBuilder` no longer support the `maxRetryTimeout` +setting. The setting was removed as its counting mechanism was not accurate +and caused issues while adding little value. + [float] ==== Deprecated flavors of performRequest have been removed @@ -15,3 +30,8 @@ backwards compatibility. We deprecated `setHosts` in 6.4.0 in favor of `setNodes` because it supports host metadata used by the `NodeSelector`. + +[float] +==== Minimum compiler version change +The minimum compiler version on the low-level REST client has been bumped +to JDK 8. diff --git a/docs/reference/migration/migrate_7_0/mappings.asciidoc b/docs/reference/migration/migrate_7_0/mappings.asciidoc index 653dd2fb4ca46..0cf276494714c 100644 --- a/docs/reference/migration/migrate_7_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_7_0/mappings.asciidoc @@ -2,31 +2,45 @@ [[breaking_70_mappings_changes]] === Mapping changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] +[[all-meta-field-removed]] ==== The `_all` meta field is removed The `_all` field deprecated in 6 have now been removed. [float] +[[uid-meta-field-removed]] ==== The `_uid` meta field is removed This field used to index a composite key formed of the `_type` and the `_id`. Now that indices cannot have multiple types, this has been removed in favour of `_id`. 
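+
+Requests that previously filtered on `_uid` should reference `_id` instead; a
+sketch with hypothetical index and document ids:
+
+[source,js]
+--------------------------------------------------
+GET my-index/_search
+{
+  "query": {
+    "terms": {
+      "_id": [ "1", "4", "100" ]
+    }
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE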
+//tag::notable-breaking-changes[] [float] +[[default-mapping-not-allowed]] ==== The `_default_` mapping is no longer allowed The `_default_` mapping has been deprecated in 6.0 and is now no longer allowed in 7.0. Trying to configure a `_default_` mapping on 7.x indices will result in an error. +//end::notable-breaking-changes[] [float] +[[index-options-numeric-fields-removed]] ==== `index_options` for numeric fields has been removed The `index_options` field for numeric fields has been deprecated in 6 and has now been removed. [float] +[[limit-number-nested-json-objects]] ==== Limiting the number of `nested` json objects To safeguard against out of memory errors, the number of nested json objects within a single @@ -34,11 +48,13 @@ document across all fields has been limited to 10000. This default limit can be the index setting `index.mapping.nested_objects.limit`. [float] +[[update-all-types-option-removed]] ==== The `update_all_types` option has been removed This option is useless now that all indices have at most one type. [float] +[[classic-similarity-removed]] ==== The `classic` similarity has been removed The `classic` similarity relied on coordination factors for scoring to be good @@ -54,6 +70,7 @@ An error will now be thrown when unknown configuration options are provided to similarities. Such unknown parameters were ignored before. [float] +[[changed-default-geo-shape-index-strategy]] ==== Changed default `geo_shape` indexing strategy `geo_shape` types now default to using a vector indexing approach based on Lucene's new @@ -67,8 +84,15 @@ should also be changed in the template to explicitly define `tree` to one of `ge or `quadtree`. This will ensure compatibility with previously created indexes. [float] +[[deprecated-geo-shape-params]] ==== Deprecated `geo_shape` parameters The following type parameters are deprecated for the `geo_shape` field type: `tree`, `precision`, `tree_levels`, `distance_error_pct`, `points_only`, and `strategy`. They will be removed in a future version. + +[float] +[[include-type-name-defaults-false]] +==== `include_type_name` now defaults to `false` +The default for `include_type_name` is now `false` for all APIs that accept +the parameter. diff --git a/docs/reference/migration/migrate_7_0/ml.asciidoc b/docs/reference/migration/migrate_7_0/ml.asciidoc new file mode 100644 index 0000000000000..89b772328214d --- /dev/null +++ b/docs/reference/migration/migrate_7_0/ml.asciidoc @@ -0,0 +1,15 @@ +[float] +[[breaking_70_ml_changes]] +=== ML changes + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + +[float] +==== Types in Datafeed config are no longer valid +Types have been removed from the datafeed config and are no longer +valid parameters. 
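+
+For example, a datafeed created against 7.x specifies only the indices to
+search; the former `types` array must be omitted entirely. The job and index
+names below are hypothetical:
+
+[source,js]
+--------------------------------------------------
+PUT _ml/datafeeds/datafeed-total-requests
+{
+  "job_id": "total-requests",
+  "indices": [ "server-metrics" ]
+}
+--------------------------------------------------
+// NOTCONSOLE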
diff --git a/docs/reference/migration/migrate_7_0/node.asciidoc b/docs/reference/migration/migrate_7_0/node.asciidoc index 3b8a9d84e765d..a90366189ece7 100644 --- a/docs/reference/migration/migrate_7_0/node.asciidoc +++ b/docs/reference/migration/migrate_7_0/node.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_node_changes]] === Node start up +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== Nodes with left-behind data or metadata refuse to start Repurposing an existing node by changing node.master or node.data to false can leave lingering on-disk metadata and diff --git a/docs/reference/migration/migrate_7_0/packaging.asciidoc b/docs/reference/migration/migrate_7_0/packaging.asciidoc index e2380613d8f7b..f2b4c2a8a2ffb 100644 --- a/docs/reference/migration/migrate_7_0/packaging.asciidoc +++ b/docs/reference/migration/migrate_7_0/packaging.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_packaging_changes]] === Packaging changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] [[systemd-service-file-config]] ==== systemd service file is no longer configuration @@ -16,3 +23,15 @@ in `/etc/systemd/system/elasticsearch.service.d/override.conf`. The tar package previously included files in the `bin` directory meant only for windows. These files have been removed. Use the `zip` package instead. + +[float] +==== Ubuntu 14.04 is no longer supported + +Ubuntu 14.04 will reach end-of-life on April 30, 2019. As such, we are no longer +supporting Ubuntu 14.04. + +[float] +==== CLI secret prompting is no longer supported +The ability to use `${prompt.secret}` and `${prompt.text}` to collect secrets +from the CLI at server start is no longer supported. Secure settings have replaced +the need for these prompts. diff --git a/docs/reference/migration/migrate_7_0/plugins.asciidoc b/docs/reference/migration/migrate_7_0/plugins.asciidoc index 05de7e85b8e92..953903f162584 100644 --- a/docs/reference/migration/migrate_7_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_7_0/plugins.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_plugins_changes]] === Plugins changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] ==== Azure Repository plugin @@ -64,14 +71,23 @@ Setting.AffixSetting MY_SETTING = Setting.affixKeySetting( The `RealmSettings.simpleString` method can be used as a convenience for the above. +//tag::notable-breaking-changes[] [float] ==== Tribe node removed Tribe node functionality has been removed in favor of -<>. +{ref}/modules-cross-cluster-search.html[{ccs}]. +//end::notable-breaking-changes[] [float] ==== Discovery implementations are no longer pluggable * The method `DiscoveryPlugin#getDiscoveryTypes()` was removed, so that plugins can no longer provide their own discovery implementations. + +[float] +[[watcher-hipchat-action-removed]] +==== Watcher 'hipchat' action removed + +Hipchat has been deprecated and shut down as a service. The `hipchat` action for +watches has been removed. 
diff --git a/docs/reference/migration/migrate_7_0/restclient.asciidoc b/docs/reference/migration/migrate_7_0/restclient.asciidoc index 39d19c345cd95..bdf841a35ac5a 100644 --- a/docs/reference/migration/migrate_7_0/restclient.asciidoc +++ b/docs/reference/migration/migrate_7_0/restclient.asciidoc @@ -2,7 +2,15 @@ [[breaking_70_restclient_changes]] === High-level REST client changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + [float] +[[remove-header-args]] ==== API methods accepting `Header` argument have been removed All API methods accepting headers as a `Header` varargs argument, deprecated @@ -15,18 +23,10 @@ e.g. `client.index(indexRequest, new Header("name" "value"))` becomes `client.index(indexRequest, RequestOptions.DEFAULT.toBuilder().addHeader("name", "value").build());` [float] +[[cluster-health-api-default-cluster-level]] ==== Cluster Health API default to `cluster` level The Cluster Health API used to default to `shards` level to ease migration from transport client that doesn't support the `level` parameter and always returns information including indices and shards details. The level default value has been aligned with the Elasticsearch default level: `cluster`. - -=== Low-level REST client changes - -[float] -==== Support for `maxRetryTimeout` removed from RestClient - -`RestClient` and `RestClientBuilder` no longer support the `maxRetryTimeout` -setting. The setting was removed as its counting mechanism was not accurate -and caused issues while adding little value. \ No newline at end of file diff --git a/docs/reference/migration/migrate_7_0/scripting.asciidoc b/docs/reference/migration/migrate_7_0/scripting.asciidoc index 99afca91e0119..2427cafca3104 100644 --- a/docs/reference/migration/migrate_7_0/scripting.asciidoc +++ b/docs/reference/migration/migrate_7_0/scripting.asciidoc @@ -2,6 +2,14 @@ [[breaking_70_scripting_changes]] === Scripting changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + + [float] ==== getDate() and getDates() removed @@ -24,6 +32,7 @@ To check if a document is missing a value, you can use [float] +[[script-errors-return-400-error-codes]] ==== Script errors will return as `400` error codes Malformed scripts, either in search templates, ingest pipelines or search diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc index afe96fd8851a9..ba980ba7b0c65 100644 --- a/docs/reference/migration/migrate_7_0/search.asciidoc +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -2,6 +2,38 @@ [[breaking_70_search_changes]] === Search and Query DSL changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + +[float] +==== Off-heap terms index + +The terms dictionary is the part of the inverted index that records all terms +that occur within a segment in sorted order. In order to provide fast retrieval, +terms dictionaries come with a small terms index that allows for efficient +random access by term. Until now this terms index had always been loaded +on-heap. 
+ +As of 7.0, the terms index is loaded on-heap for fields that only have unique +values such as `_id` fields, and off-heap otherwise - likely most other fields. +This is expected to reduce memory requirements but might slow down search +requests if both below conditions are met: + +* The size of the data directory on each node is significantly larger than the + amount of memory that is available to the filesystem cache. + +* The number of matches of the query is not several orders of magnitude greater + than the number of terms that the query tries to match, either explicitly via + `term` or `terms` queries, or implicitly via multi-term queries such as + `prefix`, `wildcard` or `fuzzy` queries. + +This change affects both existing indices created with Elasticsearch 6.x and new +indices created with Elasticsearch 7.x. + [float] ==== Changes to queries * The default value for `transpositions` parameter of `fuzzy` query @@ -45,6 +77,7 @@ PUT /_cluster/settings // CONSOLE [float] +[[search-api-returns-400-invalid-requests]] ==== Search API returns `400` for invalid requests The Search API returns `400 - Bad request` while it would previously return @@ -59,6 +92,7 @@ The Search API returns `400 - Bad request` while it would previously return * script compilation errors [float] +[[scroll-queries-cannot-use-request-cache]] ==== Scroll queries cannot use the `request_cache` anymore Setting `request_cache:true` on a query that creates a scroll (`scroll=1m`) @@ -66,6 +100,7 @@ has been deprecated in 6 and will now return a `400 - Bad request`. Scroll queries are not meant to be cached. [float] +[[scroll-queries-cannot-use-rescore]] ==== Scroll queries cannot use `rescore` anymore Including a rescore clause on a query that creates a scroll (`scroll=1m`) has @@ -85,6 +120,7 @@ removed. * `jarowinkler` - replaced by `jaro_winkler` [float] +[[popular-mode-suggesters]] ==== `popular` mode for Suggesters The `popular` mode for Suggesters (`term` and `phrase`) now uses the doc frequency @@ -117,6 +153,7 @@ To safeguard against this, a hard limit of 1024 fields has been introduced for q using the "all fields" mode ("default_field": "*") or other fieldname expansions (e.g. "foo*"). [float] +[[invalid-search-request-body]] ==== Invalid `_search` request body Search requests with extra content after the main object will no longer be accepted @@ -144,6 +181,7 @@ For geo context the value of the `path` parameter is now validated against the m and the context is only accepted if `path` points to a field with `geo_point` type. [float] +[[semantics-changed-max-concurrent-shared-requests]] ==== Semantics changed for `max_concurrent_shard_requests` `max_concurrent_shard_requests` used to limit the total number of concurrent shard @@ -151,6 +189,7 @@ requests a single high level search request can execute. In 7.0 this changed to max number of concurrent shard requests per node. The default is now `5`. [float] +[[max-score-set-to-null-when-untracked]] ==== `max_score` set to `null` when scores are not tracked `max_score` used to be set to `0` whenever scores are not tracked. `null` is now used @@ -180,7 +219,9 @@ on whether queries need to access score or not. As a result `bool` queries with `minimum_should_match` to 1. This behavior has been deprecated in the previous major version. 
+//tag::notable-breaking-changes[] [float] +[[hits-total-now-object-search-response]] ==== `hits.total` is now an object in the search response The total hits that match the search request is now returned as an object @@ -188,27 +229,31 @@ with a `value` and a `relation`. `value` indicates the number of hits that match and `relation` indicates whether the value is accurate (`eq`) or a lower bound (`gte`): -``` +[source,js] +-------------------------------------------------- { "hits": { - "total": { <1> + "total": { "value": 1000, "relation": "eq" }, ... } } -``` +-------------------------------------------------- +// NOTCONSOLE -The "total" object in the response indicates that the query matches exactly 1000 +The `total` object in the response indicates that the query matches exactly 1000 documents ("eq"). The `value` is always accurate (`"relation": "eq"`) when `track_total_hits` is set to true in the request. You can also retrieve `hits.total` as a number in the rest response by adding `rest_total_hits_as_int=true` in the request parameter of the search request. This parameter has been added to ease the transition to the new format and will be removed in the next major version (8.0). +//end::notable-breaking-changes[] [float] +[[hits-total-omitted-if-disabled]] ==== `hits.total` is omitted in the response if `track_total_hits` is disabled (false) If `track_total_hits` is set to `false` in the search request the search response @@ -216,7 +261,9 @@ will set `hits.total` to null and the object will not be displayed in the rest layer. You can add `rest_total_hits_as_int=true` in the search request parameters to get the old format back (`"total": -1`). +//tag::notable-breaking-changes[] [float] +[[track-total-hits-10000-default]] ==== `track_total_hits` defaults to 10,000 By default search request will count the total hits accurately up to `10,000` @@ -245,4 +292,23 @@ documents. If the total number of hits that match the query is greater than this <2> This is a lower bound (`"gte"`). You can force the count to always be accurate by setting `"track_total_hits` -to true explicitly in the search request. \ No newline at end of file +to true explicitly in the search request. +//end::notable-breaking-changes[] + +[float] +==== Limitations on Similarities +Lucene 8 introduced more constraints on similarities, in particular: + +- scores must not be negative, +- scores must not decrease when term freq increases, +- scores must not increase when norm (interpreted as an unsigned long) increases. + +[float] +==== Weights in Function Score must be positive +Negative `weight` parameters in the `function_score` are no longer allowed. + +[float] +==== Query string and Simple query string limit expansion of fields to 1024 +The number of automatically expanded fields for the "all fields" +mode (`"default_field": "*"`) for the query_string and simple_query_string +queries is now 1024 fields. 
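+
+Queries that relied on unbounded expansion should either stay within this limit
+or name the fields explicitly; a sketch with hypothetical index and field names:
+
+[source,js]
+--------------------------------------------------
+GET my-index/_search
+{
+  "query": {
+    "simple_query_string": {
+      "query": "error timeout",
+      "fields": [ "message", "title" ]
+    }
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE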
diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index 2e5631b378652..428c340c49930 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -2,12 +2,19 @@ [[breaking_70_settings_changes]] === Settings changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + [float] +[[default-node-name-now-hostname]] ==== The default for `node.name` is now the hostname `node.name` now defaults to the hostname at the time when Elasticsearch is started. Previously the default node name was the first eight characters of the node id. It can still be configured explicitly in `elasticsearch.yml`. +//end::notable-breaking-changes[] [float] ==== Percolator @@ -92,6 +99,7 @@ Moreover, `xpack.security.audit.logfile.emit_node_name` has changed its default from `true` to `false`. All other settings mentioned before, have kept their default value of `false`. +//tag::notable-breaking-changes[] [float] [[include-realm-type-in-setting]] ==== Security realms settings @@ -121,6 +129,7 @@ xpack.security.authc.realms: Any realm specific secure settings that have been stored in the elasticsearch keystore (such as ldap bind passwords, or passwords for ssl keys) must be updated in a similar way. +//end::notable-breaking-changes[] [float] [[tls-setting-fallback]] @@ -132,6 +141,7 @@ fallback to a default configuration when using TLS. Each component (realm, trans http client, etc) must now be configured with their own settings for TLS if it is being used. +//tag::notable-breaking-changes[] [float] [[tls-v1-removed]] ==== TLS v1.0 disabled @@ -144,6 +154,7 @@ You can enable TLS v1.0 by configuring the relevant `ssl.supported_protocols` se -------------------------------------------------- xpack.security.http.ssl.supported_protocols: [ "TLSv1.3", "TLSv1.2", "TLSv1.1", "TLSv1" ] -------------------------------------------------- +//end::notable-breaking-changes[] [float] [[trial-explicit-security]] @@ -201,6 +212,25 @@ Elastic Stack to handle the indexing part. [float] [[ingest-user-agent-ecs-always]] -==== Ingest User Agent processor always uses `ecs` output format -The deprecated `ecs` setting for the user agent ingest processor has been -removed. https://github.com/elastic/ecs[ECS] format is now the default. +==== Ingest User Agent processor defaults uses `ecs` output format +https://github.com/elastic/ecs[ECS] format is now the default. +The `ecs` setting for the user agent ingest processor now defaults to true. + +[float] +[[remove-action-master-force_local]] +==== Remove `action.master.force_local` + +The `action.master.force_local` setting was an undocumented setting, used +internally by the tribe node to force reads to local cluster state (instead of +forwarding to a master, which tribe nodes did not have). Since the tribe +node was removed, this setting was removed too. + +[float] +==== Enforce cluster-wide shard limit +The cluster-wide shard limit is now enforced and not optional. The limit can +still be adjusted as desired using the cluster settings API. + +[float] +==== HTTP Max content length setting is no longer parsed leniently +Previously, `http.max_content_length` would reset to `100mb` if the setting was +`Integer.MAX_VALUE`. This leniency has been removed. 
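+
+As noted above, the enforced cluster-wide shard limit remains adjustable through
+the cluster settings API. A sketch, assuming `cluster.max_shards_per_node` is
+the setting backing the limit and using an arbitrary value:
+
+[source,js]
+--------------------------------------------------
+PUT _cluster/settings
+{
+  "persistent": {
+    "cluster.max_shards_per_node": 1200
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE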
diff --git a/docs/reference/migration/migrate_7_0/snapshotstats.asciidoc b/docs/reference/migration/migrate_7_0/snapshotstats.asciidoc index 2098eb3574ca8..d5c04ebd663d3 100644 --- a/docs/reference/migration/migrate_7_0/snapshotstats.asciidoc +++ b/docs/reference/migration/migrate_7_0/snapshotstats.asciidoc @@ -2,6 +2,13 @@ [[breaking_70_snapshotstats_changes]] === Snapshot stats changes +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + Snapshot stats details are provided in a new structured way: * `total` section for all the files that are referenced by the snapshot. @@ -9,6 +16,7 @@ Snapshot stats details are provided in a new structured way: * In case of a snapshot that's still in progress, there's also a `processed` section for files that are in the process of being copied. [float] +[[snapshot-stats-deprecated]] ==== Deprecated `number_of_files`, `processed_files`, `total_size_in_bytes` and `processed_size_in_bytes` snapshot stats properties have been removed * Properties `number_of_files` and `total_size_in_bytes` are removed and should be replaced by values of nested object `total`. diff --git a/docs/reference/migration/migrate_7_0/suggesters.asciidoc b/docs/reference/migration/migrate_7_0/suggesters.asciidoc new file mode 100644 index 0000000000000..5e00b0d249d27 --- /dev/null +++ b/docs/reference/migration/migrate_7_0/suggesters.asciidoc @@ -0,0 +1,21 @@ +[float] +[[breaking_70_suggesters_changes]] +=== Suggesters changes + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + +[float] +==== Registration of suggesters in plugins has changed + +Plugins must now explicitly indicate the type of suggestion that they produce. + +[float] +==== Phrase suggester now multiples alpha +Previously, the laplace smoothing used by the phrase suggester added `alpha`, +when it should instead multiply. This behavior has been changed and will +affect suggester scores. diff --git a/docs/reference/migration/migrate_7_1.asciidoc b/docs/reference/migration/migrate_7_1.asciidoc new file mode 100644 index 0000000000000..e66882f0f0dc1 --- /dev/null +++ b/docs/reference/migration/migrate_7_1.asciidoc @@ -0,0 +1,64 @@ +[[breaking-changes-7.1]] +== Breaking changes in 7.1 +++++ +7.1 +++++ + +This section discusses the changes that you need to be aware of when migrating +your application to Elasticsearch 7.1. + +See also <> and <>. + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + +[float] +[[breaking_71_http_changes]] +=== HTTP changes + +[float] +==== Deprecation of old HTTP settings + +The `http.tcp_no_delay` setting is deprecated in 7.1. It is replaced by +`http.tcp.no_delay`. + +[float] +[[breaking_71_network_changes]] +=== Network changes + +[float] +==== Deprecation of old network settings + +The `network.tcp.connect_timeout` setting is deprecated in 7.1. This setting +was a fallback setting for `transport.connect_timeout`. To change the default +connection timeout for client connections, modify `transport.connect_timeout`. 
+ +[float] +[[breaking_71_transport_changes]] +=== Transport changes + +//tag::notable-breaking-changes[] +[float] +==== Deprecation of old transport settings + +The following settings are deprecated in 7.1. Each setting has a replacement +setting that was introduced in 6.7. + +- `transport.tcp.port` is replaced by `transport.port` +- `transport.tcp.compress` is replaced by `transport.compress` +- `transport.tcp.connect_timeout` is replaced by `transport.connect_timeout` +- `transport.tcp_no_delay` is replaced by `transport.tcp.no_delay` +- `transport.profiles.profile_name.tcp_no_delay` is replaced by +`transport.profiles.profile_name.tcp.no_delay` +- `transport.profiles.profile_name.tcp_keep_alive` is replaced by +`transport.profiles.profile_name.tcp.keep_alive` +- `transport.profiles.profile_name.reuse_address` is replaced by +`transport.profiles.profile_name.tcp.reuse_address` +- `transport.profiles.profile_name.send_buffer_size` is replaced by `transport.profiles.profile_name.tcp.send_buffer_size` +- `transport.profiles.profile_name.receive_buffer_size` is replaced by `transport.profiles.profile_name.tcp.receive_buffer_size` + +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migration.asciidoc b/docs/reference/migration/migration.asciidoc index a54da21ab1409..bf46b3b5a5bdf 100644 --- a/docs/reference/migration/migration.asciidoc +++ b/docs/reference/migration/migration.asciidoc @@ -5,10 +5,6 @@ The migration APIs simplify upgrading {xpack} indices from one version to another. -* <> -* <> * <> -include::apis/assistance.asciidoc[] -include::apis/upgrade.asciidoc[] include::apis/deprecation.asciidoc[] diff --git a/docs/reference/ml/apis/close-job.asciidoc b/docs/reference/ml/apis/close-job.asciidoc index c5f9b5fc2444a..3a9024d2e3d0d 100644 --- a/docs/reference/ml/apis/close-job.asciidoc +++ b/docs/reference/ml/apis/close-job.asciidoc @@ -12,8 +12,8 @@ A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. - -==== Request +[[ml-close-job-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//_close` + @@ -21,8 +21,14 @@ operations, but you can still explore and navigate results. `POST _ml/anomaly_detectors/_all/_close` + +[[ml-close-job-prereqs]] +==== {api-prereq-title} -==== Description +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. + +[[ml-close-job-desc]] +==== {api-description-title} You can close multiple jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs @@ -47,15 +53,29 @@ after the close job API returns. The `force` query parameter should only be use situations where the job has already failed, or where you are not interested in results the job might have recently produced or might produce in the future. - -==== Path Parameters +[[ml-close-job-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) Identifier for the job. It can be a job identifier, a group name, or a wildcard expression. +[[ml-close-job-query-parms]] +==== {api-query-parms-title} -==== Query Parameters +`allow_no_jobs`:: + (Optional, boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no jobs that match. +* Contains the `_all` string or no identifiers and there are no matches. 
+* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `jobs` array +when there are no matches and the subset of results when there are partial +matches. If this parameter is `false`, the request returns a `404` status code +when there are no matches or only partial matches. +-- `force`:: (boolean) Use to close a failed job, or to forcefully close a job which has not @@ -65,14 +85,15 @@ results the job might have recently produced or might produce in the future. (time units) Controls the time to wait until a job has closed. The default value is 30 minutes. +[[ml-close-job-response-codes]] +==== {api-response-codes-title} -==== Authorization - -You must have `manage_ml`, or `manage` cluster privileges to use this API. -For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. - +`404` (Missing resources):: + If `allow_no_jobs` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. -==== Examples +[[ml-close-job-example]] +==== {api-examples-title} The following example closes the `total-requests` job: diff --git a/docs/reference/ml/apis/datafeedresource.asciidoc b/docs/reference/ml/apis/datafeedresource.asciidoc index 8e1251067dd9f..44d9d343712ad 100644 --- a/docs/reference/ml/apis/datafeedresource.asciidoc +++ b/docs/reference/ml/apis/datafeedresource.asciidoc @@ -61,12 +61,12 @@ A {dfeed} resource has the following properties: `delayed_data_check_config`:: (object) Specifies whether the data feed checks for missing data and - and the size of the window. For example: + the size of the window. For example: `{"enabled": true, "check_window": "1h"}` See <>. [[ml-datafeed-chunking-config]] -==== Chunking Configuration Objects +==== Chunking configuration objects {dfeeds-cap} might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load @@ -88,31 +88,33 @@ A chunking configuration object has the following properties: For example: `3h`. [[ml-datafeed-delayed-data-check-config]] -==== Delayed Data Check Configuration Objects +==== Delayed data check configuration objects The {dfeed} can optionally search over indices that have already been read in -an effort to find if any data has since been added to the index. If missing data -is found, it is a good indication that the `query_delay` option is set too low and -the data is being indexed after the {dfeed} has passed that moment in time. See +an effort to determine whether any data has subsequently been added to the index. +If missing data is found, it is a good indication that the `query_delay` option +is set too low and the data is being indexed after the {dfeed} has passed that +moment in time. See {stack-ov}/ml-delayed-data-detection.html[Working with delayed data]. -This check only runs on real-time {dfeeds} +This check runs only on real-time {dfeeds}. The configuration object has the following properties: `enabled`:: - (boolean) Should the {dfeed} periodically check for data being indexed after reading. - Defaults to `true` + (boolean) Specifies whether the {dfeed} periodically checks for delayed data. + Defaults to `true`. `check_window`:: - (time units) The window of time before the latest finalized bucket that should be searched - for late data. Defaults to `null` which causes an appropriate `check_window` to be calculated - when the real-time {dfeed} runs. 
- The default `check_window` span calculation is the max between `2h` or `8 * bucket_span`. + (time units) The window of time that is searched for late data. This window of + time ends with the latest finalized bucket. It defaults to `null`, which + causes an appropriate `check_window` to be calculated when the real-time + {dfeed} runs. In particular, the default `check_window` span calculation is + based on the maximum of `2h` or `8 * bucket_span`. [float] [[ml-datafeed-counts]] -==== {dfeed-cap} Counts +==== {dfeed-cap} counts The get {dfeed} statistics API provides information about the operational progress of a {dfeed}. All of these properties are informational; you cannot diff --git a/docs/reference/ml/apis/delete-datafeed.asciidoc b/docs/reference/ml/apis/delete-datafeed.asciidoc index 5940d5c70f044..d2a7845be7584 100644 --- a/docs/reference/ml/apis/delete-datafeed.asciidoc +++ b/docs/reference/ml/apis/delete-datafeed.asciidoc @@ -2,6 +2,8 @@ [testenv="platinum"] [[ml-delete-datafeed]] === Delete {dfeeds} API + +[subs="attributes"] ++++ Delete {dfeeds} ++++ diff --git a/docs/reference/ml/apis/eventresource.asciidoc b/docs/reference/ml/apis/eventresource.asciidoc index 7999c9744aea0..6c552b68f368f 100644 --- a/docs/reference/ml/apis/eventresource.asciidoc +++ b/docs/reference/ml/apis/eventresource.asciidoc @@ -13,15 +13,15 @@ An events resource has the following properties: (string) A description of the scheduled event. `end_time`:: - (string) The timestamp for the end of the scheduled event. The datetime string - is in ISO 8601 format. + (date) The timestamp for the end of the scheduled event + in milliseconds since the epoch or ISO 8601 format. `event_id`:: (string) An automatically-generated identifier for the scheduled event. `start_time`:: - (string) The timestamp for the beginning of the scheduled event. The datetime - string is in ISO 8601 format. + (date) The timestamp for the beginning of the scheduled event + in milliseconds since the epoch or ISO 8601 format. For more information, see {xpack-ref}/ml-calendars.html[Calendars and Scheduled Events]. diff --git a/docs/reference/ml/apis/filterresource.asciidoc b/docs/reference/ml/apis/filterresource.asciidoc index a9748949ffd58..520a2a99a3c71 100644 --- a/docs/reference/ml/apis/filterresource.asciidoc +++ b/docs/reference/ml/apis/filterresource.asciidoc @@ -9,7 +9,7 @@ A filter resource has the following properties: (string) A string that uniquely identifies the filter. `description`:: - (array) A description of the filter. + (string) A description of the filter. `items`:: (array of strings) An array of strings which is the filter item list. diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/apis/find-file-structure.asciidoc index caed632bda0e5..9c21d2a88b49a 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/apis/find-file-structure.asciidoc @@ -164,7 +164,7 @@ format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. 
If this parameter is not specified, the structure finder chooses the best format from -the formats it knows, which are these Java time formats and their Joda equivalents: +the formats it knows, which are these Java time formats: * `dd/MMM/yyyy:HH:mm:ss XX` * `EEE MMM dd HH:mm zzz yyyy` diff --git a/docs/reference/ml/apis/get-calendar.asciidoc b/docs/reference/ml/apis/get-calendar.asciidoc index 0bb8a30afaf7f..09e429b1f6de3 100644 --- a/docs/reference/ml/apis/get-calendar.asciidoc +++ b/docs/reference/ml/apis/get-calendar.asciidoc @@ -30,6 +30,7 @@ You can get information for a single calendar or for all calendars by using ==== Request Body +`page`:: `from`::: (integer) Skips the specified number of calendars. diff --git a/docs/reference/ml/apis/get-datafeed-stats.asciidoc b/docs/reference/ml/apis/get-datafeed-stats.asciidoc index 34c27d3dae962..6f78f557f3f50 100644 --- a/docs/reference/ml/apis/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/apis/get-datafeed-stats.asciidoc @@ -2,6 +2,8 @@ [testenv="platinum"] [[ml-get-datafeed-stats]] === Get {dfeed} statistics API + +[subs="attributes"] ++++ Get {dfeed} statistics ++++ @@ -41,6 +43,22 @@ IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. wildcard expression. If you do not specify one of these options, the API returns statistics for all {dfeeds}. +[[ml-get-datafeed-stats-query-parms]] +==== {api-query-parms-title} + +`allow_no_datafeeds`:: + (Optional, boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {datafeeds} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `datafeeds` array when +there are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a `404` status code when there +are no matches or only partial matches. +-- ==== Results @@ -50,6 +68,12 @@ The API returns the following information: (array) An array of {dfeed} count objects. For more information, see <>. +[[ml-get-datafeed-stats-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `allow_no_datafeeds` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. ==== Authorization diff --git a/docs/reference/ml/apis/get-datafeed.asciidoc b/docs/reference/ml/apis/get-datafeed.asciidoc index 402838742dfa6..92e4c804b855e 100644 --- a/docs/reference/ml/apis/get-datafeed.asciidoc +++ b/docs/reference/ml/apis/get-datafeed.asciidoc @@ -2,6 +2,8 @@ [testenv="platinum"] [[ml-get-datafeed]] === Get {dfeeds} API + +[subs="attributes"] ++++ Get {dfeeds} ++++ @@ -36,6 +38,22 @@ IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. wildcard expression. If you do not specify one of these options, the API returns information about all {dfeeds}. +[[ml-get-datafeed-query-parms]] +==== {api-query-parms-title} + +`allow_no_datafeeds`:: + (Optional, boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {datafeeds} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `datafeeds` array when +there are no matches and the subset of results when there are partial matches. 
+If this parameter is `false`, the request returns a `404` status code when there +are no matches or only partial matches. +-- ==== Results @@ -45,6 +63,12 @@ The API returns the following information: (array) An array of {dfeed} objects. For more information, see <>. +[[ml-get-datafeed-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `allow_no_datafeeds` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. ==== Authorization diff --git a/docs/reference/ml/apis/get-filter.asciidoc b/docs/reference/ml/apis/get-filter.asciidoc index 07fc25d756a6a..53dfab2530a10 100644 --- a/docs/reference/ml/apis/get-filter.asciidoc +++ b/docs/reference/ml/apis/get-filter.asciidoc @@ -28,7 +28,7 @@ You can get a single filter or all filters. For more information, see (string) Identifier for the filter. -==== Request Body +==== Querystring Parameters `from`::: (integer) Skips the specified number of filters. diff --git a/docs/reference/ml/apis/get-job-stats.asciidoc b/docs/reference/ml/apis/get-job-stats.asciidoc index b674b01802bee..8bc3cfa57a21b 100644 --- a/docs/reference/ml/apis/get-job-stats.asciidoc +++ b/docs/reference/ml/apis/get-job-stats.asciidoc @@ -9,8 +9,8 @@ Retrieves usage information for jobs. -==== Request - +[[ml-get-job-stats-request]] +==== {api-request-title} `GET _ml/anomaly_detectors//_stats` @@ -21,8 +21,15 @@ Retrieves usage information for jobs. `GET _ml/anomaly_detectors/_all/_stats` + +[[ml-get-job-stats-prereqs]] +==== {api-prereq-title} -===== Description +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + +[[ml-get-job-stats-desc]] +==== {api-description-title} You can get statistics for multiple jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can @@ -31,16 +38,33 @@ get statistics for all jobs by using `_all`, by specifying `*` as the IMPORTANT: This API returns a maximum of 10,000 jobs. - -==== Path Parameters +[[ml-get-job-stats-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) An identifier for the job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns statistics for all jobs. +[[ml-get-job-stats-query-parms]] +==== {api-query-parms-title} + +`allow_no_jobs`:: + (Optional, boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no jobs that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. -==== Results +The default value is `true`, which returns an empty `jobs` array +when there are no matches and the subset of results when there are partial +matches. If this parameter is `false`, the request returns a `404` status code +when there are no matches or only partial matches. +-- + +[[ml-get-job-stats-results]] +==== {api-response-body-title} The API returns the following information: @@ -48,15 +72,15 @@ The API returns the following information: (array) An array of job statistics objects. For more information, see <>. +[[ml-get-job-stats-response-codes]] +==== {api-response-codes-title} -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. 
For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - +`404` (Missing resources):: + If `allow_no_jobs` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. -==== Examples +[[ml-get-job-stats-example]] +==== {api-examples-title} The following example gets usage information for the `farequote` job: diff --git a/docs/reference/ml/apis/get-job.asciidoc b/docs/reference/ml/apis/get-job.asciidoc index 4eb7eaf5a7f07..e9601a87ec7d7 100644 --- a/docs/reference/ml/apis/get-job.asciidoc +++ b/docs/reference/ml/apis/get-job.asciidoc @@ -8,8 +8,8 @@ Retrieves configuration information for jobs. - -==== Request +[[ml-get-job-request]] +==== {api-request-title} `GET _ml/anomaly_detectors/` + @@ -19,8 +19,15 @@ Retrieves configuration information for jobs. `GET _ml/anomaly_detectors/_all` +[[ml-get-job-prereqs]] +==== {api-prereq-title} -===== Description +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + +[[ml-get-job-desc]] +==== {api-description-title} You can get information for multiple jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can @@ -29,15 +36,33 @@ get information for all jobs by using `_all`, by specifying `*` as the IMPORTANT: This API returns a maximum of 10,000 jobs. - -==== Path Parameters +[[ml-get-job-path-parms]] +==== {api-path-parms-title} `job_id`:: (string) Identifier for the job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all jobs. -==== Results +[[ml-get-job-query-parms]] +==== {api-query-parms-title} + +`allow_no_jobs`:: + (Optional, boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no jobs that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `jobs` array +when there are no matches and the subset of results when there are partial +matches. If this parameter is `false`, the request returns a `404` status code +when there are no matches or only partial matches. +-- + +[[ml-get-job-results]] +==== {api-response-body-title} The API returns the following information: @@ -45,15 +70,15 @@ The API returns the following information: (array) An array of job resources. For more information, see <>. +[[ml-get-job-response-codes]] +==== {api-response-codes-title} -==== Authorization - -You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster -privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - +`404` (Missing resources):: + If `allow_no_jobs` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. 
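+
+For instance, a wildcard request with `allow_no_jobs` set to `false` returns a
+`404` when nothing matches; the job pattern below is hypothetical:
+
+[source,js]
+--------------------------------------------------
+GET _ml/anomaly_detectors/missing-jobs*?allow_no_jobs=false
+--------------------------------------------------
+// NOTCONSOLE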
-==== Examples +[[ml-get-job-example]] +==== {api-examples-title} The following example gets configuration information for the `total-requests` job: diff --git a/docs/reference/ml/apis/get-ml-info.asciidoc b/docs/reference/ml/apis/get-ml-info.asciidoc index 67484c0073179..00c57c8cf7f5b 100644 --- a/docs/reference/ml/apis/get-ml-info.asciidoc +++ b/docs/reference/ml/apis/get-ml-info.asciidoc @@ -2,6 +2,8 @@ [testenv="platinum"] [[get-ml-info]] === Get machine learning info API + +[subs="attributes"] ++++ Get {ml} info ++++ @@ -58,4 +60,4 @@ This is a possible response: "limits" : { } } ---- -// TESTRESPONSE +// TESTRESPONSE[s/"upgrade_mode": false/"upgrade_mode": $body.upgrade_mode/] diff --git a/docs/reference/ml/apis/get-overall-buckets.asciidoc b/docs/reference/ml/apis/get-overall-buckets.asciidoc index d8592e6516bbb..3ec6c70ed3ab2 100644 --- a/docs/reference/ml/apis/get-overall-buckets.asciidoc +++ b/docs/reference/ml/apis/get-overall-buckets.asciidoc @@ -22,24 +22,21 @@ bucket results of multiple jobs. You can summarize the bucket results for all jobs by using `_all` or by specifying `*` as the ``. -An overall bucket has a span equal to the largest `bucket_span` value for the -specified jobs. - -The `overall_score` is calculated by combining the scores of all -the buckets within the overall bucket span. First, the maximum `anomaly_score` per -job in the overall bucket is calculated. Then the `top_n` of those scores are +By default, an overall bucket has a span equal to the largest bucket span of the +specified jobs. To override that behavior, use the optional +`bucket_span` parameter. To learn more about the concept of buckets, see +{stack-ov}/ml-buckets.html[Buckets]. + +The `overall_score` is calculated by combining the scores of all the buckets +within the overall bucket span. First, the maximum `anomaly_score` per job in +the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune -the `overall_score` so that it is more or less sensitive to the number -of jobs that detect an anomaly at the same time. For example, if you set `top_n` -to `1`, the `overall_score` is the maximum bucket -score in the overall bucket. Alternatively, if you set `top_n` to the number of -jobs, the `overall_score` is high only when all jobs detect anomalies in that -overall bucket. - -In addition, the optional parameter `bucket_span` may be used in order -to request overall buckets that span longer than the largest job's `bucket_span`. -When set, the `overall_score` will be the max `overall_score` of the corresponding -overall buckets with a span equal to the largest job's `bucket_span`. +the `overall_score` so that it is more or less sensitive to the number of jobs +that detect an anomaly at the same time. For example, if you set `top_n` +to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is +high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the +`overall_score` is the maximum `overall_score` of the overall buckets that have +a span equal to the jobs' largest bucket span. ==== Path Parameters @@ -55,8 +52,8 @@ overall buckets with a span equal to the largest job's `bucket_span`. `bucket_span`:: (string) The span of the overall buckets. Must be greater or equal - to the largest job's `bucket_span`. 
Defaults to the largest job's `bucket_span`. - + to the largest bucket span of the specified jobs, which is the default value. + `end`:: (string) Returns overall buckets with timestamps earlier than this time. diff --git a/docs/reference/ml/apis/get-snapshot.asciidoc b/docs/reference/ml/apis/get-snapshot.asciidoc index 4935a6e2d238f..8b6e30197314e 100644 --- a/docs/reference/ml/apis/get-snapshot.asciidoc +++ b/docs/reference/ml/apis/get-snapshot.asciidoc @@ -67,9 +67,6 @@ privileges to use this API. For more information, see ==== Examples -The following example gets model snapshot information for the -`it_ops_new_logs` job: - [source,js] -------------------------------------------------- GET _ml/anomaly_detectors/farequote/model_snapshots diff --git a/docs/reference/ml/apis/jobcounts.asciidoc b/docs/reference/ml/apis/jobcounts.asciidoc index c2e3aebb1a0a1..ad4e7ee85527c 100644 --- a/docs/reference/ml/apis/jobcounts.asciidoc +++ b/docs/reference/ml/apis/jobcounts.asciidoc @@ -61,8 +61,7 @@ or old results are deleted, the job counts are not reset. (long) The number of bucket results produced by the job. `earliest_record_timestamp`:: - (string) The timestamp of the earliest chronologically ordered record. - The datetime string is in ISO 8601 format. + (date) The timestamp of the earliest chronologically input document. `empty_bucket_count`:: (long) The number of buckets which did not contain any data. If your data contains many @@ -86,13 +85,13 @@ or old results are deleted, the job counts are not reset. (string) A unique identifier for the job. `last_data_time`:: - (datetime) The timestamp at which data was last analyzed, according to server time. + (date) The timestamp at which data was last analyzed, according to server time. `latest_empty_bucket_timestamp`:: (date) The timestamp of the last bucket that did not contain any data. `latest_record_timestamp`:: - (date) The timestamp of the last processed record. + (date) The timestamp of the latest chronologically input document. `latest_sparse_bucket_timestamp`:: (date) The timestamp of the last bucket that was considered sparse. diff --git a/docs/reference/ml/apis/post-calendar-event.asciidoc b/docs/reference/ml/apis/post-calendar-event.asciidoc index 616f9d704ffdc..5d122a5d6d1a8 100644 --- a/docs/reference/ml/apis/post-calendar-event.asciidoc +++ b/docs/reference/ml/apis/post-calendar-event.asciidoc @@ -27,7 +27,9 @@ of which must have a start time, end time, and description. ==== Request Body `events`:: - (array) A list of one of more scheduled events. See <>. + (array) A list of one of more scheduled events. The event's start and end times + may be specified as integer milliseconds since the epoch or as a string in ISO 8601 + format. See <>. 
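+
+A request body mixing both time representations might look like the following
+sketch; the calendar id, descriptions, and times are hypothetical:
+
+[source,js]
+--------------------------------------------------
+POST _ml/calendars/planned-outages/events
+{
+  "events": [
+    {
+      "description": "quarterly maintenance window",
+      "start_time": "2019-05-04T00:00:00Z",
+      "end_time": "2019-05-05T00:00:00Z"
+    },
+    {
+      "description": "network upgrade",
+      "start_time": 1559347200000,
+      "end_time": 1559433600000
+    }
+  ]
+}
+--------------------------------------------------
+// NOTCONSOLE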
==== Authorization diff --git a/docs/reference/ml/apis/preview-datafeed.asciidoc b/docs/reference/ml/apis/preview-datafeed.asciidoc index 7eca456c981dd..83af6a78057cc 100644 --- a/docs/reference/ml/apis/preview-datafeed.asciidoc +++ b/docs/reference/ml/apis/preview-datafeed.asciidoc @@ -2,6 +2,8 @@ [testenv="platinum"] [[ml-preview-datafeed]] === Preview {dfeeds} API + +[subs="attributes"] ++++ Preview {dfeeds} ++++ diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc index 18c611e97cac1..2e0f6700191cd 100644 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -2,6 +2,8 @@ [testenv="platinum"] [[ml-put-datafeed]] === Create {dfeeds} API + +[subs="attributes"] ++++ Create {dfeeds} ++++ @@ -19,6 +21,11 @@ Instantiates a {dfeed}. You must create a job before you create a {dfeed}. You can associate only one {dfeed} to each job. +IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {dfeed} + directly to the `.ml-config` index using the Elasticsearch index API. + If {es} {security-features} are enabled, do not give users `write` + privileges on the `.ml-config` index. + ==== Path Parameters @@ -38,6 +45,11 @@ You must create a job before you create a {dfeed}. You can associate only one (object) Specifies how data searches are split into time chunks. See <>. +`delayed_data_check_config`:: + (object) Specifies whether the data feed checks for missing data and + the size of the window. See + <>. + `frequency`:: (time units) The interval at which scheduled queries are made while the {dfeed} runs in real time. The default value is either the bucket span for short @@ -75,10 +87,6 @@ You must create a job before you create a {dfeed}. You can associate only one (unsigned integer) The `size` parameter that is used in {es} searches. The default value is `1000`. -`delayed_data_check_config`:: - (object) Specifies if and with how large a window should the data feed check - for missing data. See <>. - For more information about these properties, see <>. diff --git a/docs/reference/ml/apis/put-job.asciidoc b/docs/reference/ml/apis/put-job.asciidoc index 4abeebee3e47a..e3d80c276dc55 100644 --- a/docs/reference/ml/apis/put-job.asciidoc +++ b/docs/reference/ml/apis/put-job.asciidoc @@ -12,7 +12,13 @@ Instantiates a job. `PUT _ml/anomaly_detectors/` -//===== Description +===== Description + +IMPORTANT: You must use {kib} or this API to create a {ml} job. Do not put a job + directly to the `.ml-config` index using the Elasticsearch index API. + If {es} {security-features} are enabled, do not give users `write` + privileges on the `.ml-config` index. + ==== Path Parameters diff --git a/docs/reference/ml/apis/snapshotresource.asciidoc b/docs/reference/ml/apis/snapshotresource.asciidoc index 73e2e9b3b8bc1..13a26e26e8d70 100644 --- a/docs/reference/ml/apis/snapshotresource.asciidoc +++ b/docs/reference/ml/apis/snapshotresource.asciidoc @@ -3,7 +3,7 @@ [[ml-snapshot-resource]] === Model snapshot resources -Model snapshots are saved to disk periodically. +Model snapshots are saved to an internal index within the Elasticsearch cluster. By default, this is occurs approximately every 3 hours to 4 hours and is configurable with the `background_persist_interval` property. 
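As a sketch of how that persistence interval can be changed, the property can be set when a job is created or updated. The value below is illustrative and reuses the `total-requests` job from the examples elsewhere in these pages:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_update
{
  "background_persist_interval": "3h"
}
--------------------------------------------------
// NOTCONSOLE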
diff --git a/docs/reference/ml/apis/start-datafeed.asciidoc b/docs/reference/ml/apis/start-datafeed.asciidoc index 2ae92288a2666..aee237b72c837 100644 --- a/docs/reference/ml/apis/start-datafeed.asciidoc +++ b/docs/reference/ml/apis/start-datafeed.asciidoc @@ -2,6 +2,8 @@ [testenv="platinum"] [[ml-start-datafeed]] === Start {dfeeds} API + +[subs="attributes"] ++++ Start {dfeeds} ++++ diff --git a/docs/reference/ml/apis/stop-datafeed.asciidoc b/docs/reference/ml/apis/stop-datafeed.asciidoc index c021d9ad18d62..d43335778eef0 100644 --- a/docs/reference/ml/apis/stop-datafeed.asciidoc +++ b/docs/reference/ml/apis/stop-datafeed.asciidoc @@ -2,6 +2,8 @@ [testenv="platinum"] [[ml-stop-datafeed]] === Stop {dfeeds} API + +[subs="attributes"] ++++ Stop {dfeeds} ++++ @@ -33,6 +35,22 @@ comma-separated list of {dfeeds} or a wildcard expression. You can close all (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a wildcard expression. +[[ml-stop-datafeed-query-parms]] +==== {api-query-parms-title} + +`allow_no_datafeeds`:: + (Optional, boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {datafeeds} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `datafeeds` array when +there are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a `404` status code when there +are no matches or only partial matches. +-- ==== Request Body @@ -43,6 +61,12 @@ comma-separated list of {dfeeds} or a wildcard expression. You can close all (time) Controls the amount of time to wait until a {dfeed} stops. The default value is 20 seconds. +[[ml-stop-datafeed-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `allow_no_datafeeds` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. ==== Authorization diff --git a/docs/reference/ml/apis/update-datafeed.asciidoc b/docs/reference/ml/apis/update-datafeed.asciidoc index 1e888f823ff72..59ed45cb7a615 100644 --- a/docs/reference/ml/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/apis/update-datafeed.asciidoc @@ -2,24 +2,45 @@ [testenv="platinum"] [[ml-update-datafeed]] === Update {dfeeds} API + +[subs="attributes"] ++++ Update {dfeeds} ++++ Updates certain properties of a {dfeed}. -==== Request + +[[ml-update-datafeed-request]] +==== {api-request-title} `POST _ml/datafeeds//_update` -//===== Description -==== Path Parameters +[[ml-update-datafeed-prereqs]] +==== {api-prereq-title} + +* If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` +cluster privileges to use this API. For more information, see +{stack-ov}/security-privileges.html[Security privileges]. + + +[[ml-update-datafeed-desc]] +==== {api-description-title} + +If you update a {dfeed} property, you must stop and start the {dfeed} for the +change to be applied. -`feed_id` (required):: - (string) Identifier for the {dfeed} -==== Request Body +[[ml-update-datafeed-path-parms]] +==== {api-path-parms-title} + +`feed_id`:: + (Required, string) Identifier for the {dfeed}. 
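Because an update only takes effect after the {dfeed} is restarted, the overall flow is roughly stop, update, start, as sketched below. The `datafeed-total-requests` {dfeed} and the updated property are illustrative; see the full example later on this page:

[source,js]
--------------------------------------------------
POST _ml/datafeeds/datafeed-total-requests/_stop

POST _ml/datafeeds/datafeed-total-requests/_update
{
  "query_delay": "90s"
}

POST _ml/datafeeds/datafeed-total-requests/_start
--------------------------------------------------
// NOTCONSOLE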
+ + +[[ml-update-datafeed-request-body]] +==== {api-request-body-title} The following properties can be updated after the {dfeed} is created: @@ -30,19 +51,24 @@ The following properties can be updated after the {dfeed} is created: `chunking_config`:: (object) Specifies how data searches are split into time chunks. See <>. + +`delayed_data_check_config`:: + (object) Specifies whether the data feed checks for missing data and + the size of the window. See <>. `frequency`:: - (time units) The interval at which scheduled queries are made while the - {dfeed} runs in real time. The default value is either the bucket span for short - bucket spans, or, for longer bucket spans, a sensible fraction of the bucket - span. For example: `150s`. + (Optional, <>) The interval at which scheduled queries + are made while the {dfeed} runs in real time. The default value is either the + bucket span for short bucket spans, or, for longer bucket spans, a sensible + fraction of the bucket span. For example: `150s`. `indices`:: (array) An array of index names. Wildcards are supported. For example: `["it_ops_metrics", "server*"]`. `job_id`:: - (string) A numerical character string that uniquely identifies the job. + (Optional, string) A numerical character string that uniquely identifies the + {anomaly-job}. `query`:: (object) The {es} query domain-specific language (DSL). This value @@ -50,11 +76,21 @@ The following properties can be updated after the {dfeed} is created: options that are supported by {es} can be used, as this object is passed verbatim to {es}. By default, this property has the following value: `{"match_all": {"boost": 1}}`. ++ +-- +WARNING: If you change the query, then the analyzed data will also be changed, +therefore the required time to learn might be long and the understandability of +the results is unpredictable. +If you want to make significant changes to the source data, we would recommend +you clone it and create a second job containing the amendments. Let both run in +parallel and close one when you are satisfied with the results of the other job. +-- `query_delay`:: - (time units) The number of seconds behind real-time that data is queried. For - example, if data from 10:04 a.m. might not be searchable in {es} until - 10:06 a.m., set this property to 120 seconds. The default value is `60s`. + (Optional, <>) The number of seconds behind real-time + that data is queried. For example, if data from 10:04 a.m. might not be + searchable in {es} until 10:06 a.m., set this property to 120 seconds. The + default value is `60s`. `script_fields`:: (object) Specifies scripts that evaluate custom expressions and returns @@ -68,17 +104,10 @@ The following properties can be updated after the {dfeed} is created: (unsigned integer) The `size` parameter that is used in {es} searches. The default value is `1000`. -For more information about these properties, -see <>. - - -==== Authorization - -If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` -cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. +For more information about these properties, see <>. +[[ml-update-datafeed-security]] ==== Security Integration When {es} {security-features} are enabled, your {dfeed} remembers which roles the @@ -86,7 +115,8 @@ user who updated it had at the time of update and runs the query using those same roles. 
-==== Examples +[[ml-update-datafeed-example]] +==== {api-examples-title} The following example updates the query for the `datafeed-total-requests` {dfeed} so that only log entries of error level are analyzed: @@ -129,4 +159,4 @@ with the updated values: } } ---- -// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] +// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] \ No newline at end of file diff --git a/docs/reference/ml/apis/update-job.asciidoc b/docs/reference/ml/apis/update-job.asciidoc index 3382e7fe34675..7672fe3cc8d44 100644 --- a/docs/reference/ml/apis/update-job.asciidoc +++ b/docs/reference/ml/apis/update-job.asciidoc @@ -8,17 +8,19 @@ Updates certain properties of a job. -==== Request +[[ml-update-job-request]] +==== {api-request-title} `POST _ml/anomaly_detectors//_update` - -==== Path Parameters +[[ml-update-job-path-parms]] +==== {api-path-parms-title} `job_id` (required):: (string) Identifier for the job -==== Request Body +[[ml-update-job-request-body]] +==== {api-request-body-title} The following properties can be updated after the job is created: @@ -26,8 +28,11 @@ The following properties can be updated after the job is created: |======================================================================= |Name |Description |Requires Restart -|`analysis_limits`: `model_memory_limit` |The approximate maximum amount of -memory resources required for analytical processing. See <>. | Yes +|`analysis_limits`: `model_memory_limit` |The approximate maximum amount of +memory resources required for analytical processing. See <>. You +can update the `analysis_limits` only while the job is closed. The +`model_memory_limit` property value cannot be decreased below the current usage. +| Yes |`background_persist_interval` |Advanced configuration option. The time between each periodic persistence of the model. See <>. | Yes @@ -36,11 +41,18 @@ each periodic persistence of the model. See <>. | Yes |`description` |A description of the job. See <>. | No -|`detectors` |An array of <>. | No +|`detectors` |An array of detector update objects. | No + +|`detector_index` |The identifier of the detector to update (integer).| No + +|`detectors.description` |The new description for the detector.| No + +|`detectors.custom_rules` |The new list of <> +for the detector. | No |`groups` |A list of job groups. See <>. | No -|`model_plot_config`: `enabled` |If true, enables calculation and storage of the +|`model_plot_config.enabled` |If true, enables calculation and storage of the model bounds for each entity that is being analyzed. See <>. | No @@ -63,37 +75,20 @@ effect. [NOTE] -- -* You can update the `analysis_limits` only while the job is closed. -* The `model_memory_limit` property value cannot be decreased below the current usage. * If the `memory_status` property in the `model_size_stats` object has a value of `hard_limit`, this means that it was unable to process some data. You might want to re-run this job with an increased `model_memory_limit`. -- -[[ml-detector-update]] -==== Detector Update Objects - -A detector update object has the following properties: - -`detector_index`:: - (integer) The identifier of the detector to update. - -`description`:: - (string) The new description for the detector. - -`custom_rules`:: - (array) The new list of <> for the detector. - -No other detector property can be updated. 
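As an illustration of the detector update objects described in the table above, a request of roughly this shape updates only the description of the first detector of the `total-requests` job. The new description text is illustrative:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/total-requests/_update
{
  "detectors": [
    {
      "detector_index": 0,
      "description": "Sum of total requests"
    }
  ]
}
--------------------------------------------------
// NOTCONSOLE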
- -==== Authorization +[[ml-update-job-prereqs]] +==== {api-prereq-title} You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. - +{xpack-ref}/security-privileges.html[Security privileges]. -==== Examples +[[ml-update-job-example]] +==== {api-examples-title} The following example updates the `total-requests` job: diff --git a/docs/reference/ml/configuring.asciidoc b/docs/reference/ml/configuring.asciidoc index 9304a93d360c7..8e56c918ff11f 100644 --- a/docs/reference/ml/configuring.asciidoc +++ b/docs/reference/ml/configuring.asciidoc @@ -5,7 +5,7 @@ If you want to use {ml-features}, there must be at least one {ml} node in your cluster and all master-eligible nodes must have {ml} enabled. By default, all nodes are {ml} nodes. For more information about these settings, see -{ref}/modules-node.html#modules-node-xpack[{ml} nodes]. +{ref}/modules-node.html#ml-node[{ml} nodes]. To use the {ml-features} to analyze your data, you must create a job and send your data to that job. @@ -37,23 +37,16 @@ The scenarios in this section describe some best practices for generating useful * <> * <> -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/customurl.asciidoc include::customurl.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/aggregations.asciidoc include::aggregations.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/detector-custom-rules.asciidoc include::detector-custom-rules.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/categories.asciidoc include::categories.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/populations.asciidoc include::populations.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/transforms.asciidoc include::transforms.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/delayed-data-detection.asciidoc include::delayed-data-detection.asciidoc[] \ No newline at end of file diff --git a/docs/reference/ml/functions.asciidoc b/docs/reference/ml/functions.asciidoc index 48e56bb4627ee..075d4459d3d86 100644 --- a/docs/reference/ml/functions.asciidoc +++ b/docs/reference/ml/functions.asciidoc @@ -43,23 +43,16 @@ These functions effectively ignore empty buckets. 
* <> * <> -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/count.asciidoc include::functions/count.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/geo.asciidoc include::functions/geo.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/info.asciidoc include::functions/info.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/metric.asciidoc include::functions/metric.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/rare.asciidoc include::functions/rare.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/sum.asciidoc include::functions/sum.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/time.asciidoc include::functions/time.asciidoc[] diff --git a/docs/reference/ml/functions/rare.asciidoc b/docs/reference/ml/functions/rare.asciidoc index 87c212fbd1275..f370ce7849e6a 100644 --- a/docs/reference/ml/functions/rare.asciidoc +++ b/docs/reference/ml/functions/rare.asciidoc @@ -128,7 +128,8 @@ one or more rare URI paths many times compared to the population is regarded as highly anomalous. This analysis is based on the count of interactions with rare URI paths, not the number of different URI path values. -NOTE: To define a URI path as rare, the analytics consider the number of -distinct values that occur and not the number of times the URI path occurs. -If a single client IP visits a single unique URI path, this is rare, even if it +NOTE: A URI path is defined as rare in the same way as the status codes above: +the analytics consider the number of distinct values that occur and not the +number of times the URI path occurs. If a single client IP visits a single +unique URI path, this is rare, even if it occurs for that client IP in every bucket.
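For reference, a population analysis of this kind is driven by a detector of roughly the following shape; the function choice and field names are illustrative and would need to match your own data:

[source,js]
--------------------------------------------------
{
  "function" : "freq_rare",
  "by_field_name" : "uri_path",
  "over_field_name" : "clientip"
}
--------------------------------------------------
// NOTCONSOLE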
diff --git a/docs/reference/ml/transforms.asciidoc b/docs/reference/ml/transforms.asciidoc index a3e7df9fdf27a..e49b1ff32d4f2 100644 --- a/docs/reference/ml/transforms.asciidoc +++ b/docs/reference/ml/transforms.asciidoc @@ -134,7 +134,7 @@ PUT _ml/datafeeds/datafeed-test1 "total_error_count": { <2> "script": { "lang": "expression", - "inline": "doc['error_count'].value + doc['aborted_count'].value" + "source": "doc['error_count'].value + doc['aborted_count'].value" } } } @@ -239,7 +239,7 @@ PUT _ml/datafeeds/datafeed-test2 "my_script_field": { "script": { "lang": "painless", - "inline": "doc['some_field'].value + '_' + doc['another_field'].value" <2> + "source": "doc['some_field'].value + '_' + doc['another_field'].value" <2> } } } @@ -276,7 +276,7 @@ POST _ml/datafeeds/datafeed-test2/_update "my_script_field": { "script": { "lang": "painless", - "inline": "doc['another_field'].value.trim()" <1> + "source": "doc['another_field'].value.trim()" <1> } } } @@ -312,7 +312,7 @@ POST _ml/datafeeds/datafeed-test2/_update "my_script_field": { "script": { "lang": "painless", - "inline": "doc['some_field'].value.toLowerCase()" <1> + "source": "doc['some_field'].value.toLowerCase()" <1> } } } @@ -349,7 +349,7 @@ POST _ml/datafeeds/datafeed-test2/_update "my_script_field": { "script": { "lang": "painless", - "inline": "doc['some_field'].value.substring(0, 1).toUpperCase() + doc['some_field'].value.substring(1).toLowerCase()" <1> + "source": "doc['some_field'].value.substring(0, 1).toUpperCase() + doc['some_field'].value.substring(1).toLowerCase()" <1> } } } @@ -386,7 +386,7 @@ POST _ml/datafeeds/datafeed-test2/_update "my_script_field": { "script": { "lang": "painless", - "inline": "/\\s/.matcher(doc['tokenstring2'].value).replaceAll('_')" <1> + "source": "/\\s/.matcher(doc['tokenstring2'].value).replaceAll('_')" <1> } } } @@ -422,7 +422,7 @@ POST _ml/datafeeds/datafeed-test2/_update "my_script_field": { "script": { "lang": "painless", - "inline": "def m = /(.*)-bar-([0-9][0-9])/.matcher(doc['tokenstring3'].value); return m.find() ? m.group(1) + '_' + m.group(2) : '';" <1> + "source": "def m = /(.*)-bar-([0-9][0-9])/.matcher(doc['tokenstring3'].value); return m.find() ? m.group(1) + '_' + m.group(2) : '';" <1> } } } @@ -554,7 +554,7 @@ PUT _ml/datafeeds/datafeed-test4 "script_fields": { "my_coordinates": { "script": { - "inline": "doc['coords.lat'].value + ',' + doc['coords.lon'].value", + "source": "doc['coords.lat'].value + ',' + doc['coords.lon'].value", "lang": "painless" } } diff --git a/docs/reference/modules.asciidoc b/docs/reference/modules.asciidoc index f8b6c2784a075..2db62c2075611 100644 --- a/docs/reference/modules.asciidoc +++ b/docs/reference/modules.asciidoc @@ -46,20 +46,10 @@ The modules in this section are: A Java node client joins the cluster, but doesn't hold data or act as a master node. -<>:: - - A built-in scripting language for Elasticsearch that's designed to be as secure as possible. - <>:: Using plugins to extend Elasticsearch. -<>:: - - Custom scripting available in Lucene Expressions, ad Groovy. You can also - write scripts in the built-in scripting language, - <>. - <>:: Backup your data with snapshot/restore. @@ -78,10 +68,10 @@ The modules in this section are: Remote clusters are used in features that work by connecting across clusters on the transport layer. -<>:: +<>:: - Cross cluster search enables executing search requests across more than one cluster without joining them and acts - as a federated client across them. 
+ {ccs-cap} enables executing search requests across more than one cluster + without joining them and acts as a federated client across them. -- @@ -99,11 +89,8 @@ include::modules/network.asciidoc[] include::modules/node.asciidoc[] -:edit_url: include::modules/plugins.asciidoc[] -include::modules/scripting.asciidoc[] - include::modules/snapshots.asciidoc[] include::modules/threadpool.asciidoc[] diff --git a/docs/reference/modules/cross-cluster-search.asciidoc b/docs/reference/modules/cross-cluster-search.asciidoc index 186c8e8ee3837..31fed0ef09296 100644 --- a/docs/reference/modules/cross-cluster-search.asciidoc +++ b/docs/reference/modules/cross-cluster-search.asciidoc @@ -1,15 +1,24 @@ [[modules-cross-cluster-search]] -== Cross Cluster Search +== {ccs-cap} -The _cross cluster search_ feature allows any node to act as a federated client across -multiple clusters. A cross cluster search node won't join the remote cluster, instead -it connects to a remote cluster in a light fashion in order to execute -federated search requests. +*{ccs-cap}* lets you run a single search request against one or more +<>. For example, you can use a {ccs} to +filter and analyze log data stored on clusters in different data centers. + +IMPORTANT: {ccs-cap} requires <>. + +[float] +[[ccs-example]] +=== {ccs-cap} examples [float] -=== Using cross cluster search +[[ccs-remote-cluster-setup]] +==== Remote cluster setup + +To perform a {ccs}, you must have at least one remote cluster configured. -Cross-cluster search requires <>. +The following <> API request +adds three remote clusters:`cluster_one`, `cluster_two`, and `cluster_three`. [source,js] -------------------------------- @@ -40,10 +49,14 @@ PUT _cluster/settings -------------------------------- // CONSOLE // TEST[setup:host] -// TEST[s/127.0.0.1:9300/\${transport_host}/] +// TEST[s/127.0.0.1:930\d+/\${transport_host}/] -To search the `twitter` index on remote cluster `cluster_one` the index name -must be prefixed with the cluster alias separated by a `:` character: +[float] +[[ccs-search-remote-cluster]] +==== Search a single remote cluster + +The following <> API request searches the +`twitter` index on a single remote cluster, `cluster_one`. [source,js] -------------------------------------------------- @@ -60,6 +73,8 @@ GET /cluster_one:twitter/_search // TEST[continued] // TEST[setup:twitter] +The API returns the following response: + [source,js] -------------------------------------------------- { @@ -84,7 +99,7 @@ GET /cluster_one:twitter/_search "max_score": 1, "hits": [ { - "_index": "cluster_one:twitter", + "_index": "cluster_one:twitter", <1> "_type": "_doc", "_id": "0", "_score": 1, @@ -103,12 +118,22 @@ GET /cluster_one:twitter/_search // TESTRESPONSE[s/"max_score": 1/"max_score": "$body.hits.max_score"/] // TESTRESPONSE[s/"_score": 1/"_score": "$body.hits.hits.0._score"/] +<1> The search response body includes the name of the remote cluster in the +`_index` parameter. 
-Indices can also be searched with the same name on different clusters: +[float] +[[ccs-search-multi-remote-cluster]] +==== Search multiple remote clusters + +The following <> API request searches the `twitter` index on +three clusters: + +* Your local cluster +* Two remote clusters, `cluster_one` and `cluster_two` [source,js] -------------------------------------------------- -GET /cluster_one:twitter,twitter/_search +GET /twitter,cluster_one:twitter,cluster_two:twitter/_search { "query": { "match": { @@ -120,37 +145,34 @@ GET /cluster_one:twitter,twitter/_search // CONSOLE // TEST[continued] -Search results are disambiguated the same way as the indices are disambiguated in the request. Even if index names are -identical these indices will be treated as different indices when results are merged. All results retrieved from a -remote index -will be prefixed with their remote cluster name: +The API returns the following response: [source,js] -------------------------------------------------- { "took": 150, "timed_out": false, - "num_reduce_phases": 3, + "num_reduce_phases": 4, "_shards": { - "total": 2, - "successful": 2, + "total": 3, + "successful": 3, "failed": 0, "skipped": 0 }, "_clusters": { - "total": 2, - "successful": 2, + "total": 3, + "successful": 3, "skipped": 0 }, "hits": { "total" : { - "value": 2, + "value": 3, "relation": "eq" }, "max_score": 1, "hits": [ { - "_index": "cluster_one:twitter", + "_index": "cluster_one:twitter", <1> "_type": "_doc", "_id": "0", "_score": 1, @@ -162,7 +184,19 @@ will be prefixed with their remote cluster name: } }, { - "_index": "twitter", + "_index": "cluster_two:twitter", <2> + "_type": "_doc", + "_id": "0", + "_score": 1, + "_source": { + "user": "kimchy", + "date": "2009-11-15T14:12:12", + "message": "trying out Elasticsearch", + "likes": 0 + } + }, + { + "_index": "twitter", <3> "_type": "_doc", "_id": "0", "_score": 2, @@ -182,136 +216,125 @@ will be prefixed with their remote cluster name: // TESTRESPONSE[s/"_score": 1/"_score": "$body.hits.hits.0._score"/] // TESTRESPONSE[s/"_score": 2/"_score": "$body.hits.hits.1._score"/] +<1> This document came from `cluster_one`. +<2> This document came from `cluster_two`. +<3> This document's `_index` parameter doesn't include a cluster name. This +means the document came from the local cluster. + + [float] -=== Skipping disconnected clusters +[[_skipping_disconnected_clusters]] +=== Skip unavailable clusters + +By default, a {ccs} returns an error if *any* cluster in the request is +unavailable. + +To skip an unavailable cluster during a {ccs}, set the +<> cluster setting to `true`. -By default all remote clusters that are searched via Cross Cluster Search need to be available when -the search request is executed, otherwise the whole request fails and no search results are returned -despite some of the clusters are available. Remote clusters can be made optional through the -boolean `skip_unavailable` setting, set to `false` by default. +The following <> API request +changes `cluster_two`'s `skip_unavailable` setting to `true`. 
[source,js] -------------------------------- PUT _cluster/settings { "persistent": { - "cluster.remote.cluster_two.skip_unavailable": true <1> + "cluster.remote.cluster_two.skip_unavailable": true } } -------------------------------- // CONSOLE // TEST[continued] -<1> `cluster_two` is made optional -[source,js] --------------------------------------------------- -GET /cluster_one:twitter,cluster_two:twitter,twitter/_search <1> -{ - "query": { - "match": { - "user": "kimchy" - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] -<1> Search against the `twitter` index in `cluster_one`, `cluster_two` and also locally +If `cluster_two` is disconnected or unavailable during a {ccs}, {es} won't +include matching documents from that cluster in the final results. -[source,js] --------------------------------------------------- -{ - "took": 150, - "timed_out": false, - "num_reduce_phases": 3, - "_shards": { - "total": 2, - "successful": 2, - "failed": 0, - "skipped": 0 - }, - "_clusters": { <1> - "total": 3, - "successful": 2, - "skipped": 1 - }, - "hits": { - "total" : { - "value": 2, - "relation": "eq" - }, - "max_score": 1, - "hits": [ - { - "_index": "cluster_one:twitter", - "_type": "_doc", - "_id": "0", - "_score": 1, - "_source": { - "user": "kimchy", - "date": "2009-11-15T14:12:12", - "message": "trying out Elasticsearch", - "likes": 0 - } - }, - { - "_index": "twitter", - "_type": "_doc", - "_id": "0", - "_score": 2, - "_source": { - "user": "kimchy", - "date": "2009-11-15T14:12:12", - "message": "trying out Elasticsearch", - "likes": 0 - } - } - ] - } -} --------------------------------------------------- -// TESTRESPONSE[s/"took": 150/"took": "$body.took"/] -// TESTRESPONSE[s/"max_score": 1/"max_score": "$body.hits.max_score"/] -// TESTRESPONSE[s/"_score": 1/"_score": "$body.hits.hits.0._score"/] -// TESTRESPONSE[s/"_score": 2/"_score": "$body.hits.hits.1._score"/] -<1> The `clusters` section indicates that one cluster was unavailable and got skipped +[float] +[[ccs-works]] +=== How {ccs} works +Because {ccs} involves sending requests to remote clusters, any network delays +can impact search speed. To avoid slow searches, {ccs} offers two options for +handling network delays: + +<>:: +By default, {es} reduces the number of network roundtrips between remote +clusters. This reduces the impact of network delays on search speed. However, +{es} can't reduce network roundtrips for large search requests, such as those +including a <> or +<>. ++ +See <> to learn how this option works. + +<>:: +For search requests that include a scroll or inner hits, {es} sends multiple +outgoing and ingoing requests to each remote cluster. You can also choose this +option by setting the <> API's +<> parameter to `false`. +While typically slower, this approach may work well for networks with low +latency. ++ +See <> to learn how this option works. + + + +[float] +[[ccs-min-roundtrips]] +==== Minimize network roundtrips + +Here's how {ccs} works when you minimize network roundtrips. + +. You send a {ccs} request to your local cluster. A coordinating node in that +cluster receives and parses the request. ++ +image:images/ccs/ccs-min-roundtrip-client-request.png[] + +. The coordinating node sends a single search request to each cluster, including +its own. Each cluster performs the search request independently. ++ +image:images/ccs/ccs-min-roundtrip-cluster-search.png[] + +. Each remote cluster sends its search results back to the coordinating node. 
++ +image:images/ccs/ccs-min-roundtrip-cluster-results.png[] + +. After collecting results from each cluster, the coordinating node returns the +final results in the {ccs} response. ++ +image:images/ccs/ccs-min-roundtrip-client-response.png[] [float] -[[ccs-reduction]] -=== CCS reduction phase - -Cross-cluster search requests can be executed in two ways: - -- the CCS coordinating node minimizes network round-trips by sending one search -request to each cluster. Each cluster performs the search independently, -reducing and fetching results. Once the CCS node has received all the -responses, it performs another reduction and returns the relevant results back -to the user. This strategy is beneficial when there is network latency between -the CCS coordinating node and the remote clusters involved, which is typically -the case. A single request is sent to each remote cluster, at the cost of -retrieving `from` + `size` already fetched results. This is the default -strategy, used whenever possible. In case a scroll is provided, or inner hits -are requested as part of field collapsing, this strategy is not supported hence -network round-trips cannot be minimized and the following strategy is used -instead. - -- the CCS coordinating node sends a <> request to -each remote cluster, in order to collect information about their corresponding -remote indices involved in the search request and the shards where their data -is located. Once each cluster has responded to such request, the search -executes as if all shards were part of the same cluster. The coordinating node -sends one request to each shard involved, each shard executes the query and -returns its own results which are then reduced (and fetched, depending on the -<>) by the CCS coordinating node. -This strategy may be beneficial whenever there is very low network latency -between the CCS coordinating node and the remote clusters involved, as it -treats all shards the same, at the cost of sending many requests to each remote -cluster, which is problematic in presence of network latency. - -The <> supports the `ccs_minimize_roundtrips` -parameter, which defaults to `true` and can be set to `false` in case -minimizing network round-trips is not desirable. - -Note that all the communication between the nodes, regardless of which cluster -they belong to and the selected reduce mode, happens through the -<>. +[[ccs-unmin-roundtrips]] +==== Don't minimize network roundtrips + +Here's how {ccs} works when you don't minimize network roundtrips. + +. You send a {ccs} request to your local cluster. A coordinating node in that +cluster receives and parses the request. ++ +image:images/ccs/ccs-min-roundtrip-client-request.png[] + +. The coordinating node sends a <> API request to +each remote cluster. ++ +image:images/ccs/ccs-min-roundtrip-cluster-search.png[] + +. Each remote cluster sends its response back to the coordinating node. +This response contains information about the indices and shards the {ccs} +request will be executed on. ++ +image:images/ccs/ccs-min-roundtrip-cluster-results.png[] + +. The coordinating node sends a search request to each shard, including those in +its own cluster. Each shard performs the search request independently. ++ +image:images/ccs/ccs-dont-min-roundtrip-shard-search.png[] + +. Each shard sends its search results back to the coordinating node. ++ +image:images/ccs/ccs-dont-min-roundtrip-shard-results.png[] + +. 
After collecting results from each cluster, the coordinating node returns the +final results in the {ccs} response. ++ +image:images/ccs/ccs-min-roundtrip-client-response.png[] diff --git a/docs/reference/modules/discovery/adding-removing-nodes.asciidoc b/docs/reference/modules/discovery/adding-removing-nodes.asciidoc index ccc0e99125371..2a62bb5e49dd3 100644 --- a/docs/reference/modules/discovery/adding-removing-nodes.asciidoc +++ b/docs/reference/modules/discovery/adding-removing-nodes.asciidoc @@ -35,30 +35,36 @@ four of the nodes at once: to do so would leave only three nodes remaining, which is less than half of the voting configuration, which means the cluster cannot take any further actions. +More precisely, if you shut down half or more of the master-eligible nodes all +at the same time then the cluster will normally become unavailable. If this +happens then you can bring the cluster back online by starting the removed +nodes again. + As long as there are at least three master-eligible nodes in the cluster, as a general rule it is best to remove nodes one-at-a-time, allowing enough time for the cluster to <> the voting configuration and adapt the fault tolerance level to the new set of nodes. If there are only two master-eligible nodes remaining then neither node can be -safely removed since both are required to reliably make progress. You must first -inform Elasticsearch that one of the nodes should not be part of the voting -configuration, and that the voting power should instead be given to other nodes. -You can then take the excluded node offline without preventing the other node -from making progress. A node which is added to a voting configuration exclusion -list still works normally, but Elasticsearch tries to remove it from the voting -configuration so its vote is no longer required. Importantly, Elasticsearch -will never automatically move a node on the voting exclusions list back into the -voting configuration. Once an excluded node has been successfully +safely removed since both are required to reliably make progress. To remove one +of these nodes you must first inform {es} that it should not be part of the +voting configuration, and that the voting power should instead be given to the +other node. You can then take the excluded node offline without preventing the +other node from making progress. A node which is added to a voting +configuration exclusion list still works normally, but {es} tries to remove it +from the voting configuration so its vote is no longer required. Importantly, +{es} will never automatically move a node on the voting exclusions list back +into the voting configuration. Once an excluded node has been successfully auto-reconfigured out of the voting configuration, it is safe to shut it down without affecting the cluster's master-level availability. A node can be added -to the voting configuration exclusion list using the <> API. For example: +to the voting configuration exclusion list using the +<> API. 
For example: [source,js] -------------------------------------------------- -# Add node to voting configuration exclusions list and wait for the system to -# auto-reconfigure the node out of the voting configuration up to the default -# timeout of 30 seconds +# Add node to voting configuration exclusions list and wait for the system +# to auto-reconfigure the node out of the voting configuration up to the +# default timeout of 30 seconds POST /_cluster/voting_config_exclusions/node_name # Add node to voting configuration exclusions list and wait for diff --git a/docs/reference/modules/discovery/bootstrapping.asciidoc b/docs/reference/modules/discovery/bootstrapping.asciidoc index 5be2e170cc3b6..ba6cdc7127492 100644 --- a/docs/reference/modules/discovery/bootstrapping.asciidoc +++ b/docs/reference/modules/discovery/bootstrapping.asciidoc @@ -4,21 +4,34 @@ Starting an Elasticsearch cluster for the very first time requires the initial set of <> to be explicitly defined on one or more of the master-eligible nodes in the cluster. This is known as _cluster -bootstrapping_. This is only required the very first time the cluster starts -up: nodes that have already joined a cluster store this information in their -data folder and freshly-started nodes that are joining an existing cluster -obtain this information from the cluster's elected master. - -The initial set of master-eligible nodes is defined in the -<>. When you -start a master-eligible node, you can provide this setting on the command line -or in the `elasticsearch.yml` file. After the cluster has formed, this setting -is no longer required and is ignored. It need not be set -on master-ineligible nodes, nor on master-eligible nodes that are started to -join an existing cluster. Note that master-eligible nodes should use storage -that persists across restarts. If they do not, and -`cluster.initial_master_nodes` is set, and a full cluster restart occurs, then -another brand-new cluster will form and this may result in data loss. +bootstrapping_. This is only required the first time a cluster starts up: nodes +that have already joined a cluster store this information in their data folder +for use in a <>, and freshly-started nodes +that are joining a running cluster obtain this information from the cluster's +elected master. + +The initial set of master-eligible nodes is defined in the +<>. This should be +set to a list containing one of the following items for each master-eligible +node: + +- The <> of the node. +- The node's hostname if `node.name` is not set, because `node.name` defaults + to the node's hostname. You must use either the fully-qualified hostname or + the bare hostname <>. +- The IP address of the node's <>, if it is + not possible to use the `node.name` of the node. This is normally the IP + address to which <> resolves but + <>. +- The IP address and port of the node's publish address, in the form `IP:PORT`, + if it is not possible to use the `node.name` of the node and there are + multiple nodes sharing a single IP address. + +When you start a master-eligible node, you can provide this setting on the +command line or in the `elasticsearch.yml` file. After the cluster has formed, +this setting is no longer required. It should not be set for master-ineligible +nodes, master-eligible nodes joining an existing cluster, or cluster restarts. 
It is technically sufficient to set `cluster.initial_master_nodes` on a single master-eligible node in the cluster, and only to mention that single node in the @@ -27,10 +40,6 @@ fully formed. It is therefore better to bootstrap using at least three master-eligible nodes, each with a `cluster.initial_master_nodes` setting containing all three nodes. -NOTE: In alpha releases, all listed master-eligible nodes are required to be -discovered before bootstrapping can take place. This requirement will be relaxed -in production-ready releases. - WARNING: You must set `cluster.initial_master_nodes` to the same list of nodes on each node on which it is set in order to be sure that only a single cluster forms during bootstrapping and therefore to avoid the risk of data loss. @@ -46,20 +55,6 @@ cluster.initial_master_nodes: - master-c -------------------------------------------------- -Alternatively the IP addresses or hostnames (<>) can be used. If there is more than one Elasticsearch node -with the same IP address or hostname then the transport ports must also be given -to specify exactly which node is meant: - -[source,yaml] --------------------------------------------------- -cluster.initial_master_nodes: - - 10.0.10.101 - - 10.0.10.102:9300 - - 10.0.10.102:9301 - - master-node-hostname --------------------------------------------------- - Like all node settings, it is also possible to specify the initial set of master nodes on the command-line that is used to start Elasticsearch: @@ -68,6 +63,37 @@ nodes on the command-line that is used to start Elasticsearch: $ bin/elasticsearch -Ecluster.initial_master_nodes=master-a,master-b,master-c -------------------------------------------------- +[NOTE] +================================================== + +[[modules-discovery-bootstrap-cluster-fqdns]] The node names used in the +`cluster.initial_master_nodes` list must exactly match the `node.name` +properties of the nodes. By default the node name is set to the machine's +hostname which may or may not be fully-qualified depending on your system +configuration. If each node name is a fully-qualified domain name such as +`master-a.example.com` then you must use fully-qualified domain names in the +`cluster.initial_master_nodes` list too; conversely if your node names are bare +hostnames (without the `.example.com` suffix) then you must use bare hostnames +in the `cluster.initial_master_nodes` list. If you use a mix of fully-qualifed +and bare hostnames, or there is some other mismatch between `node.name` and +`cluster.initial_master_nodes`, then the cluster will not form successfully and +you will see log messages like the following. + +[source,text] +-------------------------------------------------- +[master-a.example.com] master not discovered yet, this node has +not previously joined a bootstrapped (v7+) cluster, and this +node must discover master-eligible nodes [master-a, master-b] to +bootstrap a cluster: have discovered [{master-b.example.com}{... +-------------------------------------------------- + +This message shows the node names `master-a.example.com` and +`master-b.example.com` as well as the `cluster.initial_master_nodes` entries +`master-a` and `master-b`, and it is clear from this message that they do not +match exactly. 
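For instance, if your node names are fully-qualified, keeping the two settings consistent looks roughly like this (hostnames illustrative):

[source,yaml]
--------------------------------------------------
node.name: master-a.example.com
cluster.initial_master_nodes:
  - master-a.example.com
  - master-b.example.com
--------------------------------------------------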
+ +================================================== + [float] ==== Choosing a cluster name @@ -97,3 +123,29 @@ in the <>: * `discovery.seed_providers` * `discovery.seed_hosts` * `cluster.initial_master_nodes` + +[NOTE] +================================================== + +[[modules-discovery-bootstrap-cluster-joining]] If you start an {es} node +without configuring these settings then it will start up in development mode and +auto-bootstrap itself into a new cluster. If you start some {es} nodes on +different hosts then by default they will not discover each other and will form +a different cluster on each host. {es} will not merge separate clusters together +after they have formed, even if you subsequently try and configure all the nodes +into a single cluster. This is because there is no way to merge these separate +clusters together without a risk of data loss. You can tell that you have formed +separate clusters by checking the cluster UUID reported by `GET /` on each node. +If you intended to form a single cluster then you should start again: + +* Take a <> of each of the single-host clusters if + you do not want to lose any data that they hold. Note that each cluster must + use its own snapshot repository. +* Shut down all the nodes. +* Completely wipe each node by deleting the contents of their + <>. +* Configure `cluster.initial_master_nodes` as described above. +* Restart all the nodes and verify that they have formed a single cluster. +* <> any snapshots as required. + +================================================== diff --git a/docs/reference/modules/discovery/discovery-settings.asciidoc b/docs/reference/modules/discovery/discovery-settings.asciidoc index 3386fd66b499f..32bfd0bf63681 100644 --- a/docs/reference/modules/discovery/discovery-settings.asciidoc +++ b/docs/reference/modules/discovery/discovery-settings.asciidoc @@ -25,6 +25,30 @@ Discovery and cluster formation are affected by the following settings: compatibility. Support for the old name will be removed in a future version. +`discovery.type`:: + + Specifies whether {es} should form a multiple-node cluster. By default, {es} + discovers other nodes when forming a cluster and allows other nodes to join + the cluster later. If `discovery.type` is set to `single-node`, {es} forms a + single-node cluster. For more information about when you might use this + setting, see <>. + +`cluster.initial_master_nodes`:: + + Sets the initial set of master-eligible nodes in a brand-new cluster. By + default this list is empty, meaning that this node expects to join a cluster + that has already been bootstrapped. See <>. + +[float] +==== Expert settings + +Discovery and cluster formation are also affected by the following +_expert-level_ settings, although it is not recommended to change any of these +from their default values. + +WARNING: If you adjust these settings then your cluster may not form correctly +or may become unstable or intolerant of certain failures. + `discovery.cluster_formation_warning_timeout`:: Sets how long a node will try to form a cluster before logging a warning @@ -49,6 +73,7 @@ Discovery and cluster formation are affected by the following settings: handshake. Defaults to `1s`. `discovery.request_peers_timeout`:: + Sets how long a node will wait after asking its peers again before considering the request to have failed. Defaults to `3s`. 
@@ -83,73 +108,78 @@ Discovery and cluster formation are affected by the following settings: Sets the amount to increase the upper bound on the wait before an election on each election failure. Note that this is _linear_ backoff. This defaults - to `100ms` + to `100ms`. Changing this setting from the default may cause your cluster + to fail to elect a master node. `cluster.election.duration`:: - Sets how long each election is allowed to take before a node considers it to - have failed and schedules a retry. This defaults to `500ms`. + Sets how long each election is allowed to take before a node considers it + to have failed and schedules a retry. This defaults to `500ms`. Changing + this setting from the default may cause your cluster to fail to elect a + master node. `cluster.election.initial_timeout`:: Sets the upper bound on how long a node will wait initially, or after the elected master fails, before attempting its first election. This defaults - to `100ms`. - + to `100ms`. Changing this setting from the default may cause your cluster + to fail to elect a master node. `cluster.election.max_timeout`:: Sets the maximum upper bound on how long a node will wait before attempting an first election, so that an network partition that lasts for a long time - does not result in excessively sparse elections. This defaults to `10s` + does not result in excessively sparse elections. This defaults to `10s`. + Changing this setting from the default may cause your cluster to fail to + elect a master node. [[fault-detection-settings]]`cluster.fault_detection.follower_check.interval`:: Sets how long the elected master waits between follower checks to each - other node in the cluster. Defaults to `1s`. + other node in the cluster. Defaults to `1s`. Changing this setting from the + default may cause your cluster to become unstable. `cluster.fault_detection.follower_check.timeout`:: Sets how long the elected master waits for a response to a follower check - before considering it to have failed. Defaults to `10s`. + before considering it to have failed. Defaults to `10s`. Changing this + setting from the default may cause your cluster to become unstable. `cluster.fault_detection.follower_check.retry_count`:: Sets how many consecutive follower check failures must occur to each node before the elected master considers that node to be faulty and removes it - from the cluster. Defaults to `3`. + from the cluster. Defaults to `3`. Changing this setting from the default + may cause your cluster to become unstable. `cluster.fault_detection.leader_check.interval`:: Sets how long each node waits between checks of the elected master. - Defaults to `1s`. + Defaults to `1s`. Changing this setting from the default may cause your + cluster to become unstable. `cluster.fault_detection.leader_check.timeout`:: Sets how long each node waits for a response to a leader check from the elected master before considering it to have failed. Defaults to `10s`. + Changing this setting from the default may cause your cluster to become + unstable. `cluster.fault_detection.leader_check.retry_count`:: Sets how many consecutive leader check failures must occur before a node considers the elected master to be faulty and attempts to find or elect a - new master. Defaults to `3`. + new master. Defaults to `3`. Changing this setting from the default may + cause your cluster to become unstable. `cluster.follower_lag.timeout`:: Sets how long the master node waits to receive acknowledgements for cluster - state updates from lagging nodes. 
The default value is `90s`. If a node does - not successfully apply the cluster state update within this period of time, - it is considered to have failed and is removed from the cluster. See + state updates from lagging nodes. The default value is `90s`. If a node + does not successfully apply the cluster state update within this period of + time, it is considered to have failed and is removed from the cluster. See <>. -`cluster.initial_master_nodes`:: - - Sets a list of the <> or transport addresses of the - initial set of master-eligible nodes in a brand-new cluster. By default - this list is empty, meaning that this node expects to join a cluster that - has already been bootstrapped. See <>. - `cluster.join.timeout`:: Sets how long a node will wait after sending a request to join a cluster @@ -165,8 +195,7 @@ Discovery and cluster formation are affected by the following settings: `cluster.publish.timeout`:: Sets how long the master node waits for each cluster state update to be - completely published to all nodes. The default value is `30s`. If this - period of time elapses, the cluster state change is rejected. See + completely published to all nodes. The default value is `30s`. See <>. [[no-master-block]]`cluster.no_master_block`:: diff --git a/docs/reference/modules/discovery/discovery.asciidoc b/docs/reference/modules/discovery/discovery.asciidoc index 2fa4e14701479..e7d34c481d174 100644 --- a/docs/reference/modules/discovery/discovery.asciidoc +++ b/docs/reference/modules/discovery/discovery.asciidoc @@ -11,10 +11,10 @@ This process starts with a list of _seed_ addresses from one or more of any master-eligible nodes that were in the last-known cluster. The process operates in two phases: First, each node probes the seed addresses by connecting to each address and attempting to identify the node to which it is -connected. Secondly it shares with the remote node a list of all of its known -master-eligible peers and the remote node responds with _its_ peers in turn. -The node then probes all the new nodes that it just discovered, requests their -peers, and so on. +connected and to verify that it is master-eligible. Secondly, if successful, it +shares with the remote node a list of all of its known master-eligible peers +and the remote node responds with _its_ peers in turn. The node then probes all +the new nodes that it just discovered, requests their peers, and so on. If the node is not master-eligible then it continues this discovery process until it has discovered an elected master node. If no elected master is diff --git a/docs/reference/modules/discovery/publishing.asciidoc b/docs/reference/modules/discovery/publishing.asciidoc index 46a543f0ce710..8452f0cd04f87 100644 --- a/docs/reference/modules/discovery/publishing.asciidoc +++ b/docs/reference/modules/discovery/publishing.asciidoc @@ -34,6 +34,15 @@ which defaults to `90s`. If a node has still not successfully applied the cluster state update within this time then it is considered to have failed and is removed from the cluster. +Cluster state updates are typically published as diffs to the previous cluster +state, which reduces the time and network bandwidth needed to publish a cluster +state update. For example, when updating the mappings for only a subset of the +indices in the cluster state, only the updates for those indices need to be +published to the nodes in the cluster, as long as those nodes have the previous +cluster state. 
If a node is missing the previous cluster state, for example +when rejoining a cluster, the master will publish the full cluster state to +that node so that it can receive future updates as diffs. + NOTE: {es} is a peer to peer based system, in which nodes communicate with one another directly. The high-throughput APIs (index, delete, search) do not normally interact with the master node. The responsibility of the master node diff --git a/docs/reference/modules/gateway.asciidoc b/docs/reference/modules/gateway.asciidoc index 038a4b24a853f..2b0783c9de0b0 100644 --- a/docs/reference/modules/gateway.asciidoc +++ b/docs/reference/modules/gateway.asciidoc @@ -49,6 +49,7 @@ as long as the following conditions are met: NOTE: These settings only take effect on a full cluster restart. +[[modules-gateway-dangling-indices]] === Dangling indices When a node joins the cluster, any shards stored in its local data diff --git a/docs/reference/modules/indices/circuit_breaker.asciidoc b/docs/reference/modules/indices/circuit_breaker.asciidoc index 523b2cb72e9d2..7969057330d72 100644 --- a/docs/reference/modules/indices/circuit_breaker.asciidoc +++ b/docs/reference/modules/indices/circuit_breaker.asciidoc @@ -6,8 +6,8 @@ causing an OutOfMemoryError. Each breaker specifies a limit for how much memory it can use. Additionally, there is a parent-level breaker that specifies the total amount of memory that can be used across all breakers. -These settings can be dynamically updated on a live cluster with the -<> API. +Except where noted otherwise, these settings can be dynamically updated on a +live cluster with the <> API. [[parent-circuit-breaker]] [float] @@ -17,8 +17,9 @@ The parent-level breaker can be configured with the following settings: `indices.breaker.total.use_real_memory`:: - Whether the parent breaker should take real memory usage into account (`true`) or only - consider the amount that is reserved by child circuit breakers (`false`). Defaults to `true`. + _Static_ setting determining whether the parent breaker should take real + memory usage into account (`true`) or only consider the amount that is + reserved by child circuit breakers (`false`). Defaults to `true`. `indices.breaker.total.limit`:: diff --git a/docs/reference/modules/indices/recovery.asciidoc b/docs/reference/modules/indices/recovery.asciidoc index d9e85c27105c8..613d0885d1db7 100644 --- a/docs/reference/modules/indices/recovery.asciidoc +++ b/docs/reference/modules/indices/recovery.asciidoc @@ -1,33 +1,40 @@ [[recovery]] === Indices Recovery -<> is the process used to build a new copy of a -shard on a node by copying data from the primary. {es} uses this peer recovery -process to rebuild shard copies that were lost if a node has failed, and uses -the same process when migrating a shard copy between nodes to rebalance the -cluster or to honor any changes to the <>. - -The following _expert_ setting can be set to manage the resources consumed by -peer recoveries: - -`indices.recovery.max_bytes_per_sec`:: - Limits the total inbound and outbound peer recovery traffic on each node. - Since this limit applies on each node, but there may be many nodes - performing peer recoveries concurrently, the total amount of peer recovery - traffic within a cluster may be much higher than this limit. If you set - this limit too high then there is a risk that ongoing peer recoveries will - consume an excess of bandwidth (or other resources) which could destabilize - the cluster. Defaults to `40mb`. 
- -`indices.recovery.max_concurrent_file_chunks`:: - Controls the number of file chunk requests that can be sent in parallel per recovery. - As multiple recoveries are already running in parallel (controlled by - cluster.routing.allocation.node_concurrent_recoveries), increasing this expert-level - setting might only help in situations where peer recovery of a single shard is not - reaching the total inbound and outbound peer recovery traffic as configured by - indices.recovery.max_bytes_per_sec, but is CPU-bound instead, typically when using - transport-level security or compression. Defaults to `2`. - -This setting can be dynamically updated on a live cluster with the -<> API. +Peer recovery syncs data from a primary shard to a new or +existing shard copy. + +Peer recovery automatically occurs when {es}: + +* Recreates a shard lost during node failure +* Relocates a shard to another node due to a cluster rebalance or changes to the +<> + +You can view a list of in-progress and completed recoveries using the +<>. + +[float] +==== Peer recovery settings + +`indices.recovery.max_bytes_per_sec` (<>):: +Limits total inbound and outbound recovery traffic for each node. +Defaults to `40mb`. ++ +This limit applies to nodes only. If multiple nodes in a cluster perform +recoveries at the same time, the cluster's total recovery traffic may exceed +this limit. ++ +If this limit is too high, ongoing recoveries may consume an excess +of bandwidth and other resources, which can destabilize the cluster. + +[float] +==== Expert peer recovery settings +You can use the following _expert_ setting to manage resources for peer +recoveries. + +`indices.recovery.max_concurrent_file_chunks` (<>, Expert):: +Number of file chunk requests sent in parallel for each recovery. Defaults to +`2`. ++ +You can increase the value of this setting when the recovery of a single shard +is not reaching the traffic limit set by `indices.recovery.max_bytes_per_sec`. \ No newline at end of file diff --git a/docs/reference/modules/indices/search-settings.asciidoc b/docs/reference/modules/indices/search-settings.asciidoc index ad75de1291cdc..071471a1803ca 100644 --- a/docs/reference/modules/indices/search-settings.asciidoc +++ b/docs/reference/modules/indices/search-settings.asciidoc @@ -3,14 +3,15 @@ The following _expert_ setting can be set to manage global search limits. +[[indices-query-bool-max-clause-count]] `indices.query.bool.max_clause_count`:: Defaults to `1024`. This setting limits the number of clauses a Lucene BooleanQuery can have. The default of 1024 is quite high and should normally be sufficient. This limit does not only affect Elasticsearchs `bool` query, but many other queries are rewritten to Lucene's -BooleanQuery internally. The limit is in place to prevent searches from becoming to large -and taking up too much CPU and memory. In case you consider to increase this setting, -make sure you exhausted all other options to avoid having to do this. Higher values can lead +BooleanQuery internally. The limit is in place to prevent searches from becoming too large +and taking up too much CPU and memory. In case you're considering increasing this setting, +make sure you've exhausted all other options to avoid having to do this. Higher values can lead to performance degradations and memory issues, especially in clusters with a high load or few resources. 
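+
+If you do decide to raise this limit after ruling out other options, the
+following `elasticsearch.yml` sketch shows the shape of the change; the value
+`4096` is purely illustrative and not a recommendation:
+
+[source,yaml]
+--------------------------------------------------
+# Static expert setting; it is read at startup, so changing it
+# requires a node restart to take effect.
+indices.query.bool.max_clause_count: 4096
+--------------------------------------------------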
diff --git a/docs/reference/modules/ml-node.asciidoc b/docs/reference/modules/ml-node.asciidoc deleted file mode 100644 index 5a907adfbbf3a..0000000000000 --- a/docs/reference/modules/ml-node.asciidoc +++ /dev/null @@ -1,116 +0,0 @@ -[float] -[[modules-node-xpack]] -== [xpack]#X-Pack node settings# - -//This content is referenced from the elastic/elasticsearch/docs/reference/modules/node.asciidoc - -If {xpack} is installed, there is an additional node type: - -<>:: - -A node that has `xpack.ml.enabled` and `node.ml` set to `true`, which is the -default behavior when {xpack} is installed. If you want to use {ml-features}, there must be at least one {ml} node in your cluster. For more -information about {ml-features}, -see {stack-ov}/xpack-ml.html[Machine learning in the {stack}]. - -IMPORTANT: Do not set use the `node.ml` setting unless {xpack} is installed. -Otherwise, the node fails to start. - -If {xpack} is installed, nodes are master-eligible, data, ingest, and {ml} -nodes by default. As the cluster grows and in particular if you have large -{ml} jobs, consider separating dedicated master-eligible nodes from dedicated -data nodes and dedicated {ml} nodes. - -To create a dedicated master-eligible node when {xpack} is installed, set: - -[source,yaml] -------------------- -node.master: true <1> -node.data: false <2> -node.ingest: false <3> -node.ml: false <4> -xpack.ml.enabled: true <5> -------------------- -<1> The `node.master` role is enabled by default. -<2> Disable the `node.data` role (enabled by default). -<3> Disable the `node.ingest` role (enabled by default). -<4> Disable the `node.ml` role (enabled by default in {xpack}). -<5> The `xpack.ml.enabled` setting is enabled by default in {xpack}. - -To create a dedicated data node when {xpack} is installed, set: - -[source,yaml] -------------------- -node.master: false <1> -node.data: true <2> -node.ingest: false <3> -node.ml: false <4> -------------------- -<1> Disable the `node.master` role (enabled by default). -<2> The `node.data` role is enabled by default. -<3> Disable the `node.ingest` role (enabled by default). -<4> Disable the `node.ml` role (enabled by default in {xpack}). - -To create a dedicated ingest node when {xpack} is installed, set: - -[source,yaml] -------------------- -node.master: false <1> -node.data: false <2> -node.ingest: true <3> -cluster.remote.connect: false <4> -node.ml: false <5> -------------------- -<1> Disable the `node.master` role (enabled by default). -<2> Disable the `node.data` role (enabled by default). -<3> The `node.ingest` role is enabled by default. -<4> Disable cross-cluster search (enabled by default). -<5> Disable the `node.ml` role (enabled by default in {xpack}). - -To create a dedicated coordinating node when {xpack} is installed, set: - -[source,yaml] -------------------- -node.master: false <1> -node.data: false <2> -node.ingest: false <3> -cluster.remote.connect: false <4> -node.ml: false <5> -------------------- -<1> Disable the `node.master` role (enabled by default). -<2> Disable the `node.data` role (enabled by default). -<3> Disable the `node.ingest` role (enabled by default). -<4> Disable cross-cluster search (enabled by default). -<5> Disable the `node.ml` role (enabled by default in {xpack}). - -[float] -[[ml-node]] -=== [xpack]#Machine learning node# - -The {ml-features} provide {ml} nodes, which run jobs and handle {ml} API -requests. If `xpack.ml.enabled` is set to true and `node.ml` is set to `false`, -the node can service API requests but it cannot run jobs. 
- -If you want to use {ml-features} in your cluster, you must enable {ml} -(set `xpack.ml.enabled` to `true`) on all master-eligible nodes. Do not use -these settings if you do not have {xpack} installed. - -For more information about these settings, see <>. - -To create a dedicated {ml} node, set: - -[source,yaml] -------------------- -node.master: false <1> -node.data: false <2> -node.ingest: false <3> -cluster.remote.connect: false <4> -node.ml: true <5> -xpack.ml.enabled: true <6> -------------------- -<1> Disable the `node.master` role (enabled by default). -<2> Disable the `node.data` role (enabled by default). -<3> Disable the `node.ingest` role (enabled by default). -<4> Disable cross-cluster search (enabled by default). -<5> The `node.ml` role is enabled by default in {xpack}. -<6> The `xpack.ml.enabled` setting is enabled by default in {xpack}. diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index e60b56fe12d74..6c9e72c18be27 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -24,7 +24,9 @@ this host to other nodes in the cluster. Accepts an IP address, hostname, a <>, or an array of any combination of these. Note that any values containing a `:` (e.g., an IPv6 address or containing one of the <>) must be -quoted because `:` is a special character in YAML. +quoted because `:` is a special character in YAML. `0.0.0.0` is an acceptable +IP address and will bind to all network interfaces. The value `0` has the +same effect as the value `0.0.0.0`. + Defaults to `_local_`. diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index a94f76c55de1f..51cd0073b0dab 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -2,7 +2,7 @@ == Node Any time that you start an instance of Elasticsearch, you are starting a -_node_. A collection of connected nodes is called a +_node_. A collection of connected nodes is called a <>. If you are running a single node of Elasticsearch, then you have a cluster of one node. @@ -13,8 +13,14 @@ is used exclusively for communication between nodes and the used only by external REST clients. All nodes know about all the other nodes in the cluster and can forward client -requests to the appropriate node. Besides that, each node serves one or more -purpose: +requests to the appropriate node. + +By default, a node is all of the following types: master-eligible, data, ingest, +and machine learning (if available). + +TIP: As the cluster grows and in particular if you have large {ml} jobs, +consider separating dedicated master-eligible nodes from dedicated data nodes +and dedicated {ml} nodes. <>:: @@ -35,6 +41,17 @@ and enrich the document before indexing. With a heavy ingest load, it makes sense to use dedicated ingest nodes and to mark the master and data nodes as `node.ingest: false`. +<>:: + +A node that has `xpack.ml.enabled` and `node.ml` set to `true`, which is the +default behavior in the {es} {default-dist}. If you want to use {ml-features}, +there must be at least one {ml} node in your cluster. For more information about +{ml-features}, see +{stack-ov}/xpack-ml.html[Machine learning in the {stack}]. ++ +IMPORTANT: If you use the {oss-dist}, do not set `node.ml`. Otherwise, the node +fails to start. + [NOTE] [[coordinating-node]] .Coordinating node @@ -86,24 +103,37 @@ better _not_ to use dedicated master nodes for this purpose. 
It is important for the stability of the cluster that master-eligible nodes do as little work as possible. -To create a dedicated master-eligible node, set: +To create a dedicated master-eligible node in the {default-dist}, set: [source,yaml] ------------------- node.master: true <1> node.data: false <2> node.ingest: false <3> -cluster.remote.connect: false <4> +node.ml: false <4> +xpack.ml.enabled: true <5> +cluster.remote.connect: false <6> ------------------- <1> The `node.master` role is enabled by default. <2> Disable the `node.data` role (enabled by default). <3> Disable the `node.ingest` role (enabled by default). -<4> Disable cross-cluster search (enabled by default). +<4> Disable the `node.ml` role (enabled by default). +<5> The `xpack.ml.enabled` setting is enabled by default. +<6> Disable {ccs} (enabled by default). -ifdef::include-xpack[] -NOTE: These settings apply only when {xpack} is not installed. To create a -dedicated master-eligible node when {xpack} is installed, see <>. -endif::include-xpack[] +To create a dedicated master-eligible node in the {oss-dist}, set: + +[source,yaml] +------------------- +node.master: true <1> +node.data: false <2> +node.ingest: false <3> +cluster.remote.connect: false <4> +------------------- +<1> The `node.master` role is enabled by default. +<2> Disable the `node.data` role (enabled by default). +<3> Disable the `node.ingest` role (enabled by default). +<4> Disable {ccs} (enabled by default). [float] [[data-node]] @@ -117,8 +147,22 @@ monitor these resources and to add more data nodes if they are overloaded. The main benefit of having dedicated data nodes is the separation of the master and data roles. -To create a dedicated data node, set: +To create a dedicated data node in the {default-dist}, set: +[source,yaml] +------------------- +node.master: false <1> +node.data: true <2> +node.ingest: false <3> +node.ml: false <4> +cluster.remote.connect: false <5> +------------------- +<1> Disable the `node.master` role (enabled by default). +<2> The `node.data` role is enabled by default. +<3> Disable the `node.ingest` role (enabled by default). +<4> Disable the `node.ml` role (enabled by default). +<5> Disable {ccs} (enabled by default). +To create a dedicated data node in the {oss-dist}, set: [source,yaml] ------------------- node.master: false <1> @@ -129,12 +173,7 @@ cluster.remote.connect: false <4> <1> Disable the `node.master` role (enabled by default). <2> The `node.data` role is enabled by default. <3> Disable the `node.ingest` role (enabled by default). -<4> Disable cross-cluster search (enabled by default). - -ifdef::include-xpack[] -NOTE: These settings apply only when {xpack} is not installed. To create a -dedicated data node when {xpack} is installed, see <>. -endif::include-xpack[] +<4> Disable {ccs} (enabled by default). [float] [[node-ingest-node]] @@ -145,24 +184,35 @@ ingest processors. Depending on the type of operations performed by the ingest processors and the required resources, it may make sense to have dedicated ingest nodes, that will only perform this specific task. -To create a dedicated ingest node, set: +To create a dedicated ingest node in the {default-dist}, set: [source,yaml] ------------------- node.master: false <1> node.data: false <2> node.ingest: true <3> -cluster.remote.connect: false <4> +node.ml: false <4> +cluster.remote.connect: false <5> ------------------- <1> Disable the `node.master` role (enabled by default). <2> Disable the `node.data` role (enabled by default). 
<3> The `node.ingest` role is enabled by default. -<4> Disable cross-cluster search (enabled by default). +<4> Disable the `node.ml` role (enabled by default). +<5> Disable {ccs} (enabled by default). -ifdef::include-xpack[] -NOTE: These settings apply only when {xpack} is not installed. To create a -dedicated ingest node when {xpack} is installed, see <>. -endif::include-xpack[] +To create a dedicated ingest node in the {oss-dist}, set: + +[source,yaml] +------------------- +node.master: false <1> +node.data: false <2> +node.ingest: true <3> +cluster.remote.connect: false <4> +------------------- +<1> Disable the `node.master` role (enabled by default). +<2> Disable the `node.data` role (enabled by default). +<3> The `node.ingest` role is enabled by default. +<4> Disable {ccs} (enabled by default). [float] [[coordinating-only-node]] @@ -185,7 +235,23 @@ acknowledgement of cluster state updates from every node! The benefit of coordinating only nodes should not be overstated -- data nodes can happily serve the same purpose. -To create a dedicated coordinating node, set: +To create a dedicated coordinating node in the {default-dist}, set: + +[source,yaml] +------------------- +node.master: false <1> +node.data: false <2> +node.ingest: false <3> +node.ml: false <4> +cluster.remote.connect: false <5> +------------------- +<1> Disable the `node.master` role (enabled by default). +<2> Disable the `node.data` role (enabled by default). +<3> Disable the `node.ingest` role (enabled by default). +<4> Disable the `node.ml` role (enabled by default). +<5> Disable {ccs} (enabled by default). + +To create a dedicated coordinating node in the {oss-dist}, set: [source,yaml] ------------------- @@ -197,12 +263,82 @@ cluster.remote.connect: false <4> <1> Disable the `node.master` role (enabled by default). <2> Disable the `node.data` role (enabled by default). <3> Disable the `node.ingest` role (enabled by default). -<4> Disable cross-cluster search (enabled by default). +<4> Disable {ccs} (enabled by default). + +[float] +[[ml-node]] +=== [xpack]#Machine learning node# + +The {ml-features} provide {ml} nodes, which run jobs and handle {ml} API +requests. If `xpack.ml.enabled` is set to true and `node.ml` is set to `false`, +the node can service API requests but it cannot run jobs. + +If you want to use {ml-features} in your cluster, you must enable {ml} +(set `xpack.ml.enabled` to `true`) on all master-eligible nodes. If you have the +{oss-dist}, do not use these settings. + +For more information about these settings, see <>. + +To create a dedicated {ml} node in the {default-dist}, set: + +[source,yaml] +------------------- +node.master: false <1> +node.data: false <2> +node.ingest: false <3> +node.ml: true <4> +xpack.ml.enabled: true <5> +cluster.remote.connect: false <6> +------------------- +<1> Disable the `node.master` role (enabled by default). +<2> Disable the `node.data` role (enabled by default). +<3> Disable the `node.ingest` role (enabled by default). +<4> The `node.ml` role is enabled by default. +<5> The `xpack.ml.enabled` setting is enabled by default. +<6> Disable {ccs} (enabled by default). + +[float] +[[change-node-role]] +=== Changing the role of a node -ifdef::include-xpack[] -NOTE: These settings apply only when {xpack} is not installed. To create a -dedicated coordinating node when {xpack} is installed, see <>. 
-endif::include-xpack[] +Each data node maintains the following data on disk: + +* the shard data for every shard allocated to that node, +* the index metadata corresponding with every shard allocated to that node, and +* the cluster-wide metadata, such as settings and index templates. + +Similarly, each master-eligible node maintains the following data on disk: + +* the index metadata for every index in the cluster, and +* the cluster-wide metadata, such as settings and index templates. + +Each node checks the contents of its data path at startup. If it discovers +unexpected data then it will refuse to start. This is to avoid importing +unwanted <> which can lead +to a red cluster health. To be more precise, nodes with `node.data: false` will +refuse to start if they find any shard data on disk at startup, and nodes with +both `node.master: false` and `node.data: false` will refuse to start if they +have any index metadata on disk at startup. + +It is possible to change the roles of a node by adjusting its +`elasticsearch.yml` file and restarting it. This is known as _repurposing_ a +node. In order to satisfy the checks for unexpected data described above, you +must perform some extra steps to prepare a node for repurposing when setting +its `node.data` or `node.master` roles to `false`: + +* If you want to repurpose a data node by changing `node.data` to `false` then + you should first use an <> to safely + migrate all the shard data onto other nodes in the cluster. + +* If you want to repurpose a node to have both `node.master: false` and + `node.data: false` then it is simplest to start a brand-new node with an + empty data path and the desired roles. You may find it safest to use an + <> to migrate the shard data + elsewhere in the cluster first. + +If it is not possible to follow these extra steps then you may be able to use +the <> tool to delete any +excess data that prevents a node from starting. [float] == Node data path settings @@ -255,7 +391,3 @@ lead to unexpected data loss. More node settings can be found in <>. Of particular note are the <>, the <> and the <>. - -ifdef::include-xpack[] -include::ml-node.asciidoc[] -endif::include-xpack[] diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 768eb7d6117bf..c3048ee2b86cf 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -2,46 +2,73 @@ == Remote clusters ifndef::include-xpack[] -The _remote clusters_ module allows establishing uni-directional connections to -a remote cluster. This functionality is used in -<>. +The _remote clusters_ module enables you to establish uni-directional +connections to a remote cluster. This functionality is used in +<>. endif::[] ifdef::include-xpack[] -The _remote clusters_ module allows establishing uni-directional connections to -a remote cluster. This functionality is used in cross-cluster replication, and -<>. +The _remote clusters_ module enables you to establish uni-directional +connections to a remote cluster. This functionality is used in +{stack-ov}/xpack-ccr.html[{ccr}] and +<>. endif::[] Remote cluster connections work by configuring a remote cluster and connecting -only to a limited number of nodes in the remote cluster. Each remote cluster is -referenced by a name and a list of seed nodes. 
When a remote cluster is -registered, its cluster state is retrieved from one of the seed nodes so that by -default up to three _gateway nodes_ are selected to be connected to as part of -remote cluster requests. Remote cluster connections consist of uni-directional -connections from the coordinating node to the previously selected remote nodes -only. It is possible to tag which nodes should be selected through node -attributes (see <>). - -Each node in a cluster that has remote clusters configured connects to one or -more _gateway nodes_ and uses them to federate requests to the remote cluster. +only to a limited number of nodes in that remote cluster. Each remote cluster +is referenced by a name and a list of seed nodes. When a remote cluster is +registered, its cluster state is retrieved from one of the seed nodes and up +to three _gateway nodes_ are selected to be connected to as part of remote +cluster requests. All the communication required between different clusters +goes through the <>. Remote cluster +connections consist of uni-directional connections from the coordinating +node to the selected remote _gateway nodes_ only. [float] -[[configuring-remote-clusters]] -=== Configuring Remote Clusters +[[gateway-nodes-selection]] +=== Gateway nodes selection + +The _gateway nodes_ selection depends on the following criteria: + +- *version*: Remote nodes must be compatible with the cluster they are +registered to. This is subject to rules that are similar to those for +<>. Any node can communicate with any other node on the same +major version (e.g. 7.0 can talk to any 7.x node). Only nodes on the last minor +version of a certain major version can communicate with nodes on the following +major version. Note that in the 6.x series, 6.8 can communicate with any 7.x +node, while 6.7 can only communicate with 7.0. Version compatibility is +symmetric, meaning that if 6.7 can communicate with 7.0, 7.0 can also +communicate with 6.7. The matrix below summarizes compatibility as described above. + +[cols="^,^,^,^,^,^,^,^"] +|==== +| Compatibility | 5.0->5.5 | 5.6 | 6.0->6.6 | 6.7 | 6.8 | 7.0 | 7.1->7.x +| 5.0->5.5 | Yes | Yes | No | No | No | No | No +| 5.6 | Yes | Yes | Yes | Yes | Yes | No | No +| 6.0->6.6 | No | Yes | Yes | Yes | Yes | No | No +| 6.7 | No | Yes | Yes | Yes | Yes | Yes | No +| 6.8 | No | Yes | Yes | Yes | Yes | Yes | Yes +| 7.0 | No | No | No | Yes | Yes | Yes | Yes +| 7.1->7.x | No | No | No | No | Yes | Yes | Yes +|==== + +- *role*: Dedicated master nodes never get selected. +- *attributes*: You can tag which nodes should be selected +(see <>), though such tagged nodes still have +to satisfy the two above requirements. -Remote clusters can be specified globally using -<> (which can be updated dynamically), -or local to individual nodes using the `elasticsearch.yml` file. +[float] +[[configuring-remote-clusters]] +=== Configuring remote clusters -If a remote cluster is configured via `elasticsearch.yml` only the nodes with -that configuration will be able to connect to the remote cluster. In other -words, functionality that relies on remote cluster requests will have to be -driven specifically from those nodes. Remote clusters set via the -<> will be available on every node -in the cluster. +You can configure remote clusters globally by using +<>, which you can update dynamically. +Alternatively, you can configure them locally on individual nodes by using the + `elasticsearch.yml` file. 
-The `elasticsearch.yml` config file for a node that connects to remote clusters -needs to list the remote clusters that should be connected to, for instance: +If you specify the settings in `elasticsearch.yml` files, only the nodes with +those settings can connect to the remote cluster. In other words, functionality +that relies on remote cluster requests must be driven specifically from those +nodes. For example: [source,yaml] -------------------------------- @@ -49,17 +76,26 @@ cluster: remote: cluster_one: <1> seeds: 127.0.0.1:9300 - cluster_two: <1> + transport.ping_schedule: 30s <2> + cluster_two: seeds: 127.0.0.1:9301 + transport.compress: true <3> + skip_unavailable: true <4> -------------------------------- <1> `cluster_one` and `cluster_two` are arbitrary _cluster aliases_ representing the connection to each cluster. These names are subsequently used to distinguish between local and remote indices. +<2> A keep-alive ping is configured for `cluster_one`. +<3> Compression is explicitly enabled for requests to `cluster_two`. +<4> Disconnected remote clusters are optional for `cluster_two`. + +For more information about the optional transport settings, see +<>. + -The equivalent example using the <> to add remote clusters to all nodes in the cluster would look like the -following: +If you use <>, the remote clusters +are available on every node in the cluster. For example: [source,js] -------------------------------- @@ -71,12 +107,15 @@ PUT _cluster/settings "cluster_one": { "seeds": [ "127.0.0.1:9300" - ] + ], + "transport.ping_schedule": "30s" }, "cluster_two": { "seeds": [ "127.0.0.1:9301" - ] + ], + "transport.compress": true, + "skip_unavailable": true }, "cluster_three": { "seeds": [ @@ -92,8 +131,8 @@ PUT _cluster/settings // TEST[setup:host] // TEST[s/127.0.0.1:9300/\${transport_host}/] -A remote cluster can be deleted from the cluster settings by setting its seeds -to `null`: +You can dynamically update the compression and ping schedule settings. However, +you must re-include seeds in the settings update request. For example: [source,js] -------------------------------- @@ -102,8 +141,45 @@ PUT _cluster/settings "persistent": { "cluster": { "remote": { - "cluster_three": { - "seeds": null <1> + "cluster_one": { + "seeds": [ + "127.0.0.1:9300" + ], + "transport.ping_schedule": "60s" + }, + "cluster_two": { + "seeds": [ + "127.0.0.1:9301" + ], + "transport.compress": false + } + } + } + } +} +-------------------------------- +// CONSOLE +// TEST[continued] + +NOTE: When the compression or ping schedule settings change, all the existing +node connections must close and re-open, which can cause in-flight requests to +fail. + +A remote cluster can be deleted from the cluster settings by setting its seeds and optional settings to `null` : + +[source,js] +-------------------------------- +PUT _cluster/settings +{ + "persistent": { + "cluster": { + "remote": { + "cluster_two": { <1> + "seeds": null, + "skip_unavailable": null, + "transport": { + "compress": null + } } } } @@ -112,8 +188,8 @@ PUT _cluster/settings -------------------------------- // CONSOLE // TEST[continued] -<1> `cluster_three` would be removed from the cluster settings, leaving -`cluster_one` and `cluster_two` intact. +<1> `cluster_two` would be removed from the cluster settings, leaving +`cluster_one` and `cluster_three` intact. [float] [[remote-cluster-settings]] @@ -159,7 +235,7 @@ PUT _cluster/settings clusters are kept alive. 
If set to `-1`, application-level ping messages to this remote cluster are not sent. If unset, application-level ping messages are sent according to the global `transport.ping_schedule` setting, which - defaults to ``-1` meaning that pings are not sent. + defaults to `-1` meaning that pings are not sent. `cluster.remote.${cluster_alias}.transport.compress`:: @@ -173,6 +249,6 @@ PUT _cluster/settings [[retrieve-remote-clusters-info]] === Retrieving remote clusters info -The <> allows to retrieve +You can use the <> to retrieve information about the configured remote clusters, as well as the remote nodes that the node is connected to. diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 7ee545d66cf0f..2bd58171c7ede 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -1,6 +1,7 @@ [[modules-snapshots]] -== Snapshot And Restore +== Snapshot and Restore +// tag::snapshot-intro[] A snapshot is a backup taken from a running Elasticsearch cluster. You can take a snapshot of individual indices or of the entire cluster and store it in a repository on a shared filesystem, and there are plugins that support remote @@ -25,28 +26,30 @@ backup may fail, reporting corruption and/or missing files, or may appear to have succeeded having silently lost some of its data. The only reliable way to back up a cluster is by using the snapshot and restore functionality. +// end::snapshot-intro[] + [float] === Version compatibility -A snapshot contains a copy of the on-disk data structures that make up an -index. This means that snapshots can only be restored to versions of -Elasticsearch that can read the indices: - -* A snapshot of an index created in 5.x can be restored to 6.x. -* A snapshot of an index created in 2.x can be restored to 5.x. -* A snapshot of an index created in 1.x can be restored to 2.x. +To restore a snapshot of an index, the index must be compatible with the {es} +version you are restoring to. {es} can read indices created in the current or +previous major version. Compatibility is based on the version in which the index +was _created_, not the version from which the snapshot was taken. -Conversely, snapshots of indices created in 1.x **cannot** be restored to 5.x -or 6.x, and snapshots of indices created in 2.x **cannot** be restored to 6.x. +You can restore snapshots of indices created in {prev-major-version} to {es} +{version}. Snapshots of 5.x or earlier indices cannot be restored to {es} +{version}. -Each snapshot can contain indices created in various versions of Elasticsearch, -and when restoring a snapshot it must be possible to restore all of the indices -into the target cluster. If any indices in a snapshot were created in an -incompatible version, you will not be able restore the snapshot. +[IMPORTANT] +==== +Snapshots can contain indices created in more than one version of {es}. +If you attempt to restore a snapshot with incompatible indices, the restore +will fail. -IMPORTANT: When backing up your data prior to an upgrade, keep in mind that you -won't be able to restore snapshots after you upgrade if they contain indices -created in a version that's incompatible with the upgrade version. +When backing up your data prior to an upgrade, keep in mind that to restore the +snapshot to the upgraded cluster, all indices in the snapshot must be compatible +with the upgrade version. 
+==== If you end up in a situation where you need to restore a snapshot of an index that is incompatible with the version of the cluster you are currently running, @@ -112,7 +115,7 @@ which returns: ----------------------------------- // TESTRESPONSE -To retrieve information about multiple repositories, specify a comma-delimited +To retrieve information about multiple repositories, specify a comma-delimited list of repositories. You can also use the * wildcard when specifying repository names. For example, the following request retrieves information about all of the snapshot repositories that start with `repo` or @@ -204,8 +207,8 @@ The following settings are supported: [horizontal] `location`:: Location of the snapshots. Mandatory. `compress`:: Turns on compression of the snapshot files. Compression is applied only to metadata files (index mapping and settings). Data files are not compressed. Defaults to `true`. -`chunk_size`:: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified in bytes or by - using size value notation, i.e. 1g, 10m, 5k. Defaults to `null` (unlimited chunk size). +`chunk_size`:: Big files can be broken down into chunks during snapshotting if needed. Specify the chunk size as a value and +unit, for example: `1GB`, `10MB`, `5KB`, `500B`. Defaults to `null` (unlimited chunk size). `max_restore_bytes_per_sec`:: Throttles per node restore rate. Defaults to `40mb` per second. `max_snapshot_bytes_per_sec`:: Throttles per node snapshot rate. Defaults to `40mb` per second. `readonly`:: Makes repository read-only. Defaults to `false`. @@ -498,6 +501,7 @@ When a repository is unregistered, Elasticsearch only removes the reference to t the snapshots. The snapshots themselves are left untouched and in place. [float] +[[restore-snapshot]] === Restore A snapshot can be restored using the following command: @@ -690,6 +694,7 @@ GET /_snapshot/my_backup/snapshot_1,snapshot_2/_status // TEST[continued] [float] +[[monitor-snapshot-restore-progress]] === Monitoring snapshot/restore progress There are several ways to monitor the progress of the snapshot and restores processes while they are running. Both diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index 38f8d40e67f23..3bea925f972e5 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -52,13 +52,14 @@ There are several thread pools, but the important ones include: Mainly for java client executing of action when listener threaded is set to true. Thread pool type is `scaling` with a default max of `min(10, (# of available processors)/2)`. -Changing a specific thread pool can be done by setting its type-specific parameters; for example, changing the `bulk` -thread pool to have more threads: +Changing a specific thread pool can be done by setting its type-specific +parameters; for example, changing the number of threads in the `write` thread +pool: [source,yaml] -------------------------------------------------- thread_pool: - bulk: + write: size: 30 -------------------------------------------------- @@ -69,6 +70,7 @@ thread_pool: The following are the types of thread pools and their respective parameters: [float] +[[fixed]] ==== `fixed` The `fixed` thread pool holds a fixed size of threads to handle the @@ -86,12 +88,13 @@ full, it will abort the request. 
[source,yaml] -------------------------------------------------- thread_pool: - bulk: + write: size: 30 queue_size: 1000 -------------------------------------------------- [float] +[[fixed-auto-queue-size]] ==== `fixed_auto_queue_size` experimental[] @@ -138,6 +141,7 @@ thread_pool: -------------------------------------------------- [float] +[[scaling]] ==== `scaling` The `scaling` thread pool holds a dynamic number of threads. This diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index 180a8190868f0..5e0697d2e3c79 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -5,7 +5,8 @@ The transport module is used for internal communication between nodes within the cluster. Each call that goes from one node to the other uses the transport module (for example, when an HTTP GET request is processed by one node, and should actually be processed by another node that holds -the data). +the data). The transport module is also used for the `TransportClient` in the +{es} Java API. The transport mechanism is completely asynchronous in nature, meaning that there is no blocking thread waiting for a response. The benefit of @@ -156,7 +157,7 @@ PUT _cluster/settings { "transient" : { "transport.tracer.include" : "*", - "transport.tracer.exclude" : "internal:discovery/zen/fd*" + "transport.tracer.exclude" : "internal:coordination/fault_detection/*" } } -------------------------------------------------- diff --git a/docs/reference/monitoring/collecting-monitoring-data.asciidoc b/docs/reference/monitoring/collecting-monitoring-data.asciidoc index 8ab0443edb167..2c9582597461a 100644 --- a/docs/reference/monitoring/collecting-monitoring-data.asciidoc +++ b/docs/reference/monitoring/collecting-monitoring-data.asciidoc @@ -1,10 +1,7 @@ [role="xpack"] [testenv="gold"] [[collecting-monitoring-data]] -=== Collecting monitoring data -++++ -Collecting monitoring data -++++ +== Collecting monitoring data If you enable the Elastic {monitor-features} in your cluster, you can optionally collect metrics about {es}. By default, monitoring is enabled but @@ -21,9 +18,6 @@ Advanced monitoring settings enable you to control how frequently data is collected, configure timeouts, and set the retention period for locally-stored monitoring indices. You can also adjust how monitoring data is displayed. -To learn about monitoring in general, see -{stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. - . Configure your cluster to collect monitoring data: .. Verify that the `xpack.monitoring.enabled` setting is `true`, which is its @@ -111,7 +105,7 @@ cluster that stores the monitoring data must have at least one <>. For more information about typical monitoring architectures, -see {stack-ov}/how-monitoring-works.html[How Monitoring Works]. +see <>. -- . If you choose to use an `http` exporter: @@ -200,9 +194,7 @@ xpack.monitoring.exporters: -- . Configure your cluster to route monitoring data from sources such as {kib}, -Beats, and {ls} to the monitoring cluster. For information about configuring -each product to collect and send monitoring data, see -{stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. +Beats, and {ls} to the monitoring cluster. . If you updated settings in the `elasticsearch.yml` files on your production cluster, restart {es}. See <> and <>. 
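+
+For reference, a minimal `http` exporter entry in `elasticsearch.yml` might
+look like the sketch below. The exporter name `cloud_monitoring` and the host
+are placeholders, and security and TLS related exporter settings are omitted:
+
+[source,yaml]
+--------------------------------------------------
+xpack.monitoring.exporters:
+  cloud_monitoring:                  # arbitrary exporter name
+    type: http                       # ship data to a separate monitoring cluster
+    host: ["http://es-mon-1:9200"]   # monitoring cluster endpoint (placeholder)
+--------------------------------------------------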
diff --git a/docs/reference/monitoring/collectors.asciidoc b/docs/reference/monitoring/collectors.asciidoc index bc77480561a8d..9f61be3234c2e 100644 --- a/docs/reference/monitoring/collectors.asciidoc +++ b/docs/reference/monitoring/collectors.asciidoc @@ -24,66 +24,59 @@ avoid many unnecessary calls. |======================= | Collector | Data Types | Description | Cluster Stats | `cluster_stats` -| Gathers details about the cluster state, including parts of -the actual cluster state (for example `GET /_cluster/state`) and statistics -about it (for example, `GET /_cluster/stats`). This produces a single document -type. In versions prior to X-Pack 5.5, this was actually three separate collectors -that resulted in three separate types: `cluster_stats`, `cluster_state`, and -`cluster_info`. In 5.5 and later, all three are combined into `cluster_stats`. -+ -This only runs on the _elected_ master node and the data collected -(`cluster_stats`) largely controls the UI. When this data is not present, it -indicates either a misconfiguration on the elected master node, timeouts related -to the collection of the data, or issues with storing the data. Only a single -document is produced per collection. +| Gathers details about the cluster state, including parts of the actual cluster +state (for example `GET /_cluster/state`) and statistics about it (for example, +`GET /_cluster/stats`). This produces a single document type. In versions prior +to X-Pack 5.5, this was actually three separate collectors that resulted in +three separate types: `cluster_stats`, `cluster_state`, and `cluster_info`. In +5.5 and later, all three are combined into `cluster_stats`. This only runs on +the _elected_ master node and the data collected (`cluster_stats`) largely +controls the UI. When this data is not present, it indicates either a +misconfiguration on the elected master node, timeouts related to the collection +of the data, or issues with storing the data. Only a single document is produced +per collection. | Index Stats | `indices_stats`, `index_stats` | Gathers details about the indices in the cluster, both in summary and individually. This creates many documents that represent parts of the index -statistics output (for example, `GET /_stats`). -+ -This information only needs to be collected once, so it is collected on the -_elected_ master node. The most common failure for this collector relates to an -extreme number of indices -- and therefore time to gather them -- resulting in -timeouts. One summary `indices_stats` document is produced per collection and one -`index_stats` document is produced per index, per collection. +statistics output (for example, `GET /_stats`). This information only needs to +be collected once, so it is collected on the _elected_ master node. The most +common failure for this collector relates to an extreme number of indices -- and +therefore time to gather them -- resulting in timeouts. One summary +`indices_stats` document is produced per collection and one `index_stats` +document is produced per index, per collection. | Index Recovery | `index_recovery` | Gathers details about index recovery in the cluster. Index recovery represents the assignment of _shards_ at the cluster level. If an index is not recovered, -it is not usable. This also corresponds to shard restoration via snapshots. -+ -This information only needs to be collected once, so it is collected on the -_elected_ master node. 
The most common failure for this collector relates to an -extreme number of shards -- and therefore time to gather them -- resulting in -timeouts. This creates a single document that contains all recoveries by default, -which can be quite large, but it gives the most accurate picture of recovery in -the production cluster. +it is not usable. This also corresponds to shard restoration via snapshots. This +information only needs to be collected once, so it is collected on the _elected_ +master node. The most common failure for this collector relates to an extreme +number of shards -- and therefore time to gather them -- resulting in timeouts. +This creates a single document that contains all recoveries by default, which +can be quite large, but it gives the most accurate picture of recovery in the +production cluster. | Shards | `shards` | Gathers details about all _allocated_ shards for all indices, particularly -including what node the shard is allocated to. -+ -This information only needs to be collected once, so it is collected on the -_elected_ master node. The collector uses the local cluster state to get the -routing table without any network timeout issues unlike most other collectors. -Each shard is represented by a separate monitoring document. +including what node the shard is allocated to. This information only needs to be +collected once, so it is collected on the _elected_ master node. The collector +uses the local cluster state to get the routing table without any network +timeout issues unlike most other collectors. Each shard is represented by a +separate monitoring document. | Jobs | `job_stats` -| Gathers details about all machine learning job statistics (for example, -`GET /_ml/anomaly_detectors/_stats`). -+ -This information only needs to be collected once, so it is collected on the -_elected_ master node. However, for the master node to be able to perform the -collection, the master node must have `xpack.ml.enabled` set to true (default) -and a license level that supports {ml}. +| Gathers details about all machine learning job statistics (for example, `GET +/_ml/anomaly_detectors/_stats`). This information only needs to be collected +once, so it is collected on the _elected_ master node. However, for the master +node to be able to perform the collection, the master node must have +`xpack.ml.enabled` set to true (default) and a license level that supports {ml}. | Node Stats | `node_stats` | Gathers details about the running node, such as memory utilization and CPU -usage (for example, `GET /_nodes/_local/stats`). -+ -This runs on _every_ node with {monitoring} enabled. One common failure -results in the timeout of the node stats request due to too many segment files. -As a result, the collector spends too much time waiting for the file system -stats to be calculated until it finally times out. A single `node_stats` -document is created per collection. This is collected per node to help to -discover issues with nodes communicating with each other, but not with the -monitoring cluster (for example, intermittent network issues or memory pressure). +usage (for example, `GET /_nodes/_local/stats`). This runs on _every_ node with +{monitoring} enabled. One common failure results in the timeout of the node +stats request due to too many segment files. As a result, the collector spends +too much time waiting for the file system stats to be calculated until it +finally times out. A single `node_stats` document is created per collection. 
+This is collected per node to help to discover issues with nodes communicating +with each other, but not with the monitoring cluster (for example, intermittent +network issues or memory pressure). |======================= {monitoring} uses a single threaded scheduler to run the collection of {es} @@ -117,7 +110,7 @@ For more information about the configuration options for the collectors, see [float] [[es-monitoring-stack]] -=== Collecting data from across the Elastic Stack +==== Collecting data from across the Elastic Stack {monitoring} in {es} also receives monitoring data from other parts of the Elastic Stack. In this way, it serves as an unscheduled monitoring data @@ -147,4 +140,4 @@ related to monitoring data, which can be very useful when there are a large number of Logstash nodes or Beats. For more information about typical monitoring architectures, see -{xpack-ref}/how-monitoring-works.html[How Monitoring Works]. +<>. diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index a161559d3f103..c8f41b18bfa91 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -1,7 +1,9 @@ [role="xpack"] [testenv="gold"] [[configuring-metricbeat]] -=== Collecting {es} monitoring data with {metricbeat} +== Collecting {es} monitoring data with {metricbeat} + +[subs="attributes"] ++++ Collecting monitoring data with {metricbeat} ++++ @@ -15,17 +17,16 @@ image::monitoring/images/metricbeat.png[Example monitoring architecture] To learn about monitoring in general, see {stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. -. Enable the collection of monitoring data. Set -`xpack.monitoring.collection.enabled` to `true` on each node in the production -cluster. By default, it is is disabled (`false`). -+ +//NOTE: The tagged regions are re-used in the Stack Overview. + +. Enable the collection of monitoring data. ++ -- -NOTE: You can specify this setting in either the `elasticsearch.yml` on each -node or across the cluster as a dynamic cluster setting. If {es} -{security-features} are enabled, you must have `monitor` cluster privileges to -view the cluster settings and `manage` cluster privileges to change them. +// tag::enable-collection[] +Set `xpack.monitoring.collection.enabled` to `true` on the +production cluster. By default, it is is disabled (`false`). -For example, you can use the following APIs to review and change this setting: +You can use the following APIs to review and change this setting: [source,js] ---------------------------------- @@ -40,42 +41,20 @@ PUT _cluster/settings ---------------------------------- // CONSOLE -For more information, see <> and <>. --- - -. Disable the default collection of {es} monitoring metrics. Set -`xpack.monitoring.elasticsearch.collection.enabled` to `false` on each node in -the production cluster. -+ --- -NOTE: You can specify this setting in either the `elasticsearch.yml` on each -node or across the cluster as a dynamic cluster setting. If {es} -{security-features} are enabled, you must have `monitor` cluster privileges to +If {es} {security-features} are enabled, you must have `monitor` cluster privileges to view the cluster settings and `manage` cluster privileges to change them. 
-For example, you can use the following API to change this setting: - -[source,js] ----------------------------------- -PUT _cluster/settings -{ - "persistent": { - "xpack.monitoring.elasticsearch.collection.enabled": false - } -} ----------------------------------- -// CONSOLE - -Leave `xpack.monitoring.enabled` set to its default value (`true`). +// end::enable-collection[] +For more information, see <> and <>. -- -. On each {es} node in the production cluster: - -.. {metricbeat-ref}/metricbeat-installation.html[Install {metricbeat}]. +. {metricbeat-ref}/metricbeat-installation.html[Install {metricbeat}] on each +{es} node in the production cluster. -.. Enable the {es} module in {metricbeat}. + +. Enable the {es} module in {metricbeat} on each {es} node. + + -- +// tag::enable-es-module[] For example, to enable the default configuration in the `modules.d` directory, run the following command: @@ -87,12 +66,16 @@ metricbeat modules enable elasticsearch For more information, see {metricbeat-ref}/configuration-metricbeat.html[Specify which modules to run] and {metricbeat-ref}/metricbeat-module-elasticsearch.html[{es} module]. + +// end::enable-es-module[] -- -.. Configure the {es} module in {metricbeat}. + +. Configure the {es} module in {metricbeat}. + + -- -You must specify the following settings in the `modules.d/elasticsearch.yml` file: +// tag::configure-es-module[] +For example, specify the following settings in the `modules.d/elasticsearch.yml` +file: [source,yaml] ---------------------------------- @@ -107,42 +90,49 @@ You must specify the following settings in the `modules.d/elasticsearch.yml` fil - node_stats - shard period: 10s - hosts: ["http://localhost:9200"] <1> - xpack.enabled: true <2> + hosts: ["http://localhost:9200"] + #username: "user" + #password: "secret" + xpack.enabled: true ---------------------------------- -<1> This setting identifies the host and port number that are used to access {es}. -<2> This setting ensures that {kib} can read this monitoring data successfully. -That is to say, it's stored in the same location and format as monitoring data -that is sent by <>. --- -.. If Elastic {security-features} are enabled, you must also provide a user ID -and password so that {metricbeat} can collect metrics successfully. +By default, the module collects {es} monitoring metrics from +`http://localhost:9200`. If that host and port number are not correct, you must +update the `hosts` setting. If you configured {es} to use encrypted +communications, you must access it via HTTPS. For example, use a `hosts` setting like `https://localhost:9200`. +// end::configure-es-module[] -... Create a user on the production cluster that has the +// tag::remote-monitoring-user[] +If Elastic {security-features} are enabled, you must also provide a user ID +and password so that {metricbeat} can collect metrics successfully: + +.. Create a user on the production cluster that has the {stack-ov}/built-in-roles.html[`remote_monitoring_collector` built-in role]. Alternatively, use the {stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. -... Add the `username` and `password` settings to the {es} module configuration +.. Add the `username` and `password` settings to the {es} module configuration file. -+ +// end::remote-monitoring-user[] -- -For example, add the following settings in the `modules.d/elasticsearch.yml` file: -[source,yaml] ----------------------------------- -- module: elasticsearch - ... 
- username: remote_monitoring_user - password: YOUR_PASSWORD ----------------------------------- +. Optional: Disable the system module in {metricbeat}. ++ -- +// tag::disable-system-module[] +By default, the {metricbeat-ref}/metricbeat-module-system.html[system module] is +enabled. The information it collects, however, is not shown on the *Monitoring* +page in {kib}. Unless you want to use that information for other purposes, run +the following command: -.. If you configured {es} to use <>, -you must access it via HTTPS. For example, use a `hosts` setting like -`https://localhost:9200` in the `modules.d/elasticsearch.yml` file. +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +metricbeat modules disable system +---------------------------------------------------------------------- + +// end::disable-system-module[] +-- -.. Identify where to send the monitoring data. + +. Identify where to send the monitoring data. + + -- TIP: In production environments, we strongly recommend using a separate cluster @@ -157,48 +147,68 @@ configuration file (`metricbeat.yml`): [source,yaml] ---------------------------------- output.elasticsearch: + # Array of hosts to connect to. hosts: ["http://es-mon-1:9200", "http://es-mon2:9200"] <1> + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" ---------------------------------- <1> In this example, the data is stored on a monitoring cluster with nodes -`es-mon-1` and `es-mon-2`. +`es-mon-1` and `es-mon-2`. + +If you configured the monitoring cluster to use encrypted communications, you +must access it via HTTPS. For example, use a `hosts` setting like +`https://es-mon-1:9200`. IMPORTANT: The {es} {monitor-features} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one <>. -For more information about these configuration options, see -{metricbeat-ref}/elasticsearch-output.html[Configure the {es} output]. --- - -.. If {es} {security-features} are enabled on the monitoring cluster, you +If {es} {security-features} are enabled on the monitoring cluster, you must provide a valid user ID and password so that {metricbeat} can send metrics successfully. -... Create a user on the monitoring cluster that has the +.. Create a user on the monitoring cluster that has the {stack-ov}/built-in-roles.html[`remote_monitoring_agent` built-in role]. Alternatively, use the {stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. -... Add the `username` and `password` settings to the {es} output information in -the {metricbeat} configuration file (`metricbeat.yml`): +.. Add the `username` and `password` settings to the {es} output information in +the {metricbeat} configuration file. + +For more information about these configuration options, see +{metricbeat-ref}/elasticsearch-output.html[Configure the {es} output]. +-- + +. {metricbeat-ref}/metricbeat-starting.html[Start {metricbeat}] on each node. + +. Disable the default collection of {es} monitoring metrics. + + -- -[source,yaml] +// tag::disable-default-collection[] +Set `xpack.monitoring.elasticsearch.collection.enabled` to `false` on the +production cluster. + +You can use the following API to change this setting: + +[source,js] ---------------------------------- -output.elasticsearch: - ... 
- username: remote_monitoring_user - password: YOUR_PASSWORD +PUT _cluster/settings +{ + "persistent": { + "xpack.monitoring.elasticsearch.collection.enabled": false + } +} ---------------------------------- --- - -.. If you configured the monitoring cluster to use -<>, you must access it via -HTTPS. For example, use a `hosts` setting like `https://es-mon-1:9200` in the -`metricbeat.yml` file. +// CONSOLE -. <>. +If {es} {security-features} are enabled, you must have `monitor` cluster +privileges to view the cluster settings and `manage` cluster privileges +to change them. -. {metricbeat-ref}/metricbeat-starting.html[Start {metricbeat}]. +// end::disable-default-collection[] +-- . {kibana-ref}/monitoring-data.html[View the monitoring data in {kib}]. diff --git a/docs/reference/monitoring/configuring-monitoring.asciidoc b/docs/reference/monitoring/configuring-monitoring.asciidoc deleted file mode 100644 index 058dbde062e94..0000000000000 --- a/docs/reference/monitoring/configuring-monitoring.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -[role="xpack"] -[testenv="gold"] -[[configuring-monitoring]] -== Configuring monitoring in {es} -++++ -Configuring monitoring -++++ - -If you enable the Elastic {monitor-features} in your cluster, there are two -methods to collect metrics about {es}: - -* <> -* <> - -To learn about monitoring in general, see -{stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. - -include::collecting-monitoring-data.asciidoc[] -include::configuring-metricbeat.asciidoc[] -include::indices.asciidoc[] -include::{es-repo-dir}/settings/monitoring-settings.asciidoc[] \ No newline at end of file diff --git a/docs/reference/monitoring/exporters.asciidoc b/docs/reference/monitoring/exporters.asciidoc index fee09015dbbd9..742b24608f250 100644 --- a/docs/reference/monitoring/exporters.asciidoc +++ b/docs/reference/monitoring/exporters.asciidoc @@ -158,6 +158,3 @@ which is used to determine whether the resource should be replaced. The `version field value represents the latest version of {monitoring} that changed the resource. If a resource is edited by someone or something external to {monitoring}, those changes are lost the next time an automatic update occurs. - -include::local-export.asciidoc[] -include::http-export.asciidoc[] diff --git a/docs/reference/monitoring/how-monitoring-works.asciidoc b/docs/reference/monitoring/how-monitoring-works.asciidoc new file mode 100644 index 0000000000000..e80c04eceacb6 --- /dev/null +++ b/docs/reference/monitoring/how-monitoring-works.asciidoc @@ -0,0 +1,40 @@ +[role="xpack"] +[testenv="basic"] +[[how-monitoring-works]] +== How monitoring works +++++ +How it works +++++ + +Each {es} node, {ls} node, {kib} instance, and Beat is considered unique in the +cluster based on its persistent UUID, which is written to the +<> directory when the node or instance starts. + +Monitoring documents are just ordinary JSON documents built by monitoring each +{stack} component at some collection interval. If you want to alter the +templates for these indices, see <>. + +Each component in the stack is responsible for monitoring itself and then +forwarding those documents to the {es} production cluster for both routing and +indexing (storage). The routing and indexing processes in {es} are handled by +what are called <> and +<>. + +Alternatively, in 6.4 and later, you can use {metricbeat} to collect +monitoring data about {kib} and ship it directly to the monitoring cluster, +rather than routing it through the production cluster. 
In 6.5 and later, you +can also use {metricbeat} to collect and ship data about {es}. + +To learn how to collect monitoring data, see: + +* <> +* <> +* {kibana-ref}/xpack-monitoring.html[Monitoring {kib}] +* {logstash-ref}/monitoring-logstash.html[Monitoring Logstash] +* Monitoring Beats: +** {auditbeat-ref}/monitoring.html[Auditbeat] +** {filebeat-ref}/monitoring.html[Filebeat] +** {heartbeat-ref}/monitoring.html[Heartbeat] +** {metricbeat-ref}/monitoring.html[Metricbeat] +** {packetbeat-ref}/monitoring.html[Packetbeat] +** {winlogbeat-ref}/monitoring.html[Winlogbeat] diff --git a/docs/reference/monitoring/http-export.asciidoc b/docs/reference/monitoring/http-export.asciidoc index fce22bd5d78be..eaca9904d04d1 100644 --- a/docs/reference/monitoring/http-export.asciidoc +++ b/docs/reference/monitoring/http-export.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[http-exporter]] -=== HTTP Exporters +=== HTTP exporters The `http` exporter is the preferred exporter in {monitoring} because it enables the use of a separate monitoring cluster. As a secondary benefit, it avoids @@ -91,27 +91,3 @@ monitoring clusters. For more information about the configuration options for the `http` exporter, see <>. - -[float] -[[http-exporter-dns]] -==== Using DNS Hosts in HTTP Exporters - -{monitoring} runs inside of the JVM security manager. When the JVM has the -security manager enabled, the JVM changes the duration so that it caches DNS -lookups indefinitely (for example, the mapping of a DNS hostname to an IP -address). For this reason, if you are in an environment where the DNS response -might change from time-to-time (for example, talking to any load balanced cloud -provider), you are strongly discouraged from using DNS hostnames. - -Alternatively, you can set the JVM security property `networkaddress.cache.ttl`, -which accepts values in seconds. This property must be set for the node's JVM that -uses {monitoring} for {es} when using DNS that can change IP addresses. If you -do not apply this setting, the connection consistently fails after the IP -address changes. - -IMPORTANT: JVM security properties are different than system properties. They -cannot be set at startup via `-D` system property settings and instead they must -be set in code before the security manager has been setup _or_, more -appropriately, in the `$JAVA_HOME/lib/security/java.security` file. - -Restarting the node (and therefore the JVM) results in its cache being flushed. diff --git a/docs/reference/monitoring/images/architecture10.png b/docs/reference/monitoring/images/architecture10.png new file mode 100644 index 0000000000000..dfcb417d812a5 Binary files /dev/null and b/docs/reference/monitoring/images/architecture10.png differ diff --git a/docs/reference/monitoring/images/architecture20.png b/docs/reference/monitoring/images/architecture20.png new file mode 100644 index 0000000000000..9a45ce0423e1c Binary files /dev/null and b/docs/reference/monitoring/images/architecture20.png differ diff --git a/docs/reference/monitoring/index.asciidoc b/docs/reference/monitoring/index.asciidoc index fbda72e0f979a..b6866bfffe998 100644 --- a/docs/reference/monitoring/index.asciidoc +++ b/docs/reference/monitoring/index.asciidoc @@ -1,54 +1,34 @@ [role="xpack"] [testenv="basic"] -[[es-monitoring]] -= Monitoring {es} +[[monitor-elasticsearch-cluster]] += Monitor a cluster [partintro] -- -The Elastic {monitor-features} enable you to easily monitor the health of -your {es} cluster. 
The monitoring metrics are collected from each node and -stored in {es} indices. - -TIP: In production environments, it is recommended to store the monitoring data -in a separate _monitoring cluster_. See -{stack-ov}/monitoring-production.html[Monitoring in a production environment]. - -Each {es} node is considered unique based on its persistent UUID, which is -written on first start to its <> directory, which -defaults to `./data`. - -All settings associated with monitoring in {es} must be set in either the -`elasticsearch.yml` file for each node or, where possible, in the dynamic -cluster settings. For more information, see <>. - -[[es-monitoring-overview]] -{es} is also at the core of monitoring across the {stack}. In all cases, -monitoring documents are just ordinary JSON documents built by monitoring each -{stack} component at some collection interval, then indexing those -documents into the monitoring cluster. - -Each component in the stack is responsible for monitoring itself and then -forwarding those documents to the {es} production cluster for both routing and -indexing (storage). The routing and indexing processes in {es} are handled by -what are called <> and -<>. - -Alternatively, in 6.4 and later, you can use {metricbeat} to collect -monitoring data about {kib} and ship it directly to the monitoring cluster, -rather than routing it through the production cluster. In 6.5 and later, you -can also use {metricbeat} to collect and ship data about {es}. - -You can view monitoring data from {kib} where it’s easy to spot issues at a -glance or delve into the system behavior over time to diagnose operational -issues. In addition to the built-in status warnings, you can also set up custom -alerts based on the data in the monitoring indices. - -For an introduction to monitoring your {stack}, including Beats, {ls}, and {kib}, -see {stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. - +The {stack} {monitor-features} provide a way to keep a pulse on the health and +performance of your {es} cluster. + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> -- +include::overview.asciidoc[] +include::how-monitoring-works.asciidoc[] +include::production.asciidoc[] +include::collecting-monitoring-data.asciidoc[] +include::pause-export.asciidoc[] +include::configuring-metricbeat.asciidoc[] +include::indices.asciidoc[] include::collectors.asciidoc[] include::exporters.asciidoc[] -include::pause-export.asciidoc[] +include::local-export.asciidoc[] +include::http-export.asciidoc[] +include::troubleshooting.asciidoc[] diff --git a/docs/reference/monitoring/indices.asciidoc b/docs/reference/monitoring/indices.asciidoc index 34cbced1c4332..a63c215428f73 100644 --- a/docs/reference/monitoring/indices.asciidoc +++ b/docs/reference/monitoring/indices.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[config-monitoring-indices]] -=== Configuring indices for monitoring +== Configuring indices for monitoring <> are used to configure the indices that store the monitoring data collected from a cluster. diff --git a/docs/reference/monitoring/local-export.asciidoc b/docs/reference/monitoring/local-export.asciidoc index 821a6b1fc0e13..8723b226ca766 100644 --- a/docs/reference/monitoring/local-export.asciidoc +++ b/docs/reference/monitoring/local-export.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[local-exporter]] -=== Local Exporters +=== Local exporters The `local` exporter is the default exporter in {monitoring}. It routes data back into the same (local) cluster. 
In other words, it uses the production @@ -56,7 +56,7 @@ For more information about the configuration options for the `local` exporter, see <>. [[local-exporter-cleaner]] -==== Cleaner Service +==== Cleaner service One feature of the `local` exporter, which is not present in the `http` exporter, is a cleaner service. The cleaner service runs once per day at 01:00 AM UTC on diff --git a/docs/reference/monitoring/overview.asciidoc b/docs/reference/monitoring/overview.asciidoc new file mode 100644 index 0000000000000..7fd6899ab38e1 --- /dev/null +++ b/docs/reference/monitoring/overview.asciidoc @@ -0,0 +1,40 @@ +[role="xpack"] +[testenv="basic"] +[[monitoring-overview]] +== Monitoring overview +++++ +Overview +++++ + +When you monitor a cluster, you collect data from the {es} nodes, {ls} nodes, +and {kib} instances in your cluster. + +All of the monitoring metrics are stored in {es}, which enables you to easily +visualize the data from {kib}. By default, the monitoring metrics are stored in +local indices. + +TIP: In production, we strongly recommend using a separate monitoring cluster. Using a separate monitoring cluster prevents production cluster outages from impacting your ability to access your monitoring data. It also prevents +monitoring activities from impacting the performance of your production cluster. For the same reason, we also recommend using a separate {kib} instance for +viewing the monitoring data. + +The following diagram illustrates a typical monitoring architecture with separate +production and monitoring clusters: + +image::monitoring/images/architecture10.png["A typical monitoring environment"] + +In 6.4 and later, you can use {metricbeat} to collect and ship data about +{kib}, rather than routing it through {es}. In 6.5 and later, you can also use +{metricbeat} to collect and ship data about {es}. For example: + +image::monitoring/images/architecture20.png[A typical monitoring environment that includes {metricbeat}] + +If you have the appropriate license, you can route data from multiple production +clusters to a single monitoring cluster. For more information about the +differences between various subscription levels, see: +https://www.elastic.co/subscriptions + +IMPORTANT: In general, the monitoring cluster and the clusters being monitored +should be running the same version of the stack. A monitoring cluster cannot +monitor production clusters running newer versions of the stack. If necessary, +the monitoring cluster can monitor production clusters running the latest +release of the previous major version. 
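+
+For example, you can verify which version each cluster is running by calling
+the root endpoint on one of its nodes (this request simply returns the node's
+`version` information):
+
+[source,js]
+----
+GET /
+----
+// CONSOLE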
\ No newline at end of file diff --git a/docs/reference/monitoring/pause-export.asciidoc b/docs/reference/monitoring/pause-export.asciidoc index 7a8bc664ffc38..6cf02a1f2401d 100644 --- a/docs/reference/monitoring/pause-export.asciidoc +++ b/docs/reference/monitoring/pause-export.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[pause-export]] -== Pausing Data Collection +=== Pausing data collection To stop generating {monitoring} data in {es}, disable data collection: diff --git a/docs/reference/monitoring/production.asciidoc b/docs/reference/monitoring/production.asciidoc new file mode 100644 index 0000000000000..f9ff935c45b21 --- /dev/null +++ b/docs/reference/monitoring/production.asciidoc @@ -0,0 +1,126 @@ +[role="xpack"] +[testenv="gold"] +[[monitoring-production]] +== Monitoring in a production environment + +In production, you should send monitoring data to a separate _monitoring cluster_ +so that historical data is available even when the nodes you are monitoring are +not. For example, you can use {metricbeat} to ship monitoring data about {kib} +and {es} to the monitoring cluster. +//If you are sending your data to the ESMS, see <>. + +If you have an appropriate license, using a dedicated monitoring cluster also +enables you to monitor multiple clusters from a central location. + +To store monitoring data in a separate cluster: + +. Set up the {es} cluster you want to use as the monitoring cluster. +For example, you might set up a two host cluster with the nodes `es-mon-1` and +`es-mon-2`. ++ +-- +[IMPORTANT] +=============================== +* To monitor an {es} 7.x cluster, you must run {es} +7.x on the monitoring cluster. +* There must be at least one <> in the monitoring +cluster; it does not need to be a dedicated ingest node. +=============================== +-- + +.. (Optional) Verify that the collection of monitoring data is disabled on the +monitoring cluster. By default, the `xpack.monitoring.collection.enabled` setting +is `false`. ++ +-- +For example, you can use the following APIs to review and change this setting: + +[source,js] +---------------------------------- +GET _cluster/settings + +PUT _cluster/settings +{ + "persistent": { + "xpack.monitoring.collection.enabled": false + } +} +---------------------------------- +// CONSOLE +-- + +.. If the {es} {security-features} are enabled on the monitoring cluster, create +users that can send and retrieve monitoring data. ++ +-- +NOTE: If you plan to use {kib} to view monitoring data, username and password +credentials must be valid on both the {kib} server and the monitoring cluster. + +-- + +*** If you plan to use {metricbeat} to collect data about {es} or {kib}, +create a user that has the `remote_monitoring_collector` built-in role and a +user that has the `remote_monitoring_agent` +{stack-ov}/built-in-roles.html#built-in-roles-remote-monitoring-agent[built-in role]. +Alternatively, use the `remote_monitoring_user` +{stack-ov}/built-in-users.html[built-in user]. + +*** If you plan to use HTTP exporters to route data through your production +cluster, create a user that has the `remote_monitoring_agent` +{stack-ov}/built-in-roles.html#built-in-roles-remote-monitoring-agent[built-in role]. 
++ +-- +For example, the +following request creates a `remote_monitor` user that has the +`remote_monitoring_agent` role: + +[source, sh] +--------------------------------------------------------------- +POST /_security/user/remote_monitor +{ + "password" : "changeme", + "roles" : [ "remote_monitoring_agent"], + "full_name" : "Internal Agent For Remote Monitoring" +} +--------------------------------------------------------------- +// CONSOLE +// TEST[skip:needs-gold+-license] + +Alternatively, use the `remote_monitoring_user` +{stack-ov}/built-in-users.html[built-in user]. +-- + +. Configure your production cluster to collect data and send it to the +monitoring cluster. + +** {ref}/configuring-metricbeat.html[Use {metricbeat}]. This option +is available in 6.5 and later versions. + +** {ref}/configuring-monitoring.html[Use HTTP exporters]. + +. (Optional) +{logstash-ref}/configuring-logstash.html[Configure {ls} to collect data and send it to the monitoring cluster]. ++ +-- +NOTE: You must configure HTTP exporters in the production cluster to route this +data to the monitoring cluster. It cannot be accomplished by using {metricbeat}. + +-- + +. (Optional) Configure {kib} to collect data and send it to the monitoring cluster: + +** {kibana-ref}/monitoring-metricbeat.html[Use {metricbeat}]. This +option is available in 6.4 and later versions. + +** {kibana-ref}/monitoring-kibana.html[Use HTTP exporters]. + +. (Optional) Create a dedicated {kib} instance for monitoring, rather than using +a single {kib} instance to access both your production cluster and monitoring +cluster. + +.. (Optional) Disable the collection of monitoring data in this {kib} instance. +Set the `xpack.monitoring.kibana.collection.enabled` setting to `false` in the +`kibana.yml` file. For more information about this setting, see +{kibana-ref}/monitoring-settings-kb.html[Monitoring settings in {kib}]. + +. {kibana-ref}/monitoring-data.html[Configure {kib} to retrieve and display the monitoring data]. diff --git a/docs/reference/monitoring/troubleshooting.asciidoc b/docs/reference/monitoring/troubleshooting.asciidoc new file mode 100644 index 0000000000000..0bc3cf0900a3a --- /dev/null +++ b/docs/reference/monitoring/troubleshooting.asciidoc @@ -0,0 +1,28 @@ +[[monitoring-troubleshooting]] +== Troubleshooting monitoring +++++ +Troubleshooting +++++ + +Use the information in this section to troubleshoot common problems and find +answers for frequently asked questions. See also +{logstash-ref}/monitoring-troubleshooting.html[Troubleshooting monitoring in {ls}]. + +For issues that you cannot fix yourself … we’re here to help. +If you are an existing Elastic customer with a support contract, please create +a ticket in the +https://support.elastic.co/customers/s/login/[Elastic Support portal]. +Or post in the https://discuss.elastic.co/[Elastic forum]. + +*Symptoms*: +There is no information about your cluster on the *Monitoring* page in {kib}. + +*Resolution*: +Check whether the appropriate indices exist on the monitoring cluster. For +example, use the {ref}/cat-indices.html[cat indices] command to verify that +there is a `.monitoring-kibana*` index for your {kib} monitoring data and a +`.monitoring-es*` index for your {es} monitoring data. If you are collecting +monitoring data by using {metricbeat} the indices have `-mb` in their names. If +the indices do not exist, review your configuration. For example, see + <>. 
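+
+You can list the monitoring indices that exist on the monitoring cluster with
+a request such as the following (the `.monitoring-*` index pattern shown here
+covers both the {kib} and {es} monitoring indices):
+
+[source,js]
+----
+GET /_cat/indices/.monitoring-*?v
+----
+// CONSOLE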
+ diff --git a/docs/reference/query-dsl.asciidoc b/docs/reference/query-dsl.asciidoc index 8d9c803b6159a..1a279101531c2 100644 --- a/docs/reference/query-dsl.asciidoc +++ b/docs/reference/query-dsl.asciidoc @@ -29,22 +29,24 @@ Query clauses behave differently depending on whether they are used in include::query-dsl/query_filter_context.asciidoc[] -include::query-dsl/match-all-query.asciidoc[] +include::query-dsl/compound-queries.asciidoc[] include::query-dsl/full-text-queries.asciidoc[] -include::query-dsl/term-level-queries.asciidoc[] - -include::query-dsl/compound-queries.asciidoc[] +include::query-dsl/geo-queries.asciidoc[] include::query-dsl/joining-queries.asciidoc[] -include::query-dsl/geo-queries.asciidoc[] +include::query-dsl/match-all-query.asciidoc[] + +include::query-dsl/span-queries.asciidoc[] include::query-dsl/special-queries.asciidoc[] -include::query-dsl/span-queries.asciidoc[] +include::query-dsl/term-level-queries.asciidoc[] include::query-dsl/minimum-should-match.asciidoc[] include::query-dsl/multi-term-rewrite.asciidoc[] + +include::query-dsl/regexp-syntax.asciidoc[] \ No newline at end of file diff --git a/docs/reference/query-dsl/bool-query.asciidoc b/docs/reference/query-dsl/bool-query.asciidoc index d4b5919454836..4a9a3a557e944 100644 --- a/docs/reference/query-dsl/bool-query.asciidoc +++ b/docs/reference/query-dsl/bool-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-bool-query]] -=== Bool Query +=== Boolean query +++++ +Boolean +++++ A query that matches documents matching boolean combinations of other queries. The bool query maps to Lucene `BooleanQuery`. It is built using @@ -58,6 +61,7 @@ POST _search -------------------------------------------------- // CONSOLE +[[score-bool-filter]] ==== Scoring with `bool.filter` Queries specified under the `filter` element have no effect on scoring -- diff --git a/docs/reference/query-dsl/boosting-query.asciidoc b/docs/reference/query-dsl/boosting-query.asciidoc index 5cd12ce1f00b7..4f2f60e5e2220 100644 --- a/docs/reference/query-dsl/boosting-query.asciidoc +++ b/docs/reference/query-dsl/boosting-query.asciidoc @@ -1,36 +1,64 @@ [[query-dsl-boosting-query]] -=== Boosting Query +=== Boosting query +++++ +Boosting +++++ -The `boosting` query can be used to effectively demote results that -match a given query. Unlike the "NOT" clause in bool query, this still -selects documents that contain undesirable terms, but reduces their -overall score. +Returns documents matching a `positive` query while reducing the +<> of documents that also match a +`negative` query. -It accepts a `positive` query and a `negative` query. -Only documents that match the `positive` query will be included -in the results list, but documents that also match the `negative` query -will be downgraded by multiplying the original `_score` of the document -with the `negative_boost`. +You can use the `boosting` query to demote certain documents without +excluding them from the search results. 
+ +[[boosting-query-ex-request]] +==== Example request [source,js] --------------------------------------------------- +---- GET /_search { "query": { "boosting" : { "positive" : { "term" : { - "field1" : "value1" + "text" : "apple" } }, "negative" : { "term" : { - "field2" : "value2" + "text" : "pie tart fruit crumble tree" } }, - "negative_boost" : 0.2 + "negative_boost" : 0.5 } } } --------------------------------------------------- +---- // CONSOLE + +[[boosting-top-level-params]] +==== Top-level parameters for `boosting` + +`positive`:: +(Required, query object) Query you wish to run. Any returned documents must +match this query. + +`negative`:: ++ +-- +(Required, query object) Query used to decrease the <> of matching documents. + +If a returned document matches the `positive` query and this query, the +`boosting` query calculates the final <> for +the document as follows: + +. Take the original relevance score from the `positive` query. +. Multiply the score by the `negative_boost` value. +-- + +`negative_boost`:: +(Required, float) Floating point number between `0` and `1.0` used to decrease +the <> of documents matching the +`negative` query. \ No newline at end of file diff --git a/docs/reference/query-dsl/compound-queries.asciidoc b/docs/reference/query-dsl/compound-queries.asciidoc index bee5787df1d28..d156950e35579 100644 --- a/docs/reference/query-dsl/compound-queries.asciidoc +++ b/docs/reference/query-dsl/compound-queries.asciidoc @@ -7,39 +7,34 @@ filter context. The queries in this group are: -<>:: - -A query which wraps another query, but executes it in filter context. All -matching documents are given the same ``constant'' `_score`. - <>:: - The default query for combining multiple leaf or compound query clauses, as `must`, `should`, `must_not`, or `filter` clauses. The `must` and `should` clauses have their scores combined -- the more matching clauses, the better -- while the `must_not` and `filter` clauses are executed in filter context. -<>:: +<>:: +Return documents which match a `positive` query, but reduce the score of +documents which also match a `negative` query. +<>:: +A query which wraps another query, but executes it in filter context. All +matching documents are given the same ``constant'' `_score`. + +<>:: A query which accepts multiple queries, and returns any documents which match any of the query clauses. While the `bool` query combines the scores from all matching queries, the `dis_max` query uses the score of the single best- matching query clause. <>:: - Modify the scores returned by the main query with functions to take into account factors like popularity, recency, distance, or custom algorithms implemented with scripting. -<>:: - -Return documents which match a `positive` query, but reduce the score of -documents which also match a `negative` query. 
- -include::constant-score-query.asciidoc[] include::bool-query.asciidoc[] -include::dis-max-query.asciidoc[] -include::function-score-query.asciidoc[] include::boosting-query.asciidoc[] +include::constant-score-query.asciidoc[] +include::dis-max-query.asciidoc[] +include::function-score-query.asciidoc[] \ No newline at end of file diff --git a/docs/reference/query-dsl/constant-score-query.asciidoc b/docs/reference/query-dsl/constant-score-query.asciidoc index aa7ee60aa5c6f..dd55e38b3b084 100644 --- a/docs/reference/query-dsl/constant-score-query.asciidoc +++ b/docs/reference/query-dsl/constant-score-query.asciidoc @@ -1,12 +1,15 @@ [[query-dsl-constant-score-query]] -=== Constant Score Query +=== Constant score query +++++ +Constant score +++++ -A query that wraps another query and simply returns a -constant score equal to the query boost for every document in the -filter. Maps to Lucene `ConstantScoreQuery`. +Wraps a <> and returns every matching +document with a <> equal to the `boost` +parameter value. [source,js] --------------------------------------------------- +---- GET /_search { "query": { @@ -18,8 +21,22 @@ GET /_search } } } --------------------------------------------------- +---- // CONSOLE -Filter clauses are executed in <>, -meaning that scoring is ignored and clauses are considered for caching. +[[constant-score-top-level-params]] +==== Top-level parameters for `constant_score` +`filter`:: ++ +-- +(Required, query object) <> you wish to run. +Any returned documents must match this query. + +Filter queries do not calculate <>. To +speed up performance, {es} automatically caches frequently used filter queries. +-- + +`boost`:: +(Optional, float) Floating point number used as the constant +<> for every document matching the +`filter` query. Defaults to `1.0`. \ No newline at end of file diff --git a/docs/reference/query-dsl/dis-max-query.asciidoc b/docs/reference/query-dsl/dis-max-query.asciidoc index 1f9fc53d66d9f..57c012802d0cb 100644 --- a/docs/reference/query-dsl/dis-max-query.asciidoc +++ b/docs/reference/query-dsl/dis-max-query.asciidoc @@ -1,48 +1,67 @@ [[query-dsl-dis-max-query]] -=== Dis Max Query - -A query that generates the union of documents produced by its -subqueries, and that scores each document with the maximum score for -that document as produced by any subquery, plus a tie breaking increment -for any additional matching subqueries. - -This is useful when searching for a word in multiple fields with -different boost factors (so that the fields cannot be combined -equivalently into a single search field). We want the primary score to -be the one associated with the highest boost, not the sum of the field -scores (as Boolean Query would give). If the query is "albino elephant" -this ensures that "albino" matching one field and "elephant" matching -another gets a higher score than "albino" matching both fields. To get -this result, use both Boolean Query and DisjunctionMax Query: for each -term a DisjunctionMaxQuery searches for it in each field, while the set -of these DisjunctionMaxQuery's is combined into a BooleanQuery. - -The tie breaker capability allows results that include the same term in -multiple fields to be judged better than results that include this term -in only the best of those multiple fields, without confusing this with -the better case of two different terms in the multiple fields.The -default `tie_breaker` is `0.0`. - -This query maps to Lucene `DisjunctionMaxQuery`. 
+=== Disjunction max query +++++ +Disjunction max +++++ + +Returns documents matching one or more wrapped queries, called query clauses or +clauses. + +If a returned document matches multiple query clauses, the `dis_max` query +assigns the document the highest relevance score from any matching clause, plus +a tie breaking increment for any additional matching subqueries. + +You can use the `dis_max` to search for a term in fields mapped with different +<> factors. + +[[query-dsl-dis-max-query-ex-request]] +==== Example request [source,js] --------------------------------------------------- +---- GET /_search { "query": { "dis_max" : { - "tie_breaker" : 0.7, - "boost" : 1.2, "queries" : [ - { - "term" : { "age" : 34 } - }, - { - "term" : { "age" : 35 } - } - ] + { "term" : { "title" : "Quick pets" }}, + { "term" : { "body" : "Quick pets" }} + ], + "tie_breaker" : 0.7 } } } --------------------------------------------------- +---- // CONSOLE + +[[query-dsl-dis-max-query-top-level-params]] +==== Top-level parameters for `dis_max` + +`queries`:: +(Required, array of query objects) Contains one or more query clauses. Returned +documents **must match one or more** of these queries. If a document matches +multiple queries, {es} uses the highest <>. + +`tie_breaker`:: ++ +-- +(Optional, float) Floating point number between `0` and `1.0` used to increase +the <> of documents matching multiple +query clauses. Defaults to `0.0`. + +You can use the `tie_breaker` value to assign higher relevance scores to +documents that contain the same term in multiple fields than documents that +contain this term in only the best of those multiple fields, without confusing +this with the better case of two different terms in the multiple fields. + +If a document matches multiple clauses, the `dis_max` query calculates the +relevance score for the document as follows: + +. Take the relevance score from a matching clause with the highest score. +. Multiply the score from any other matching clauses by the `tie_breaker` value. +. Add the highest score to the multiplied scores. + +If the `tie_breaker` value is greater than `0.0`, all matching clauses count, +but the clause with the highest score counts most. +-- \ No newline at end of file diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index b2e27c76e494d..92886e6a3c237 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -1,98 +1,54 @@ [[query-dsl-exists-query]] -=== Exists Query +=== Exists query +++++ +Exists +++++ -Returns documents that have at least one non-`null` value in the original field: +Returns documents that contain a value other than `null` or `[]` in a provided +field. + +[[exists-query-ex-request]] +==== Example request [source,js] --------------------------------------------------- +---- GET /_search { "query": { - "exists" : { "field" : "user" } - } -} --------------------------------------------------- -// CONSOLE - -For instance, these documents would all match the above query: - -[source,js] --------------------------------------------------- -{ "user": "jane" } -{ "user": "" } <1> -{ "user": "-" } <2> -{ "user": ["jane"] } -{ "user": ["jane", null ] } <3> --------------------------------------------------- -// NOTCONSOLE -<1> An empty string is a non-`null` value. -<2> Even though the `standard` analyzer would emit zero tokens, the original field is non-`null`. -<3> At least one non-`null` value is required. 
- -These documents would *not* match the above query: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [] } <1> -{ "user": [null] } <2> -{ "foo": "bar" } <3> --------------------------------------------------- -// NOTCONSOLE -<1> This field has no values. -<2> At least one non-`null` value is required. -<3> The `user` field is missing completely. - -[float] -==== `null_value` mapping - -If the field mapping includes the <> setting -then explicit `null` values are replaced with the specified `null_value`. For -instance, if the `user` field were mapped as follows: - -[source,js] --------------------------------------------------- -PUT /example -{ - "mappings": { - "properties": { - "user": { - "type": "keyword", - "null_value": "_null_" - } + "exists": { + "field": "user" + } } - } } --------------------------------------------------- +---- // CONSOLE -then explicit `null` values would be indexed as the string `_null_`, and the -following docs would match the `exists` filter: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [null] } --------------------------------------------------- -// NOTCONSOLE - -However, these docs--without explicit `null` values--would still have -no values in the `user` field and thus would not match the `exists` filter: +[[exists-query-top-level-params]] +==== Top-level parameters for `exists` +`field`:: +(Required, string) Name of the field you wish to search. ++ +To return a document, this field must exist and contain a value other +than `null` or `[]`. These values can include: ++ +* Empty strings, such as `""` or `"-"` +* Arrays containing `null` and another value, such as `[null, "foo"]` +* A custom <>, defined in field mapping + +[[exists-query-notes]] +==== Notes + +[[find-docs-null-values]] +===== Find documents with null values +To find documents that contain only `null` values or `[]` in a provided field, +use the `must_not` <> with the `exists` +query. + +The following search returns documents that contain only `null` values or `[]` +in the `user` field. [source,js] --------------------------------------------------- -{ "user": [] } -{ "foo": "bar" } --------------------------------------------------- -// NOTCONSOLE - -==== `missing` query - -There isn't a `missing` query. Instead use the `exists` query inside a -`must_not` clause as follows: - -[source,js] --------------------------------------------------- +---- GET /_search { "query": { @@ -105,7 +61,5 @@ GET /_search } } } --------------------------------------------------- -// CONSOLE - -This query returns documents that have no value in the user field. +---- +// CONSOLE \ No newline at end of file diff --git a/docs/reference/query-dsl/full-text-queries.asciidoc b/docs/reference/query-dsl/full-text-queries.asciidoc index f9714c1be3c8e..185541feface1 100644 --- a/docs/reference/query-dsl/full-text-queries.asciidoc +++ b/docs/reference/query-dsl/full-text-queries.asciidoc @@ -8,42 +8,38 @@ field being queried is <> and will apply each field's The queries in this group are: -<>:: +<>:: +A more specialized query which gives more preference to uncommon words. - The standard query for performing full text queries, including fuzzy matching - and phrase or proximity queries. +<>:: +A full text query that allows fine-grained control of the ordering and +proximity of matching terms -<>:: +<>:: +The standard query for performing full text queries, including fuzzy matching +and phrase or proximity queries. 
- Like the `match` query but used for matching exact phrases or word proximity matches. +<>:: +Like the `match` query but used for matching exact phrases or word proximity matches. <>:: - - The poor man's _search-as-you-type_. Like the `match_phrase` query, but does a wildcard search on the final word. +The poor man's _search-as-you-type_. Like the `match_phrase` query, but does a wildcard search on the final word. <>:: - - The multi-field version of the `match` query. - -<>:: - - A more specialized query which gives more preference to uncommon words. +The multi-field version of the `match` query. <>:: - - Supports the compact Lucene <>, - allowing you to specify AND|OR|NOT conditions and multi-field search - within a single query string. For expert users only. +Supports the compact Lucene <>, +allowing you to specify AND|OR|NOT conditions and multi-field search +within a single query string. For expert users only. <>:: +A simpler, more robust version of the `query_string` syntax suitable +for exposing directly to users. - A simpler, more robust version of the `query_string` syntax suitable - for exposing directly to users. - -<>:: +include::common-terms-query.asciidoc[] - A full text query that allows fine-grained control of the ordering and - proximity of matching terms +include::intervals-query.asciidoc[] include::match-query.asciidoc[] @@ -53,10 +49,6 @@ include::match-phrase-prefix-query.asciidoc[] include::multi-match-query.asciidoc[] -include::common-terms-query.asciidoc[] - include::query-string-query.asciidoc[] -include::simple-query-string-query.asciidoc[] - -include::intervals-query.asciidoc[] +include::simple-query-string-query.asciidoc[] \ No newline at end of file diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index 71fa61ee085e3..d5f20a57dc228 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-function-score-query]] -=== Function Score Query +=== Function score query +++++ +Function score +++++ The `function_score` allows you to modify the score of documents that are retrieved by a query. This can be useful if, for example, a score @@ -305,10 +308,14 @@ There are a number of options for the `field_value_factor` function: | Modifier | Meaning | `none` | Do not apply any multiplier to the field value -| `log` | Take the https://en.wikipedia.org/wiki/Common_logarithm[common logarithm] of the field value +| `log` | Take the https://en.wikipedia.org/wiki/Common_logarithm[common logarithm] of the field value. + Because this function will return a negative value and cause an error if used on values + between 0 and 1, it is recommended to use `log1p` instead. | `log1p` | Add 1 to the field value and take the common logarithm | `log2p` | Add 2 to the field value and take the common logarithm -| `ln` | Take the https://en.wikipedia.org/wiki/Natural_logarithm[natural logarithm] of the field value +| `ln` | Take the https://en.wikipedia.org/wiki/Natural_logarithm[natural logarithm] of the field value. + Because this function will return a negative value and cause an error if used on values + between 0 and 1, it is recommended to use `ln1p` instead. 
| `ln1p` | Add 1 to the field value and take the natural logarithm | `ln2p` | Add 2 to the field value and take the natural logarithm | `square` | Square the field value (multiply it by itself) @@ -321,14 +328,17 @@ There are a number of options for the `field_value_factor` function: Value used if the document doesn't have that field. The modifier and factor are still applied to it as though it were read from the document. +NOTE: Scores produced by the `field_value_score` function must be +non-negative, otherwise an error will be thrown. The `log` and `ln` modifiers +will produce negative values if used on values between 0 and 1. Be sure to limit +the values of the field with a range filter to avoid this, or use `log1p` and +`ln1p`. - Keep in mind that taking the log() of 0, or the square root of a negative number - is an illegal operation, and an exception will be thrown. Be sure to limit the - values of the field with a range filter to avoid this, or use `log1p` and - `ln1p`. +NOTE: Keep in mind that taking the log() of 0, or the square root of a +negative number is an illegal operation, and an exception will be thrown. Be +sure to limit the values of the field with a range filter to avoid this, or use +`log1p` and `ln1p`. - NOTE: Scores produced by the `field_value_score` function must be non-negative, - otherwise an error will be thrown. [[function-decay]] ==== Decay functions diff --git a/docs/reference/query-dsl/fuzzy-query.asciidoc b/docs/reference/query-dsl/fuzzy-query.asciidoc index 4be546916240f..06e6095361bb1 100644 --- a/docs/reference/query-dsl/fuzzy-query.asciidoc +++ b/docs/reference/query-dsl/fuzzy-query.asciidoc @@ -1,75 +1,102 @@ [[query-dsl-fuzzy-query]] -=== Fuzzy Query +=== Fuzzy query +++++ +Fuzzy +++++ -The fuzzy query uses similarity based on Levenshtein edit distance. +Returns documents that contain terms similar to the search term, as measured by +a http://en.wikipedia.org/wiki/Levenshtein_distance[Levenshtein edit distance]. -==== String fields +An edit distance is the number of one-character changes needed to turn one term +into another. These changes can include: -The `fuzzy` query generates matching terms that are within the -maximum edit distance specified in `fuzziness` and then checks the term -dictionary to find out which of those generated terms actually exist in the -index. The final query uses up to `max_expansions` matching terms. +* Changing a character (**b**ox → **f**ox) +* Removing a character (**b**lack → lack) +* Inserting a character (sic → sic**k**) +* Transposing two adjacent characters (**ac**t → **ca**t) -Here is a simple example: +To find similar terms, the `fuzzy` query creates a set of all possible +variations, or expansions, of the search term within a specified edit distance. +The query then returns exact matches for each expansion. 
+ +[[fuzzy-query-ex-request]] +==== Example requests + +[[fuzzy-query-ex-simple]] +===== Simple example [source,js] --------------------------------------------------- +---- GET /_search { "query": { - "fuzzy" : { "user" : "ki" } + "fuzzy": { + "user": { + "value": "ki" + } + } } } --------------------------------------------------- +---- // CONSOLE -Or with more advanced settings: +[[fuzzy-query-ex-advanced]] +===== Example using advanced parameters [source,js] --------------------------------------------------- +---- GET /_search { "query": { - "fuzzy" : { - "user" : { + "fuzzy": { + "user": { "value": "ki", - "boost": 1.0, - "fuzziness": 2, + "fuzziness": "AUTO", + "max_expansions": 50, "prefix_length": 0, - "max_expansions": 100 + "transpositions": true, + "rewrite": "constant_score" } } } } --------------------------------------------------- +---- // CONSOLE -[float] -===== Parameters +[[fuzzy-query-top-level-params]] +==== Top-level parameters for `fuzzy` +``:: +(Required, object) Field you wish to search. -[horizontal] -`fuzziness`:: - - The maximum edit distance. Defaults to `AUTO`. See <>. +[[fuzzy-query-field-params]] +==== Parameters for `` +`value`:: +(Required, string) Term you wish to find in the provided ``. -`prefix_length`:: +`fuzziness`:: +(Optional, string) Maximum edit distance allowed for matching. See <> +for valid values and more information. - The number of initial characters which will not be ``fuzzified''. This - helps to reduce the number of terms which must be examined. Defaults - to `0`. `max_expansions`:: ++ +-- +(Optional, integer) Maximum number of variations created. Defaults to `50`. - The maximum number of terms that the `fuzzy` query will expand to. - Defaults to `50`. - -`transpositions`:: - - Whether fuzzy transpositions (`ab` -> `ba`) are supported. - Default is `true`. +WARNING: Avoid using a high value in the `max_expansions` parameter, especially +if the `prefix_length` parameter value is `0`. High values in the +`max_expansions` parameter can cause poor performance due to the high number of +variations examined. +-- -WARNING: This query can be very heavy if `prefix_length` is set to `0` and if -`max_expansions` is set to a high number. It could result in every term in the -index being examined! +`prefix_length`:: +(Optional, integer) Number of beginning characters left unchanged when creating +expansions. Defaults to `0`. +`transpositions`:: +(Optional, boolean) Indicates whether edits include transpositions of two +adjacent characters (ab → ba). Defaults to `true`. +`rewrite`:: +(Optional, string) Method used to rewrite the query. For valid values and more +information, see the <>. \ No newline at end of file diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index 487e944c09e10..fdb9463dd68e2 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-geo-bounding-box-query]] -=== Geo Bounding Box Query +=== Geo-bounding box query +++++ +Geo-bounding box +++++ A query allowing to filter hits based on a point location using a bounding box. 
Assuming the following indexed document: diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index da7b0ecfd81e5..7a7f749687ee6 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-geo-distance-query]] -=== Geo Distance Query +=== Geo-distance query +++++ +Geo-distance +++++ Filters documents that include only hits that exists within a specific distance from a geo point. Assuming the following mapping and indexed diff --git a/docs/reference/query-dsl/geo-polygon-query.asciidoc b/docs/reference/query-dsl/geo-polygon-query.asciidoc index c33b227824bdf..062e44cf03d0c 100644 --- a/docs/reference/query-dsl/geo-polygon-query.asciidoc +++ b/docs/reference/query-dsl/geo-polygon-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-geo-polygon-query]] -=== Geo Polygon Query +=== Geo-polygon query +++++ +Geo-polygon +++++ A query returning hits that only fall within a polygon of points. Here is an example: diff --git a/docs/reference/query-dsl/geo-queries.asciidoc b/docs/reference/query-dsl/geo-queries.asciidoc index 5220b00101e98..b3cc9112576cc 100644 --- a/docs/reference/query-dsl/geo-queries.asciidoc +++ b/docs/reference/query-dsl/geo-queries.asciidoc @@ -8,29 +8,24 @@ lines, circles, polygons, multi-polygons, etc. The queries in this group are: -<> query:: - - Finds documents with geo-shapes which either intersect, are contained by, or - do not intersect with the specified geo-shape. - <> query:: - - Finds documents with geo-points that fall into the specified rectangle. +Finds documents with geo-points that fall into the specified rectangle. <> query:: - - Finds documents with geo-points within the specified distance of a central - point. +Finds documents with geo-points within the specified distance of a central point. <> query:: +Find documents with geo-points within the specified polygon. - Find documents with geo-points within the specified polygon. - +<> query:: +Finds documents with geo-shapes which either intersect, are contained by, or do not intersect with the specified +geo-shape. -include::geo-shape-query.asciidoc[] include::geo-bounding-box-query.asciidoc[] include::geo-distance-query.asciidoc[] include::geo-polygon-query.asciidoc[] + +include::geo-shape-query.asciidoc[] diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index 424968090d6ab..79395f24e3ce9 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-geo-shape-query]] -=== GeoShape Query +=== Geo-shape query +++++ +Geo-shape +++++ Filter documents indexed using the `geo_shape` type. diff --git a/docs/reference/query-dsl/has-child-query.asciidoc b/docs/reference/query-dsl/has-child-query.asciidoc index ca23afb0fae26..fa04db8faa9fe 100644 --- a/docs/reference/query-dsl/has-child-query.asciidoc +++ b/docs/reference/query-dsl/has-child-query.asciidoc @@ -1,134 +1,161 @@ [[query-dsl-has-child-query]] -=== Has Child Query - -The `has_child` filter accepts a query and the child type to run against, and -results in parent documents that have child docs matching the query. Here is -an example: +=== Has child query +++++ +Has child +++++ + +Returns parent documents whose <> child documents match a +provided query. You can create parent-child relationships between documents in +the same index using a <> field mapping. 
+ +[WARNING] +==== +Because it performs a join, the `has_child` is slow compared to other queries. +Its performance degrades as the number of matching child documents pointing to +unique parent documents increases. Each `has_child` query in a search can +increase query time significantly. + +If you care about query performance, do not use this query. If you need to use +the `has_child` query, use it as rarely as possible. +==== + +[[has-child-query-ex-request]] +==== Example request + +[[has-child-index-setup]] +===== Index setup +To use the `has_child` query, your index must include a <> +field mapping. For example: [source,js] --------------------------------------------------- -GET /_search +---- +PUT /my_index { - "query": { - "has_child" : { - "type" : "blog_tag", - "query" : { - "term" : { - "tag" : "something" + "mappings": { + "properties" : { + "my-join-field" : { + "type" : "join", + "relations": { + "parent": "child" } } } } } --------------------------------------------------- + +---- // CONSOLE +// TESTSETUP -Note that the `has_child` is a slow query compared to other queries in the -query dsl due to the fact that it performs a join. The performance degrades -as the number of matching child documents pointing to unique parent documents -increases. If you care about query performance you should not use this query. -However if you do happen to use this query then use it as little as possible. -Each `has_child` query that gets added to a search request can increase query -time significantly. - -[float] -==== Scoring capabilities - -The `has_child` also has scoring support. The -supported score modes are `min`, `max`, `sum`, `avg` or `none`. The default is -`none` and yields the same behaviour as in previous versions. If the -score mode is set to another value than `none`, the scores of all the -matching child documents are aggregated into the associated parent -documents. The score type can be specified with the `score_mode` field -inside the `has_child` query: +[[has-child-query-ex-query]] +===== Example query [source,js] --------------------------------------------------- +---- GET /_search { "query": { "has_child" : { - "type" : "blog_tag", - "score_mode" : "min", + "type" : "child", "query" : { - "term" : { - "tag" : "something" - } - } + "match_all" : {} + }, + "max_children": 10, + "min_children": 2, + "score_mode" : "min" } } } --------------------------------------------------- +---- // CONSOLE -[float] -==== Min/Max Children +[[has-child-top-level-params]] +==== Top-level parameters for `has_child` -The `has_child` query allows you to specify that a minimum and/or maximum -number of children are required to match for the parent doc to be considered -a match: +`type`:: +(Required, string) Name of the child relationship mapped for the +<> field. -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "has_child" : { - "type" : "blog_tag", - "score_mode" : "min", - "min_children": 2, <1> - "max_children": 10, <1> - "query" : { - "term" : { - "tag" : "something" - } - } - } - } -} --------------------------------------------------- -// CONSOLE -<1> Both `min_children` and `max_children` are optional. +`query`:: +(Required, query object) Query you wish to run on child documents of the `type` +field. If a child document matches the search, the query returns the parent +document. + +`ignore_unmapped`:: ++ +-- +(Optional, boolean) Indicates whether to ignore an unmapped `type` and not +return any documents instead of an error. 
Defaults to `false`. + +If `false`, {es} returns an error if the `type` is unmapped. -The `min_children` and `max_children` parameters can be combined with -the `score_mode` parameter. +You can use this parameter to query multiple indices that may not contain the +`type`. +-- -[float] -==== Ignore Unmapped +`max_children`:: +(Optional, integer) Maximum number of child documents that match the `query` +allowed for a returned parent document. If the parent document exceeds this +limit, it is excluded from the search results. -When set to `true` the `ignore_unmapped` option will ignore an unmapped `type` -and will not match any documents for this query. This can be useful when -querying multiple indexes which might have different mappings. When set to -`false` (the default value) the query will throw an exception if the `type` -is not mapped. +`min_children`:: +(Optional, integer) Minimum number of child documents that match the `query` +required to match the query for a returned parent document. If the parent +document does not meet this limit, it is excluded from the search results. -[float] -==== Sorting +`score_mode`:: ++ +-- +(Optional, string) Indicates how scores for matching child documents affect the +root parent document's <>. Valid values +are: -Parent documents can't be sorted by fields in matching child documents via the -regular sort options. If you need to sort parent document by field in the child -documents then you should use the `function_score` query and then just sort -by `_score`. +`none` (Default):: +Do not use the relevance scores of matching child documents. The query assigns +parent documents a score of `0`. -Sorting blogs by child documents' `click_count` field: +`avg`:: +Use the mean relevance score of all matching child documents. + +`max`:: +Uses the highest relevance score of all matching child documents. + +`min`:: +Uses the lowest relevance score of all matching child documents. + +`sum`:: +Add together the relevance scores of all matching child documents. +-- + +[[has-child-query-notes]] +==== Notes + +[[has-child-query-performance]] +===== Sorting +You cannot sort the results of a `has_child` query using standard +<>. + +If you need to sort returned documents by a field in their child documents, use +a `function_score` query and sort by `_score`. For example, the following query +sorts returned documents by the `click_count` field of their child documents. [source,js] --------------------------------------------------- +---- GET /_search { "query": { "has_child" : { - "type" : "blog_tag", - "score_mode" : "max", + "type" : "child", "query" : { "function_score" : { "script_score": { "script": "_score * doc['click_count'].value" } } - } + }, + "score_mode" : "max" } } } --------------------------------------------------- +---- // CONSOLE diff --git a/docs/reference/query-dsl/has-parent-query.asciidoc b/docs/reference/query-dsl/has-parent-query.asciidoc index 4065a9d99fe2e..39cb22fb75094 100644 --- a/docs/reference/query-dsl/has-parent-query.asciidoc +++ b/docs/reference/query-dsl/has-parent-query.asciidoc @@ -1,93 +1,132 @@ [[query-dsl-has-parent-query]] -=== Has Parent Query +=== Has parent query +++++ +Has parent +++++ -The `has_parent` query accepts a query and a parent type. The query is -executed in the parent document space, which is specified by the parent -type. This query returns child documents which associated parents have -matched. For the rest `has_parent` query has the same options and works -in the same manner as the `has_child` query. 
+Returns child documents whose <> parent document matches a +provided query. You can create parent-child relationships between documents in +the same index using a <> field mapping. + +[WARNING] +==== +Because it performs a join, the `has_parent` query is slow compared to other queries. +Its performance degrades as the number of matching parent documents increases. +Each `has_parent` query in a search can increase query time significantly. +==== + +[[has-parent-query-ex-request]] +==== Example request + +[[has-parent-index-setup]] +===== Index setup +To use the `has_parent` query, your index must include a <> +field mapping. For example: [source,js] --------------------------------------------------- -GET /_search +---- +PUT /my-index { - "query": { - "has_parent" : { - "parent_type" : "blog", - "query" : { - "term" : { - "tag" : "something" + "mappings": { + "properties" : { + "my-join-field" : { + "type" : "join", + "relations": { + "parent": "child" } + }, + "tag" : { + "type" : "keyword" } } } } --------------------------------------------------- -// CONSOLE -Note that the `has_parent` is a slow query compared to other queries in the -query dsl due to the fact that it performs a join. The performance degrades -as the number of matching parent documents increases. If you care about query -performance you should not use this query. However if you do happen to use -this query then use it as less as possible. Each `has_parent` query that gets -added to a search request can increase query time significantly. - -[float] -==== Scoring capabilities +---- +// CONSOLE +// TESTSETUP -The `has_parent` also has scoring support. The default is `false` which -ignores the score from the parent document. The score is in this -case equal to the boost on the `has_parent` query (Defaults to 1). If -the score is set to `true`, then the score of the matching parent -document is aggregated into the child documents belonging to the -matching parent document. The score mode can be specified with the -`score` field inside the `has_parent` query: +[[has-parent-query-ex-query]] +===== Example query [source,js] --------------------------------------------------- -GET /_search +---- +GET /my-index/_search { "query": { "has_parent" : { - "parent_type" : "blog", - "score" : true, + "parent_type" : "parent", "query" : { "term" : { - "tag" : "something" + "tag" : { + "value" : "Elasticsearch" + } } } } } } --------------------------------------------------- +---- // CONSOLE -[float] -==== Ignore Unmapped +[[has-parent-top-level-params]] +==== Top-level parameters for `has_parent` + +`parent_type`:: +(Required, string) Name of the parent relationship mapped for the +<> field. + +`query`:: +(Required, query object) Query you wish to run on parent documents of the +`parent_type` field. If a parent document matches the search, the query returns +its child documents. + +`score`:: ++ +-- +(Optional, boolean) Indicates whether the <> of a matching parent document is aggregated into its child documents. +Defaults to `false`. -When set to `true` the `ignore_unmapped` option will ignore an unmapped `type` -and will not match any documents for this query. This can be useful when -querying multiple indexes which might have different mappings. When set to -`false` (the default value) the query will throw an exception if the `type` -is not mapped. +If `false`, {es} ignores the relevance score of the parent document. {es} also +assigns each child document a relevance score equal to the `query`'s `boost`, +which defaults to `1`. 
-[float] -==== Sorting +If `true`, the relevance score of the matching parent document is aggregated +into its child documents' relevance scores. +-- -Child documents can't be sorted by fields in matching parent documents via the -regular sort options. If you need to sort child documents by field in the parent -documents then you should use the `function_score` query and then just sort -by `_score`. +`ignore_unmapped`:: ++ +-- +(Optional, boolean) Indicates whether to ignore an unmapped `parent_type` and +not return any documents instead of an error. Defaults to `false`. -Sorting tags by parent document' `view_count` field: +If `false`, {es} returns an error if the `parent_type` is unmapped. + +You can use this parameter to query multiple indices that may not contain the +`parent_type`. +-- + +[[has-parent-query-notes]] +==== Notes + +[[has-parent-query-performance]] +===== Sorting +You cannot sort the results of a `has_parent` query using standard +<>. + +If you need to sort returned documents by a field in their parent documents, use +a `function_score` query and sort by `_score`. For example, the following query +sorts returned documents by the `view_count` field of their parent documents. [source,js] --------------------------------------------------- +---- GET /_search { "query": { "has_parent" : { - "parent_type" : "blog", + "parent_type" : "parent", "score" : true, "query" : { "function_score" : { @@ -99,5 +138,5 @@ GET /_search } } } --------------------------------------------------- -// CONSOLE +---- +// CONSOLE \ No newline at end of file diff --git a/docs/reference/query-dsl/ids-query.asciidoc b/docs/reference/query-dsl/ids-query.asciidoc index 8798a2fb093f8..feea48907459e 100644 --- a/docs/reference/query-dsl/ids-query.asciidoc +++ b/docs/reference/query-dsl/ids-query.asciidoc @@ -1,8 +1,13 @@ [[query-dsl-ids-query]] -=== Ids Query +=== IDs +++++ +IDs +++++ -Filters documents that only have the provided ids. Note, this query -uses the <> field. +Returns documents based on their IDs. This query uses document IDs stored in +the <> field. + +==== Example request [source,js] -------------------------------------------------- @@ -16,3 +21,9 @@ GET /_search } -------------------------------------------------- // CONSOLE + +[[ids-query-top-level-parameters]] +==== Top-level parameters for `ids` + +`values`:: +(Required, array of strings) An array of <>. \ No newline at end of file diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc index 27609e8565902..9595a084d6abc 100644 --- a/docs/reference/query-dsl/intervals-query.asciidoc +++ b/docs/reference/query-dsl/intervals-query.asciidoc @@ -1,17 +1,28 @@ [[query-dsl-intervals-query]] === Intervals query +++++ +Intervals +++++ -An `intervals` query allows fine-grained control over the order and proximity of -matching terms. Matching rules are constructed from a small set of definitions, -and the rules are then applied to terms from a particular `field`. +Returns documents based on the order and proximity of matching terms. + +The `intervals` query uses *matching rules*, constructed from a small set of +definitions. Theses rules are then applied to terms from a specified `field`. The definitions produce sequences of minimal intervals that span terms in a -body of text. These intervals can be further combined and filtered by +body of text. These intervals can be further combined and filtered by parent sources. 
-The example below will search for the phrase `my favourite food` appearing -before the terms `hot` and `water` or `cold` and `porridge` in any order, in -the field `my_text` + +[[intervals-query-ex-request]] +==== Example request + +The following `intervals` search returns documents containing `my +favorite food` immediately followed by `hot water` or `cold porridge` in the +`my_text` field. + +This search would match a `my_text` value of `my favorite food is cold +porridge` but not `when it's cold my favorite food is porridge`. [source,js] -------------------------------------------------- @@ -25,7 +36,7 @@ POST _search "intervals" : [ { "match" : { - "query" : "my favourite food", + "query" : "my favorite food", "max_gaps" : 0, "ordered" : true } @@ -39,9 +50,7 @@ POST _search } } ] - }, - "boost" : 2.0, - "_name" : "favourite_food" + } } } } @@ -49,72 +58,150 @@ POST _search -------------------------------------------------- // CONSOLE -In the above example, the text `my favourite food is cold porridge` would -match because the two intervals matching `my favourite food` and `cold -porridge` appear in the correct order, but the text `when it's cold my -favourite food is porridge` would not match, because the interval matching -`cold porridge` starts before the interval matching `my favourite food`. +[[intervals-top-level-params]] +==== Top-level parameters for `intervals` +[[intervals-rules]] +``:: ++ +-- +(Required, rule object) Field you wish to search. + +The value of this parameter is a rule object used to match documents +based on matching terms, order, and proximity. + +Valid rules include: + +* <> +* <> +* <> +* <> +-- [[intervals-match]] -==== `match` +==== `match` rule parameters -The `match` rule matches analyzed text, and takes the following parameters: +The `match` rule matches analyzed text. -[horizontal] `query`:: -The text to match. +(Required, string) Text you wish to find in the provided ``. + `max_gaps`:: -Specify a maximum number of gaps between the terms in the text. Terms that -appear further apart than this will not match. If unspecified, or set to -1, -then there is no width restriction on the match. If set to 0 then the terms -must appear next to each other. ++ +-- +(Optional, integer) Maximum number of positions between the matching terms. +Terms further apart than this are not considered matches. Defaults to +`-1`. + +If unspecified or set to `-1`, there is no width restriction on the match. If +set to `0`, the terms must appear next to each other. +-- + `ordered`:: -Whether or not the terms must appear in their specified order. Defaults to -`false` +(Optional, boolean) +If `true`, matching terms must appear in their specified order. Defaults to +`false`. + `analyzer`:: -Which analyzer should be used to analyze terms in the `query`. By -default, the search analyzer of the top-level field will be used. +(Optional, string) <> used to analyze terms in the `query`. +Defaults to the top-level ``'s analyzer. + `filter`:: -An optional <> +(Optional, <> rule object) An optional interval +filter. [[intervals-all_of]] -==== `all_of` +==== `all_of` rule parameters -`all_of` returns returns matches that span a combination of other rules. +The `all_of` rule returns matches that span a combination of other rules. -[horizontal] `intervals`:: -An array of rules to combine. All rules must produce a match in a -document for the overall source to match. +(Required, array of rule objects) An array of rules to combine. 
All rules must +produce a match in a document for the overall source to match. + `max_gaps`:: -Specify a maximum number of gaps between the rules. Combinations that match -across a distance greater than this will not match. If set to -1 or -unspecified, there is no restriction on this distance. If set to 0, then the -matches produced by the rules must all appear immediately next to each other. ++ +-- +(Optional, integer) Maximum number of positions between the matching terms. +Intervals produced by the rules further apart than this are not considered +matches. Defaults to `-1`. + +If unspecified or set to `-1`, there is no width restriction on the match. If +set to `0`, the terms must appear next to each other. +-- + `ordered`:: -Whether the intervals produced by the rules should appear in the order in -which they are specified. Defaults to `false` +(Optional, boolean) If `true`, intervals produced by the rules should appear in +the order in which they are specified. Defaults to `false`. + `filter`:: -An optional <> +(Optional, <> rule object) Rule used to filter +returned intervals. [[intervals-any_of]] -==== `any_of` +==== `any_of` rule parameters -The `any_of` rule emits intervals produced by any of its sub-rules. +The `any_of` rule returns intervals produced by any of its sub-rules. -[horizontal] `intervals`:: -An array of rules to match +(Required, array of rule objects) An array of rules to match. + `filter`:: -An optional <> +(Optional, <> rule object) Rule used to filter +returned intervals. [[interval_filter]] -==== filters +==== `filter` rule parameters + +The `filter` rule returns intervals based on a query. See +<> for an example. + +`after`:: +(Optional, query object) Query used to return intervals that follow an interval +from the `filter` rule. + +`before`:: +(Optional, query object) Query used to return intervals that occur before an +interval from the `filter` rule. + +`contained_by`:: +(Optional, query object) Query used to return intervals contained by an interval +from the `filter` rule. + +`containing`:: +(Optional, query object) Query used to return intervals that contain an interval +from the `filter` rule. + +`not_contained_by`:: +(Optional, query object) Query used to return intervals that are *not* +contained by an interval from the `filter` rule. + +`not_containing`:: +(Optional, query object) Query used to return intervals that do *not* contain +an interval from the `filter` rule. + +`not_overlapping`:: +(Optional, query object) Query used to return intervals that do *not* overlap +with an interval from the `filter` rule. + +`overlapping`:: +(Optional, query object) Query used to return intervals that overlap with an +interval from the `filter` rule. + +`script`:: +(Optional, <>) Script used to return +matching documents. This script must return a boolean value, `true` or `false`. +See <> for an example. -You can filter intervals produced by any rules by their relation to the -intervals produced by another rule. The following example will return -documents that have the words `hot` and `porridge` within 10 positions -of each other, without the word `salty` in between: + +[[intervals-query-note]] +==== Notes + +[[interval-filter-rule-ex]] +===== Filter example + +The following search includes a `filter` rule. 
It returns documents that have +the words `hot` and `porridge` within 10 positions of each other, without the +word `salty` in between: [source,js] -------------------------------------------------- @@ -141,25 +228,12 @@ POST _search -------------------------------------------------- // CONSOLE -The following filters are available: -[horizontal] -`containing`:: -Produces intervals that contain an interval from the filter rule -`contained_by`:: -Produces intervals that are contained by an interval from the filter rule -`not_containing`:: -Produces intervals that do not contain an interval from the filter rule -`not_contained_by`:: -Produces intervals that are not contained by an interval from the filter rule -`not_overlapping`:: -Produces intervals that do not overlap with an interval from the filter rule - [[interval-script-filter]] -==== Script filters +===== Script filters -You can also filter intervals based on their start position, end position and -internal gap count, using a script. The script has access to an `interval` -variable, with `start`, `end` and `gaps` methods: +You can use a script to filter intervals based on their start position, end +position, and internal gap count. The following `filter` script uses the +`interval` variable with the `start`, `end`, and `gaps` methods: [source,js] -------------------------------------------------- @@ -183,12 +257,13 @@ POST _search -------------------------------------------------- // CONSOLE + [[interval-minimization]] -==== Minimization +===== Minimization The intervals query always minimizes intervals, to ensure that queries can -run in linear time. This can sometimes cause surprising results, particularly -when using `max_gaps` restrictions or filters. For example, take the +run in linear time. This can sometimes cause surprising results, particularly +when using `max_gaps` restrictions or filters. For example, take the following query, searching for `salty` contained within the phrase `hot porridge`: @@ -216,15 +291,15 @@ POST _search -------------------------------------------------- // CONSOLE -This query will *not* match a document containing the phrase `hot porridge is +This query does *not* match a document containing the phrase `hot porridge is salty porridge`, because the intervals returned by the match query for `hot porridge` only cover the initial two terms in this document, and these do not overlap the intervals covering `salty`. Another restriction to be aware of is the case of `any_of` rules that contain -sub-rules which overlap. In particular, if one of the rules is a strict -prefix of the other, then the longer rule will never be matched, which can -cause surprises when used in combination with `max_gaps`. Consider the +sub-rules which overlap. In particular, if one of the rules is a strict +prefix of the other, then the longer rule can never match, which can +cause surprises when used in combination with `max_gaps`. 
Consider the following query, searching for `the` immediately followed by `big` or `big bad`, immediately followed by `wolf`: @@ -255,10 +330,10 @@ POST _search -------------------------------------------------- // CONSOLE -Counter-intuitively, this query *will not* match the document `the big bad -wolf`, because the `any_of` rule in the middle will only produce intervals +Counter-intuitively, this query does *not* match the document `the big bad +wolf`, because the `any_of` rule in the middle only produces intervals for `big` - intervals for `big bad` being longer than those for `big`, while -starting at the same position, and so being minimized away. In these cases, +starting at the same position, and so being minimized away. In these cases, it's better to rewrite the query so that all of the options are explicitly laid out at the top level: @@ -286,4 +361,4 @@ POST _search } } -------------------------------------------------- -// CONSOLE \ No newline at end of file +// CONSOLE diff --git a/docs/reference/query-dsl/joining-queries.asciidoc b/docs/reference/query-dsl/joining-queries.asciidoc index e40b8655066e5..69fcca8690079 100644 --- a/docs/reference/query-dsl/joining-queries.asciidoc +++ b/docs/reference/query-dsl/joining-queries.asciidoc @@ -6,13 +6,11 @@ prohibitively expensive. Instead, Elasticsearch offers two forms of join which are designed to scale horizontally. <>:: - Documents may contain fields of type <>. These fields are used to index arrays of objects, where each object can be queried (with the `nested` query) as an independent document. <> and <> queries:: - A <> can exist between documents within a single index. The `has_child` query returns parent documents whose child documents match the specified query, while the diff --git a/docs/reference/query-dsl/match-all-query.asciidoc b/docs/reference/query-dsl/match-all-query.asciidoc index 6e44882867624..31d4f64aef3b2 100644 --- a/docs/reference/query-dsl/match-all-query.asciidoc +++ b/docs/reference/query-dsl/match-all-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-match-all-query]] -== Match All Query +== Match all query +++++ +Match all +++++ The most simple query, which matches all documents, giving them all a `_score` of `1.0`. diff --git a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc index 73f1be9143cf2..df7efa9b9ad60 100644 --- a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc +++ b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc @@ -1,27 +1,22 @@ [[query-dsl-match-query-phrase-prefix]] -=== Match Phrase Prefix Query +=== Match phrase prefix query +++++ +Match phrase prefix +++++ -The `match_phrase_prefix` is the same as `match_phrase`, except that it -allows for prefix matches on the last term in the text. For example: +Returns documents that contain the words of a provided text, in the **same +order** as provided. The last term of the provided text is treated as a +<>, matching any words that begin with that term. -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "match_phrase_prefix" : { - "message" : "quick brown f" - } - } -} --------------------------------------------------- -// CONSOLE -It accepts the same parameters as the phrase type. In addition, it also -accepts a `max_expansions` parameter (default `50`) that can control to how -many suffixes the last term will be expanded. 
It is highly recommended to set -it to an acceptable value to control the execution time of the query. For -example: +[[match-phrase-prefix-query-ex-request]] +==== Example request + +The following search returns documents that contain phrases beginning with +`quick brown f` in the `message` field. + +This search would match a `message` value of `quick brown fox` or `two quick +brown ferrets` but not `the fox is quick and brown`. [source,js] -------------------------------------------------- @@ -30,8 +25,7 @@ GET /_search "query": { "match_phrase_prefix" : { "message" : { - "query" : "quick brown f", - "max_expansions" : 10 + "query" : "quick brown f" } } } @@ -39,21 +33,69 @@ GET /_search -------------------------------------------------- // CONSOLE -[IMPORTANT] -=================================================== -The `match_phrase_prefix` query is a poor-man's autocomplete. It is very easy -to use, which lets you get started quickly with _search-as-you-type_ but its -results, which usually are good enough, can sometimes be confusing. +[[match-phrase-prefix-top-level-params]] +==== Top-level parameters for `match_phrase_prefix` +``:: +(Required, object) Field you wish to search. + +[[match-phrase-prefix-field-params]] +==== Parameters for `` +`query`:: ++ +-- +(Required, string) Text you wish to find in the provided ``. + +The `match_phrase_prefix` query <> any provided text into +tokens before performing a search. The last term of this text is treated as a +<>, matching any words that begin with that term. +-- + +`analyzer`:: +(Optional, string) <> used to convert text in the `query` +value into tokens. Defaults to the <> mapped for the ``. If no analyzer is mapped, the index's +default analyzer is used. + +`max_expansions`:: +(Optional, integer) Maximum number of terms to which the last provided term of +the `query` value will expand. Defaults to `50`. + +`slop`:: +(Optional, integer) Maximum number of positions allowed between matching tokens. +Defaults to `0`. Transposed terms have a slop of `2`. + +`zero_terms_query`:: ++ +-- +(Optional, string) Indicates whether no documents are returned if the `analyzer` +removes all tokens, such as when using a `stop` filter. Valid values are: + + `none` (Default):: +No documents are returned if the `analyzer` removes all tokens. + + `all`:: +Returns all documents, similar to a <> +query. +-- + + +[[match-phrase-prefix-query-notes]] +==== Notes + +[[match-phrase-prefix-autocomplete]] +===== Using the match phrase prefix query for search autocompletion +While easy to set up, using the `match_phrase_prefix` query for search +autocompletion can sometimes produce confusing results. -Consider the query string `quick brown f`. This query works by creating a -phrase query out of `quick` and `brown` (i.e. the term `quick` must exist and -must be followed by the term `brown`). Then it looks at the sorted term -dictionary to find the first 50 terms that begin with `f`, and -adds these terms to the phrase query. +For example, consider the query string `quick brown f`. This query works by +creating a phrase query out of `quick` and `brown` (i.e. the term `quick` must +exist and must be followed by the term `brown`). Then it looks at the sorted +term dictionary to find the first 50 terms that begin with `f`, and adds these +terms to the phrase query. The problem is that the first 50 terms may not include the term `fox` so the -phrase `quick brown fox` will not be found. This usually isn't a problem as +phrase `quick brown fox` will not be found. 
This usually isn't a problem as the user will continue to type more letters until the word they are looking for appears. @@ -61,4 +103,3 @@ For better solutions for _search-as-you-type_ see the <> and {defguide}/_index_time_search_as_you_type.html[Index-Time Search-as-You-Type]. -=================================================== diff --git a/docs/reference/query-dsl/match-phrase-query.asciidoc b/docs/reference/query-dsl/match-phrase-query.asciidoc index 1f4b19eedc132..ed847c419af60 100644 --- a/docs/reference/query-dsl/match-phrase-query.asciidoc +++ b/docs/reference/query-dsl/match-phrase-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-match-query-phrase]] -=== Match Phrase Query +=== Match phrase query +++++ +Match phrase +++++ The `match_phrase` query analyzes the text and creates a `phrase` query out of the analyzed text. For example: diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index 5c397d603bef3..e7ebc96d9c623 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -1,9 +1,18 @@ [[query-dsl-match-query]] -=== Match Query +=== Match query +++++ +Match +++++ +Returns documents that match a provided text, number, date or boolean value. The +provided text is analyzed before matching. -`match` queries accept text/numerics/dates, analyzes -them, and constructs a query. For example: +The `match` query is the standard query for performing a full-text search, +including options for fuzzy matching. + + +[[match-query-ex-request]] +==== Example request [source,js] -------------------------------------------------- @@ -11,27 +20,175 @@ GET /_search { "query": { "match" : { - "message" : "this is a test" + "message" : { + "query" : "this is a test" + } } } } -------------------------------------------------- // CONSOLE -Note, `message` is the name of a field, you can substitute the name of -any field instead. + +[[match-top-level-params]] +==== Top-level parameters for `match` + +``:: +(Required, object) Field you wish to search. + + +[[match-field-params]] +==== Parameters for `` +`query`:: ++ +-- +(Required) Text, number, boolean value or date you wish to find in the provided +``. + +The `match` query <> any provided text before performing a +search. This means the `match` query can search <> fields for +analyzed tokens rather than an exact term. +-- + +`analyzer`:: +(Optional, string) <> used to convert the text in the `query` +value into tokens. Defaults to the <> mapped for the ``. If no analyzer is mapped, the index's +default analyzer is used. + +`auto_generate_synonyms_phrase_query`:: ++ +-- +(Optional, boolean) If `true`, <> +queries are automatically created for multi-term synonyms. Defaults to `true`. + +See <> for an +example. +-- + +`fuzziness`:: +(Optional, string) Maximum edit distance allowed for matching. See <> +for valid values and more information. See <> +for an example. + +`max_expansions`:: +(Optional, integer) Maximum number of terms to which the query will +expand. Defaults to `50`. + +`prefix_length`:: +(Optional, integer) Number of beginning characters left unchanged for fuzzy +matching. Defaults to `0`. + +`transpositions`:: +(Optional, boolean) If `true`, edits for fuzzy matching include +transpositions of two adjacent characters (ab → ba). Defaults to `true`. + +`fuzzy_rewrite`:: ++ +-- +(Optional, string) Method used to rewrite the query. See the +<> for valid values and more +information. 
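+
+For example, a minimal sketch (the query text and rewrite method shown are
+illustrative) that combines `fuzziness` with an explicit `fuzzy_rewrite`
+method:
+
+[source,js]
+----
+GET /_search
+{
+    "query": {
+        "match" : {
+            "message" : {
+                "query" : "this is a testt",
+                "fuzziness" : "AUTO",
+                "fuzzy_rewrite" : "top_terms_blended_freqs_10"
+            }
+        }
+    }
+}
+----
+// CONSOLE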
+ +If the `fuzziness` parameter is not `0`, the `match` query uses a `rewrite` +method of `top_terms_blended_freqs_${max_expansions}` by default. +-- + +`lenient`:: +(Optional, boolean) If `true`, format-based errors, such as providing a text +`query` value for a <> field, are ignored. Defaults to `false`. + +`operator`:: ++ +-- +(Optional, string) Boolean logic used to interpret text in the `query` value. +Valid values are: + +`OR` (Default):: +For example, a `query` value of `capital of Hungary` is interpreted as `capital +OR of OR Hungary`. + +`AND`:: +For example, a `query` value of `capital of Hungary` is interpreted as `capital +AND of AND Hungary`. +-- + +`minimum_should_match`:: ++ +-- +(Optional, string) Minimum number of clauses that must match for a document to +be returned. See the <> for valid values and more information. +-- + +`zero_terms_query`:: ++ +-- +(Optional, string) Indicates whether no documents are returned if the `analyzer` +removes all tokens, such as when using a `stop` filter. Valid values are: + +`none` (Default):: +No documents are returned if the `analyzer` removes all tokens. + +`all`:: +Returns all documents, similar to a <> +query. + +See <> for an example. +-- + + +[[match-query-notes]] +==== Notes + +[[query-dsl-match-query-short-ex]] +===== Short request example + +You can simplify the match query syntax by combining the `` and `query` +parameters. For example: + +[source,js] +---- +GET /_search +{ + "query": { + "match" : { + "message" : "this is a test" + } + } +} +---- +// CONSOLE [[query-dsl-match-query-boolean]] -==== match +===== How the match query works The `match` query is of type `boolean`. It means that the text provided is analyzed and the analysis process constructs a boolean query -from the provided text. The `operator` flag can be set to `or` or `and` +from the provided text. The `operator` parameter can be set to `or` or `and` to control the boolean clauses (defaults to `or`). The minimum number of optional `should` clauses to match can be set using the <> parameter. +Here is an example with the `operator` parameter: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "match" : { + "message" : { + "query" : "this is a test", + "operator" : "and" + } + } + } +} +-------------------------------------------------- +// CONSOLE + The `analyzer` can be set to control which analyzer will perform the analysis process on the text. It defaults to the field explicit mapping definition, or the default search analyzer. @@ -41,7 +198,7 @@ data-type mismatches, such as trying to query a numeric field with a text query string. Defaults to `false`. [[query-dsl-match-query-fuzziness]] -===== Fuzziness +===== Fuzziness in the match query `fuzziness` allows _fuzzy matching_ based on the type of field being queried. See <> for allowed settings. @@ -56,8 +213,9 @@ rewritten. Fuzzy transpositions (`ab` -> `ba`) are allowed by default but can be disabled by setting `fuzzy_transpositions` to `false`. -Here is an example when providing additional parameters (note the slight -change in structure, `message` is the field name): +Note that fuzzy matching is not applied to terms with synonyms, as under the hood +these terms are expanded to a special synonym query that blends term frequencies, +which does not support fuzzy expansion. 
[source,js] -------------------------------------------------- diff --git a/docs/reference/query-dsl/minimum-should-match.asciidoc b/docs/reference/query-dsl/minimum-should-match.asciidoc index d6395da95e498..e0610ca42407e 100644 --- a/docs/reference/query-dsl/minimum-should-match.asciidoc +++ b/docs/reference/query-dsl/minimum-should-match.asciidoc @@ -1,5 +1,5 @@ [[query-dsl-minimum-should-match]] -== Minimum Should Match +== `minimum_should_match` parameter The `minimum_should_match` parameter possible values: diff --git a/docs/reference/query-dsl/mlt-query.asciidoc b/docs/reference/query-dsl/mlt-query.asciidoc index 509b4a9b44094..1d9de562083a8 100644 --- a/docs/reference/query-dsl/mlt-query.asciidoc +++ b/docs/reference/query-dsl/mlt-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-mlt-query]] -=== More Like This Query +=== More like this query +++++ +More like this +++++ The More Like This Query finds documents that are "like" a given set of documents. In order to do so, MLT selects a set of representative terms @@ -232,9 +235,8 @@ The syntax is the same as the <Multi-match +++++ The `multi_match` query builds on the <> to allow multi-field queries: @@ -21,6 +24,7 @@ GET /_search <2> The fields to be queried. [float] +[[field-boost]] ==== `fields` and per-field boosting Fields can be specified with wildcards, eg: @@ -287,9 +291,9 @@ GET /_search -------------------------------------------------- // CONSOLE -Also, accepts `analyzer`, `boost`, `lenient`, `slop` and `zero_terms_query` as explained -in <>. Type `phrase_prefix` additionally accepts -`max_expansions`. +Also, accepts `analyzer`, <>, `lenient` and `zero_terms_query` as explained +in <>, as well as `slop` which is explained in <>. +Type `phrase_prefix` additionally accepts `max_expansions`. [IMPORTANT] [[phrase-fuzziness]] @@ -387,6 +391,7 @@ Also, accepts `analyzer`, `boost`, `operator`, `minimum_should_match`, `lenient`, `zero_terms_query` and `cutoff_frequency`, as explained in <>. +[[cross-field-analysis]] ===== `cross_field` and analysis The `cross_field` type can only work in term-centric mode on fields that have @@ -495,6 +500,7 @@ which will be executed as: blended("will", fields: [first, first.edge, last.edge, last]) blended("smith", fields: [first, first.edge, last.edge, last]) +[[tie-breaker]] ===== `tie_breaker` By default, each per-term `blended` query will use the best score returned by diff --git a/docs/reference/query-dsl/multi-term-rewrite.asciidoc b/docs/reference/query-dsl/multi-term-rewrite.asciidoc index 0d327a40fdea3..dbac78b5c3038 100644 --- a/docs/reference/query-dsl/multi-term-rewrite.asciidoc +++ b/docs/reference/query-dsl/multi-term-rewrite.asciidoc @@ -1,45 +1,109 @@ [[query-dsl-multi-term-rewrite]] -== Multi Term Query Rewrite - -Multi term queries, like -<> and -<> are called -multi term queries and end up going through a process of rewrite. This -also happens on the -<>. -All of those queries allow to control how they will get rewritten using -the `rewrite` parameter: - -* `constant_score` (default): A rewrite method that performs like -`constant_score_boolean` when there are few matching terms and otherwise -visits all matching terms in sequence and marks documents for that term. -Matching documents are assigned a constant score equal to the query's -boost. -* `scoring_boolean`: A rewrite method that first translates each term -into a should clause in a boolean query, and keeps the scores as -computed by the query. 
Note that typically such scores are meaningless -to the user, and require non-trivial CPU to compute, so it's almost -always better to use `constant_score`. This rewrite method will hit -too many clauses failure if it exceeds the boolean query limit (defaults -to `1024`). -* `constant_score_boolean`: Similar to `scoring_boolean` except scores -are not computed. Instead, each matching document receives a constant -score equal to the query's boost. This rewrite method will hit too many -clauses failure if it exceeds the boolean query limit (defaults to -`1024`). -* `top_terms_N`: A rewrite method that first translates each term into -should clause in boolean query, and keeps the scores as computed by the -query. This rewrite method only uses the top scoring terms so it will -not overflow boolean max clause count. The `N` controls the size of the -top scoring terms to use. -* `top_terms_boost_N`: A rewrite method that first translates each term -into should clause in boolean query, but the scores are only computed as -the boost. This rewrite method only uses the top scoring terms so it -will not overflow the boolean max clause count. The `N` controls the -size of the top scoring terms to use. -* `top_terms_blended_freqs_N`: A rewrite method that first translates each -term into should clause in boolean query, but all term queries compute scores -as if they had the same frequency. In practice the frequency which is used -is the maximum frequency of all matching terms. This rewrite method only uses -the top scoring terms so it will not overflow boolean max clause count. The -`N` controls the size of the top scoring terms to use. +== `rewrite` parameter + +WARNING: This parameter is for expert users only. Changing the value of +this parameter can impact search performance and relevance. + +{es} uses https://lucene.apache.org/core/[Apache Lucene] internally to power +indexing and searching. In their original form, Lucene cannot execute the +following queries: + +* <> +* <> +* <> +* <> +* <> + +To execute them, Lucene changes these queries to a simpler form, such as a +<> or a +https://en.wikipedia.org/wiki/Bit_array[bit set]. + +The `rewrite` parameter determines: + +* How Lucene calculates the relevance scores for each matching document +* Whether Lucene changes the original query to a `bool` +query or bit set +* If changed to a `bool` query, which `term` query clauses are included + +[float] +[[rewrite-param-valid-values]] +=== Valid values + +`constant_score` (Default):: +Uses the `constant_score_boolean` method for fewer matching terms. Otherwise, +this method finds all matching terms in sequence and returns matching documents +using a bit set. + +`constant_score_boolean`:: +Assigns each document a relevance score equal to the `boost` +parameter. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +This method can cause the final `bool` query to exceed the clause limit in the +<> +setting. If the query exceeds this limit, {es} returns an error. + +`scoring_boolean`:: +Calculates a relevance score for each matching document. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +This method can cause the final `bool` query to exceed the clause limit in the +<> +setting. If the query exceeds this limit, {es} returns an error. 
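++
+For example, the following sketch (the query values are illustrative) applies
+this method to a `prefix` query through its `rewrite` parameter:
++
+[source,js]
+----
+GET /_search
+{
+    "query": {
+        "prefix" : {
+            "user" : {
+                "value" : "ki",
+                "rewrite" : "scoring_boolean"
+            }
+        }
+    }
+}
+----
+// CONSOLE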
+ +`top_terms_blended_freqs_N`:: +Calculates a relevance score for each matching document as if all terms had the +same frequency. This frequency is the maximum frequency of all matching terms. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +The final `bool` query only includes `term` queries for the top `N` scoring +terms. ++ +You can use this method to avoid exceeding the clause limit in the +<> +setting. + +`top_terms_boost_N`:: +Assigns each matching document a relevance score equal to the `boost` parameter. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +The final `bool` query only includes `term` queries for the top `N` terms. ++ +You can use this method to avoid exceeding the clause limit in the +<> +setting. + +`top_terms_N`:: +Calculates a relevance score for each matching document. ++ +This method changes the original query to a <>. This `bool` query contains a `should` clause and +<> for each matching term. ++ +The final `bool` query +only includes `term` queries for the top `N` scoring terms. ++ +You can use this method to avoid exceeding the clause limit in the +<> +setting. + +[float] +[[rewrite-param-perf-considerations]] +=== Performance considerations for the `rewrite` parameter +For most uses, we recommend using the `constant_score`, +`constant_score_boolean`, or `top_terms_boost_N` rewrite methods. + +Other methods calculate relevance scores. These score calculations are often +expensive and do not improve query results. \ No newline at end of file diff --git a/docs/reference/query-dsl/nested-query.asciidoc b/docs/reference/query-dsl/nested-query.asciidoc index c58d68b73cff1..5aabb8091057c 100644 --- a/docs/reference/query-dsl/nested-query.asciidoc +++ b/docs/reference/query-dsl/nested-query.asciidoc @@ -1,18 +1,29 @@ [[query-dsl-nested-query]] -=== Nested Query +=== Nested query +++++ +Nested +++++ -Nested query allows to query nested objects / docs (see -<>). The -query is executed against the nested objects / docs as if they were -indexed as separate docs (they are, internally) and resulting in the -root parent doc (or parent nested mapping). Here is a sample mapping we -will work with: +Wraps another query to search <> fields. + +The `nested` query searches nested field objects as if they were indexed as +separate documents. If an object matches the search, the `nested` query returns +the root parent document. + +[[nested-query-ex-request]] +==== Example request + +[[nested-query-index-setup]] +===== Index setup + +To use the `nested` query, your index must include a <> field +mapping. 
For example: [source,js] --------------------------------------------------- +---- PUT /my_index { - "mappings": { + "mappings" : { "properties" : { "obj1" : { "type" : "nested" @@ -21,20 +32,19 @@ PUT /my_index } } --------------------------------------------------- +---- // CONSOLE -// TESTSETUP -And here is a sample nested query usage: +[[nested-query-ex-query]] +===== Example query [source,js] --------------------------------------------------- -GET /_search +---- +GET /my_index/_search { - "query": { + "query": { "nested" : { "path" : "obj1", - "score_mode" : "avg", "query" : { "bool" : { "must" : [ @@ -42,29 +52,230 @@ GET /_search { "range" : {"obj1.count" : {"gt" : 5}} } ] } + }, + "score_mode" : "avg" + } + } +} +---- +// CONSOLE +// TEST[continued] + +[[nested-top-level-params]] +==== Top-level parameters for `nested` + +`path`:: +(Required, string) Path to the nested object you wish to search. + +`query`:: ++ +-- +(Required, query object) Query you wish to run on nested objects in the `path`. +If an object matches the search, the `nested` query returns the root parent +document. + +You can search nested fields using dot notation that includes the complete path, +such as `obj1.name`. + +Multi-level nesting is automatically supported, and detected, resulting in an +inner nested query to automatically match the relevant nesting level, rather +than root, if it exists within another nested query. + +See <> for an example. +-- + +`score_mode`:: ++ +-- +(Optional, string) Indicates how scores for matching child objects affect the +root parent document's <>. Valid values +are: + +`avg` (Default):: +Use the mean relevance score of all matching child objects. + +`max`:: +Uses the highest relevance score of all matching child objects. + +`min`:: +Uses the lowest relevance score of all matching child objects. + +`none`:: +Do not use the relevance scores of matching child objects. The query assigns +parent documents a score of `0`. + +`sum`:: +Add together the relevance scores of all matching child objects. +-- + +`ignore_unmapped`:: ++ +-- +(Optional, boolean) Indicates whether to ignore an unmapped `path` and not +return any documents instead of an error. Defaults to `false`. + +If `false`, {es} returns an error if the `path` is an unmapped field. + +You can use this parameter to query multiple indices that may not contain the +field `path`. +-- + +[[nested-query-notes]] +==== Notes + +[[multi-level-nested-query-ex]] +===== Multi-level nested queries + +To see how multi-level nested queries work, +first you need an index that has nested fields. +The following request defines mappings for the `drivers` index +with nested `make` and `model` fields. + +[source,js] +---- +PUT /drivers +{ + "mappings" : { + "properties" : { + "driver" : { + "type" : "nested", + "properties" : { + "last_name" : { + "type" : "text" + }, + "vehicle" : { + "type" : "nested", + "properties" : { + "make" : { + "type" : "text" + }, + "model" : { + "type" : "text" + } + } + } + } } } } } --------------------------------------------------- +---- // CONSOLE -The query `path` points to the nested object path, and the `query` -includes the query that will run on the nested docs matching the -direct path, and joining with the root parent docs. Note that any -fields referenced inside the query must use the complete path (fully -qualified). - -The `score_mode` allows to set how inner children matching affects -scoring of parent. It defaults to `avg`, but can be `sum`, `min`, -`max` and `none`. 
- -There is also an `ignore_unmapped` option which, when set to `true` will -ignore an unmapped `path` and will not match any documents for this query. -This can be useful when querying multiple indexes which might have different -mappings. When set to `false` (the default value) the query will throw an -exception if the `path` is not mapped. - -Multi level nesting is automatically supported, and detected, resulting -in an inner nested query to automatically match the relevant nesting -level (and not root) if it exists within another nested query. +Next, index some documents to the `drivers` index. + +[source,js] +---- +PUT /drivers/_doc/1 +{ + "driver" : { + "last_name" : "McQueen", + "vehicle" : [ + { + "make" : "Powell Motors", + "model" : "Canyonero" + }, + { + "make" : "Miller-Meteor", + "model" : "Ecto-1" + } + ] + } +} + +PUT /drivers/_doc/2?refresh +{ + "driver" : { + "last_name" : "Hudson", + "vehicle" : [ + { + "make" : "Mifune", + "model" : "Mach Five" + }, + { + "make" : "Miller-Meteor", + "model" : "Ecto-1" + } + ] + } +} +---- +// CONSOLE +// TEST[continued] + +You can now use a multi-level nested query +to match documents based on the `make` and `model` fields. + +[source,js] +---- +GET /drivers/_search +{ + "query" : { + "nested" : { + "path" : "driver", + "query" : { + "nested" : { + "path" : "driver.vehicle", + "query" : { + "bool" : { + "must" : [ + { "match" : { "driver.vehicle.make" : "Powell Motors" } }, + { "match" : { "driver.vehicle.model" : "Canyonero" } } + ] + } + } + } + } + } + } +} +---- +// CONSOLE +// TEST[continued] + +The search request returns the following response: + +[source,js] +---- +{ + "took" : 5, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 3.7349272, + "hits" : [ + { + "_index" : "drivers", + "_type" : "_doc", + "_id" : "1", + "_score" : 3.7349272, + "_source" : { + "driver" : { + "last_name" : "McQueen", + "vehicle" : [ + { + "make" : "Powell Motors", + "model" : "Canyonero" + }, + { + "make" : "Miller-Meteor", + "model" : "Ecto-1" + } + ] + } + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took" : 5/"took": $body.took/] diff --git a/docs/reference/query-dsl/parent-id-query.asciidoc b/docs/reference/query-dsl/parent-id-query.asciidoc index aa2074e7d1b7e..3add028c5c19c 100644 --- a/docs/reference/query-dsl/parent-id-query.asciidoc +++ b/docs/reference/query-dsl/parent-id-query.asciidoc @@ -1,68 +1,116 @@ [[query-dsl-parent-id-query]] -=== Parent Id Query +=== Parent ID query +++++ +Parent ID +++++ -The `parent_id` query can be used to find child documents which belong to a particular parent. -Given the following mapping definition: +Returns child documents <> to a specific parent document. +You can use a <> field mapping to create parent-child +relationships between documents in the same index. +[[parent-id-query-ex-request]] +==== Example request + +[[parent-id-index-setup]] +===== Index setup +To use the `parent_id` query, your index must include a <> +field mapping. To see how you can set up an index for the `parent_id` query, try +the following example. + +. Create an index with a <> field mapping. 
++
+--
[source,js]
---------------------------------------------
-PUT my_index
+----
+PUT /my-index
{
- "mappings": {
- "properties": {
- "my_join_field": {
- "type": "join",
- "relations": {
- "my_parent": "my_child"
+ "mappings": {
+ "properties" : {
+ "my-join-field" : {
+ "type" : "join",
+ "relations": {
+ "my-parent": "my-child"
+ }
+ }
}
- }
}
- }
}
}
-PUT my_index/_doc/1?refresh
+----
+// CONSOLE
+// TESTSETUP
+--
+
+. Index a parent document with an ID of `1`.
++
+--
+[source,js]
+----
+PUT /my-index/_doc/1?refresh
{
- "text": "This is a parent document",
- "my_join_field": "my_parent"
+ "text": "This is a parent document.",
+ "my-join-field": "my-parent"
}
+----
+// CONSOLE
+--
-PUT my_index/_doc/2?routing=1&refresh
+. Index a child document of the parent document.
++
+--
+[source,js]
+----
+PUT /my-index/_doc/2?routing=1&refresh
{
- "text": "This is a child document",
+ "text": "This is a child document.",
- "my_join_field": {
+ "my-join-field": {
- "name": "my_child",
+ "name": "my-child",
"parent": "1"
}
}
-
---------------------------------------------
+----
// CONSOLE
-// TESTSETUP
+--
+
+[[parent-id-query-ex-query]]
+===== Example query
+
+The following search returns child documents for a parent document with an ID of
+`1`.
[source,js]
--------------------------------------------------
-GET /my_index/_search
+----
+GET /my-index/_search
{
"query": {
- "parent_id": {
- "type": "my_child",
- "id": "1"
- }
+ "parent_id": {
+ "type": "my-child",
+ "id": "1"
+ }
}
}
--------------------------------------------------
+----
// CONSOLE
+[[parent-id-top-level-params]]
+==== Top-level parameters for `parent_id`
+
+`type`::
+(Required, string) Name of the child relationship mapped for the
+<> field.
-==== Parameters
+`id`::
+(Required, string) ID of the parent document. The query will return child
+documents of this parent document.
-This query has two required parameters:
+`ignore_unmapped`::
++
+--
+(Optional, boolean) Indicates whether to ignore an unmapped `type` and not
+return any documents instead of an error. Defaults to `false`.
-[horizontal]
-`type`:: The **child** type name, as specified in the <>.
-`id`:: The ID of the parent document.
+If `false`, {es} returns an error if the `type` is unmapped.
-`ignore_unmapped`:: When set to `true` this will ignore an unmapped `type` and will not match any
-documents for this query. This can be useful when querying multiple indexes
-which might have different mappings. When set to `false` (the default value)
-the query will throw an exception if the `type` is not mapped.
+You can use this parameter to query multiple indices that may not contain the
+`type`.
+--
diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc
index 89264af0f2619..314c30bfc1a9b 100644
--- a/docs/reference/query-dsl/percolate-query.asciidoc
+++ b/docs/reference/query-dsl/percolate-query.asciidoc
@@ -1,5 +1,8 @@
[[query-dsl-percolate-query]]
-=== Percolate Query
+=== Percolate query
+++++
+Percolate
+++++
The `percolate` query can be used to match queries stored in an index. The `percolate` query itself
@@ -134,7 +137,7 @@ The following parameters are required when percolating a document:
This is an optional parameter.
`document`:: The source of the document being percolated.
`documents`:: Like the `document` parameter, but accepts multiple documents via a json array.
-`document_type`:: The type / mapping of the document being percolated.
This setting is deprecated and only required for indices created before 6.0 +`document_type`:: The type / mapping of the document being percolated. This parameter is deprecated and will be removed in Elasticsearch 8.0. Instead of specifying the source of the document being percolated, the source can also be retrieved from an already stored document. The `percolate` query will then internally execute a get request to fetch that document. @@ -143,7 +146,7 @@ In that case the `document` parameter can be substituted with the following para [horizontal] `index`:: The index the document resides in. This is a required parameter. -`type`:: The type of the document to fetch. This is a required parameter. +`type`:: The type of the document to fetch. This parameter is deprecated and will be removed in Elasticsearch 8.0. `id`:: The id of the document to fetch. This is a required parameter. `routing`:: Optionally, routing to be used to fetch document to percolate. `preference`:: Optionally, preference to be used to fetch document to percolate. @@ -323,7 +326,6 @@ GET /my-index/_search "percolate" : { "field": "query", "index" : "my-index", - "type" : "_doc", "id" : "2", "version" : 1 <1> } diff --git a/docs/reference/query-dsl/prefix-query.asciidoc b/docs/reference/query-dsl/prefix-query.asciidoc index 54d69583e990c..25cf0fc5bf8ea 100644 --- a/docs/reference/query-dsl/prefix-query.asciidoc +++ b/docs/reference/query-dsl/prefix-query.asciidoc @@ -1,33 +1,69 @@ [[query-dsl-prefix-query]] -=== Prefix Query +=== Prefix query +++++ +Prefix +++++ -Matches documents that have fields containing terms with a specified -prefix (*not analyzed*). The prefix query maps to Lucene `PrefixQuery`. -The following matches documents where the user field contains a term -that starts with `ki`: +Returns documents that contain a specific prefix in a provided field. + +[[prefix-query-ex-request]] +==== Example request + +The following search returns documents where the `user` field contains a term +that begins with `ki`. [source,js] --------------------------------------------------- +---- GET /_search -{ "query": { - "prefix" : { "user" : "ki" } - } +{ + "query": { + "prefix": { + "user": { + "value": "ki" + } + } + } } --------------------------------------------------- +---- // CONSOLE -A boost can also be associated with the query: +[[prefix-query-top-level-params]] +==== Top-level parameters for `prefix` +``:: +(Required, object) Field you wish to search. + +[[prefix-query-field-params]] +==== Parameters for `` +`value`:: +(Required, string) Beginning characters of terms you wish to find in the +provided ``. + +`rewrite`:: +(Optional, string) Method used to rewrite the query. For valid values and more +information, see the <>. + +[[prefix-query-notes]] +==== Notes + +[[prefix-query-short-ex]] +===== Short request example +You can simplify the `prefix` query syntax by combining the `` and +`value` parameters. For example: [source,js] --------------------------------------------------- +---- GET /_search -{ "query": { - "prefix" : { "user" : { "value" : "ki", "boost" : 2.0 } } - } +{ + "query": { + "prefix" : { "user" : "ki" } + } } --------------------------------------------------- +---- // CONSOLE -This multi term query allows you to control how it gets rewritten using the -<> -parameter. +[[prefix-query-index-prefixes]] +===== Speed up prefix queries +You can speed up prefix queries using the <> +mapping parameter. If enabled, {es} indexes prefixes between 2 and 5 +characters in a separate field. 
This lets {es} run prefix queries more +efficiently at the cost of a larger index. \ No newline at end of file diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc index ce7690868ecc6..967dd906eec3f 100644 --- a/docs/reference/query-dsl/query-string-query.asciidoc +++ b/docs/reference/query-dsl/query-string-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-query-string-query]] -=== Query String Query +=== Query string query +++++ +Query string +++++ A query that uses a query parser in order to parse its content. Here is an example: @@ -118,8 +121,7 @@ both>>. |`lenient` |If set to `true` will cause format based failures (like providing text to a numeric field) to be ignored. -|`time_zone` | Time Zone to be applied to any range query related to dates. See also -http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html[JODA timezone]. +|`time_zone` | Time Zone to be applied to any range query related to dates. |`quote_field_suffix` | A suffix to append to fields for quoted parts of the query string. This allows to use a field that has a different analysis chain diff --git a/docs/reference/query-dsl/query-string-syntax.asciidoc b/docs/reference/query-dsl/query-string-syntax.asciidoc index 765b54b588359..a2396972e3d0e 100644 --- a/docs/reference/query-dsl/query-string-syntax.asciidoc +++ b/docs/reference/query-dsl/query-string-syntax.asciidoc @@ -272,12 +272,26 @@ of a sub-query: If you need to use any of the characters which function as operators in your query itself (and not as operators), then you should escape them with a leading backslash. For instance, to search for `(1+1)=2`, you would -need to write your query as `\(1\+1\)\=2`. +need to write your query as `\(1\+1\)\=2`. When using JSON for the request body, two preceding backslashes (`\\`) are required; the backslash is a reserved escaping character in JSON strings. + +[source,js] +---- +GET /twitter/_search +{ + "query" : { + "query_string" : { + "query" : "kimchy\\!", + "fields" : ["user"] + } + } +} +---- +// CONSOLE +// TEST[setup:twitter] The reserved characters are: `+ - = && || > < ! ( ) { } [ ] ^ " ~ * ? : \ /` -Failing to escape these special characters correctly could lead to a syntax -error which prevents your query from running. +Failing to escape these special characters correctly could lead to a syntax error which prevents your query from running. NOTE: `<` and `>` can't be escaped at all. The only way to prevent them from attempting to create a range query is to remove them from the query string diff --git a/docs/reference/query-dsl/query_filter_context.asciidoc b/docs/reference/query-dsl/query_filter_context.asciidoc index 79f8c4bd960b0..a454c353851ed 100644 --- a/docs/reference/query-dsl/query_filter_context.asciidoc +++ b/docs/reference/query-dsl/query_filter_context.asciidoc @@ -1,27 +1,38 @@ [[query-filter-context]] == Query and filter context -The behaviour of a query clause depends on whether it is used in _query context_ or -in _filter context_: +[float] +[[relevance-scores]] +=== Relevance scores -Query context:: -+ --- -A query clause used in query context answers the question ``__How well does this +By default, Elasticsearch sorts matching search results by **relevance +score**, which measures how well each document matches a query. + +The relevance score is a positive floating point number, returned in the +`_score` meta-field of the <> API. The higher the +`_score`, the more relevant the document. 
While each query type can calculate +relevance scores differently, score calculation also depends on whether the +query clause is run in a **query** or **filter** context. + +[float] +[[query-context]] +=== Query context +In the query context, a query clause answers the question ``__How well does this document match this query clause?__'' Besides deciding whether or not the -document matches, the query clause also calculates a `_score` representing how -well the document matches, relative to other documents. +document matches, the query clause also calculates a relevance score in the +`_score` meta-field. -Query context is in effect whenever a query clause is passed to a `query` parameter, -such as the `query` parameter in the <> API. --- +Query context is in effect whenever a query clause is passed to a `query` +parameter, such as the `query` parameter in the +<> API. -Filter context:: -+ --- -In _filter_ context, a query clause answers the question ``__Does this document -match this query clause?__'' The answer is a simple Yes or No -- no scores are -calculated. Filter context is mostly used for filtering structured data, e.g. +[float] +[[filter-context]] +=== Filter context +In a filter context, a query clause answers the question ``__Does this +document match this query clause?__'' The answer is a simple Yes or No -- no +scores are calculated. Filter context is mostly used for filtering structured +data, e.g. * __Does this +timestamp+ fall into the range 2015 to 2016?__ * __Is the +status+ field set to ++"published"++__? @@ -34,8 +45,10 @@ parameter, such as the `filter` or `must_not` parameters in the <> query, the `filter` parameter in the <> query, or the <> aggregation. --- +[float] +[[query-filter-context-ex]] +=== Example of query and filter contexts Below is an example of query clauses being used in query and filter context in the `search` API. This query will match documents where all of the following conditions are met: @@ -75,4 +88,4 @@ GET /_search TIP: Use query clauses in query context for conditions which should affect the score of matching documents (i.e. how well does the document match), and use -all other query clauses in filter context. +all other query clauses in filter context. \ No newline at end of file diff --git a/docs/reference/query-dsl/range-query.asciidoc b/docs/reference/query-dsl/range-query.asciidoc index c5087d52f905e..7124b97713474 100644 --- a/docs/reference/query-dsl/range-query.asciidoc +++ b/docs/reference/query-dsl/range-query.asciidoc @@ -1,14 +1,19 @@ [[query-dsl-range-query]] -=== Range Query +=== Range query +++++ +Range +++++ -Matches documents with fields that have terms within a certain range. -The type of the Lucene query depends on the field type, for `string` -fields, the `TermRangeQuery`, while for number/date fields, the query is -a `NumericRangeQuery`. The following example returns all documents where -`age` is between `10` and `20`: +Returns documents that contain terms within a provided range. + +[[range-query-ex-request]] +==== Example request + +The following search returns documents where the `age` field contains a term +between `10` and `20`. [source,js] --------------------------------------------------- +---- GET _search { "query": { @@ -21,144 +26,209 @@ GET _search } } } --------------------------------------------------- +---- // CONSOLE -The `range` query accepts the following parameters: +[[range-query-top-level-params]] +==== Top-level parameters for `range` + +``:: ++ +-- +(Required, object) Field you wish to search. 
+--
+
+[[range-query-field-params]]
+==== Parameters for ``
+
+`gt`::
+(Optional) Greater than.
+
+`gte`::
+(Optional) Greater than or equal to.
+
+`lt`::
+(Optional) Less than.
+
+`lte`::
+(Optional) Less than or equal to.
+
+`format`::
++
+--
+(Optional, string) Date format used to convert `date` values in the query.
+
+By default, {es} uses the <> provided in the
+``'s mapping. This value overrides that mapping format.
-[horizontal]
-`gte`:: Greater-than or equal to
-`gt`:: Greater-than
-`lte`:: Less-than or equal to
-`lt`:: Less-than
-`boost`:: Sets the boost value of the query, defaults to `1.0`
+For valid syntax, see <>.
+[WARNING]
+====
+If a `format` and `date` value are incomplete, {es} replaces any missing year,
+month, or date component with the start of
+https://en.wikipedia.org/wiki/Unix_time[Unix time], which is January 1st, 1970.
+
+For example, if the `format` value is `dd`, {es} converts a `gte` value of `10`
+to `1970-01-10T00:00:00.000Z`.
+====
+
+--
+
+[[querying-range-fields]]
+`relation`::
++
+--
+(Optional, string) Indicates how the range query matches values for `range`
+fields. Valid values are:
+
+`INTERSECTS` (Default)::
+Matches documents with a range field value that intersects the query's range.
+
+`CONTAINS`::
+Matches documents with a range field value that entirely contains the query's range.
+
+`WITHIN`::
+Matches documents with a range field value entirely within the query's range.
+--
+
+`time_zone`::
++
+--
+(Optional, string)
+https://en.wikipedia.org/wiki/List_of_UTC_time_offsets[Coordinated Universal
+Time (UTC) offset] or
+https://en.wikipedia.org/wiki/List_of_tz_database_time_zones[IANA time zone]
+used to convert `date` values in the query to UTC.
+
+Valid values are ISO 8601 UTC offsets, such as `+01:00` or `-08:00`, and IANA
+time zone IDs, such as `America/Los_Angeles`.
+
+For an example query using the `time_zone` parameter, see
+<>.
+
+[NOTE]
+====
+The `time_zone` parameter does **not** affect the <> value
+of `now`. `now` is always the current system time in UTC.
+
+However, the `time_zone` parameter does convert dates calculated using `now` and
+<>. For example, the `time_zone` parameter will
+convert a value of `now/d`.
+====
+--
+
+`boost`::
++
+--
+(Optional, float) Floating point number used to decrease or increase the
+<> of a query. Defaults to `1.0`.
+
+You can use the `boost` parameter to adjust relevance scores for searches
+containing two or more queries.
+
+Boost values are relative to the default value of `1.0`. A boost value between
+`0` and `1.0` decreases the relevance score. A value greater than `1.0`
+increases the relevance score.
+--
+
+[[range-query-notes]]
+==== Notes
[[ranges-on-dates]]
-==== Ranges on date fields
+===== Using the `range` query with `date` fields
+
+When the `` parameter is a <> field datatype, you can use
+<> with the following parameters:
-When running `range` queries on fields of type <>, ranges can be
-specified using <>:
+* `gt`
+* `gte`
+* `lt`
+* `lte`
+
+For example, the following search returns documents where the `timestamp` field
+contains a date between today and yesterday.
[source,js] --------------------------------------------------- +---- GET _search { "query": { "range" : { - "date" : { + "timestamp" : { "gte" : "now-1d/d", "lt" : "now/d" } } } } --------------------------------------------------- +---- // CONSOLE -===== Date math and rounding - -When using <> to round dates to the nearest day, month, -hour, etc, the rounded dates depend on whether the ends of the ranges are -inclusive or exclusive. -Rounding up moves to the last millisecond of the rounding scope, and rounding -down to the first millisecond of the rounding scope. For example: +[[range-query-date-math-rounding]] +====== Date math and rounding +{es} rounds <> values in parameters as follows: -[horizontal] `gt`:: ++ +-- +Rounds up to the latest millisecond. - Greater than the date rounded up: `2014-11-18||/M` becomes - `2014-11-30T23:59:59.999`, ie excluding the entire month. +For example, `2014-11-18||/M` rounds up to `2014-11-30T23:59:59.999`, excluding +the entire month. +-- `gte`:: ++ +-- +Rounds down to the first millisecond. - Greater than or equal to the date rounded down: `2014-11-18||/M` becomes - `2014-11-01`, ie including the entire month. +For example, `2014-11-18||/M` rounds down to `2014-11-01`, including +the entire month. +-- `lt`:: ++ +-- +Rounds down to the first millisecond. - Less than the date rounded down: `2014-11-18||/M` becomes `2014-11-01`, ie - excluding the entire month. +For example, `2014-11-18||/M` rounds down to `2014-11-01`, excluding +the entire month. +-- `lte`:: ++ +-- +Rounds up to the latest millisecond. - Less than or equal to the date rounded up: `2014-11-18||/M` becomes - `2014-11-30T23:59:59.999`, ie including the entire month. +For example, `2014-11-18||/M` rounds up to `2014-11-30T23:59:59.999`, including +the entire month. +-- -===== Date format in range queries +[[range-query-time-zone]] +===== Example query using `time_zone` parameter -Formatted dates will be parsed using the <> -specified on the <> field by default, but it can be overridden by -passing the `format` parameter to the `range` query: +You can use the `time_zone` parameter to convert `date` values to UTC using a +UTC offset. For example: [source,js] -------------------------------------------------- -GET _search -{ - "query": { - "range" : { - "born" : { - "gte": "01/01/2012", - "lte": "2013", - "format": "dd/MM/yyyy||yyyy" - } - } - } -} --------------------------------------------------- -// CONSOLE - -Note that if the date misses some of the year, month and day coordinates, the -missing parts are filled with the start of -https://en.wikipedia.org/wiki/Unix_time[unix time], which is January 1st, 1970. -This means, that when e.g. specifying `dd` as the format, a value like `"gte" : 10` -will translate to `1970-01-10T00:00:00.000Z`. - -===== Time zone in range queries - -Dates can be converted from another timezone to UTC either by specifying the -time zone in the date value itself (if the <> -accepts it), or it can be specified as the `time_zone` parameter: - -[source,js] --------------------------------------------------- +---- GET _search { "query": { "range" : { "timestamp" : { - "gte": "2015-01-01 00:00:00", <1> - "lte": "now", <2> - "time_zone": "+01:00" + "time_zone": "+01:00", <1> + "gte": "2015-01-01 00:00:00", <2> + "lte": "now" <3> } } } } --------------------------------------------------- +---- // CONSOLE -<1> This date will be converted to `2014-12-31T23:00:00 UTC`. -<2> `now` is not affected by the `time_zone` parameter (dates must be stored as UTC).
- -[[querying-range-fields]] -==== Querying range fields - -`range` queries can be used on fields of type <>, allowing to -match a range specified in the query with a range field value in the document. -The `relation` parameter controls how these two ranges are matched: - -[horizontal] -`WITHIN`:: - - Matches documents who's range field is entirely within the query's range. - -`CONTAINS`:: - - Matches documents who's range field entirely contains the query's range. - -`INTERSECTS`:: - - Matches documents who's range field intersects the query's range. - This is the default value when querying range fields. - -For examples, see <> mapping type. +<1> Indicates that `date` values use a UTC offset of `+01:00`. +<2> With a UTC offset of `+01:00`, {es} converts this date to +`2014-12-31T23:00:00 UTC`. +<3> The `time_zone` parameter does not affect the `now` value. diff --git a/docs/reference/query-dsl/rank-feature-query.asciidoc b/docs/reference/query-dsl/rank-feature-query.asciidoc index fe23c5f3ec26f..9a132e3e5d381 100644 --- a/docs/reference/query-dsl/rank-feature-query.asciidoc +++ b/docs/reference/query-dsl/rank-feature-query.asciidoc @@ -1,33 +1,61 @@ [[query-dsl-rank-feature-query]] -=== Rank Feature Query - -The `rank_feature` query is a specialized query that only works on -<> fields and <> fields. -Its goal is to boost the score of documents based on the values of numeric -features. It is typically put in a `should` clause of a -<> query so that its score is added to the score -of the query. - -Compared to using <> or other -ways to modify the score, this query has the benefit of being able to -efficiently skip non-competitive hits when -<> is not set to `true`. Speedups may be -spectacular. - -Here is an example that indexes various features: - - https://en.wikipedia.org/wiki/PageRank[`pagerank`], a measure of the - importance of a website, - - `url_length`, the length of the url, which typically correlates negatively - with relevance, - - `topics`, which associates a list of topics with every document alongside a - measure of how well the document is connected to this topic. - -Then the example includes an example query that searches for `"2016"` and boosts -based or `pagerank`, `url_length` and the `sports` topic. +=== Rank feature query +++++ +Rank feature +++++ + +Boosts the <> of documents based on the +numeric value of a <> or +<> field. + +The `rank_feature` query is typically used in the `should` clause of a +<> query so its relevance scores are added to other +scores from the `bool` query. + +Unlike the <> query or other +ways to change <>, the +`rank_feature` query efficiently skips non-competitive hits when the +<> parameter is **not** `true`. This can +dramatically improve query speed. + +[[rank-feature-query-functions]] +==== Rank feature functions + +To calculate relevance scores based on rank feature fields, the `rank_feature` +query supports the following mathematical functions: + +* <> +* <> +* <> + +If you don't know where to start, we recommend using the `saturation` function. +If no function is provided, the `rank_feature` query uses the `saturation` +function by default. + +[[rank-feature-query-ex-request]] +==== Example request + +[[rank-feature-query-index-setup]] +===== Index setup + +To use the `rank_feature` query, your index must include a +<> or <> field +mapping. To see how you can set up an index for the `rank_feature` query, try +the following example. 
+ +Create a `test` index with the following field mappings: + +- `pagerank`, a <> field which measures the +importance of a website +- `url_length`, a <> field which contains the +length of the website's URL. For this example, a long URL correlates negatively +to relevance, indicated by a `positive_score_impact` value of `false`. +- `topics`, a <> field which contains a list of +topics and a measure of how well each document is connected to this topic [source,js] --------------------------------------------------- -PUT test +---- +PUT /test { "mappings": { "properties": { @@ -44,8 +72,16 @@ PUT test } } } +---- +// CONSOLE +// TESTSETUP + -PUT test/_doc/1 +Index several documents to the `test` index. + +[source,js] +---- +PUT /test/_doc/1?refresh { "url": "http://en.wikipedia.org/wiki/2016_Summer_Olympics", "content": "Rio 2016", @@ -57,10 +93,10 @@ PUT test/_doc/1 } } -PUT test/_doc/2 +PUT /test/_doc/2?refresh { "url": "http://en.wikipedia.org/wiki/2016_Brazilian_Grand_Prix", - "content": "Formula One motor race held on 13 November 2016 at the Autódromo José Carlos Pace in São Paulo, Brazil", + "content": "Formula One motor race held on 13 November 2016", "pagerank": 50.3, "url_length": 47, "topics": { @@ -70,7 +106,7 @@ PUT test/_doc/2 } } -PUT test/_doc/3 +PUT /test/_doc/3?refresh { "url": "http://en.wikipedia.org/wiki/Deadpool_(film)", "content": "Deadpool is a 2016 American superhero film", @@ -81,10 +117,18 @@ PUT test/_doc/3 "super hero": 65 } } +---- +// CONSOLE + +[[rank-feature-query-ex-query]] +===== Example query -POST test/_refresh +The following query searches for `2016` and boosts relevance scores based or +`pagerank`, `url_length`, and the `sports` topic. -GET test/_search +[source,js] +---- +GET /test/_search { "query": { "bool": { @@ -117,31 +161,80 @@ GET test/_search } } } --------------------------------------------------- +---- // CONSOLE -[float] -=== Supported functions -The `rank_feature` query supports 3 functions in order to boost scores using the -values of rank features. If you do not know where to start, we recommend that you -start with the `saturation` function, which is the default when no function is -provided. +[[rank-feature-top-level-params]] +==== Top-level parameters for `rank_feature` + +`field`:: +(Required, string) <> or +<> field used to boost +<>. + +`boost`:: ++ +-- +(Optional, float) Floating point number used to decrease or increase +<>. Defaults to `1.0`. + +Boost values are relative to the default value of `1.0`. A boost value between +`0` and `1.0` decreases the relevance score. A value greater than `1.0` +increases the relevance score. +-- + +`saturation`:: ++ +-- +(Optional, <>) Saturation +function used to boost <> based on the +value of the rank feature `field`. If no function is provided, the `rank_feature` +query defaults to the `saturation` function. See +<> for more information. + +Only one function `saturation`, `log`, or `sigmoid` can be provided. +-- -[float] -==== Saturation +`log`:: ++ +-- +(Optional, <>) Logarithmic +function used to boost <> based on the +value of the rank feature `field`. See +<> for more information. -This function gives a score that is equal to `S / (S + pivot)` where `S` is the -value of the rank feature and `pivot` is a configurable pivot value so that the -result will be less than +0.5+ if `S` is less than pivot and greater than +0.5+ -otherwise. Scores are always is +(0, 1)+. +Only one function `saturation`, `log`, or `sigmoid` can be provided. 
+-- -If the rank feature has a negative score impact then the function will be computed as -`pivot / (S + pivot)`, which decreases when `S` increases. +`sigmoid`:: ++ +-- +(Optional, <>) Sigmoid function used +to boost <> based on the value of the +rank feature `field`. See <> for more +information. + +Only one function `saturation`, `log`, or `sigmoid` can be provided. +-- + + +[[rank-feature-query-notes]] +==== Notes + +[[rank-feature-query-saturation]] +===== Saturation +The `saturation` function gives a score equal to `S / (S + pivot)`, where `S` is +the value of the rank feature field and `pivot` is a configurable pivot value so +that the result will be less than `0.5` if `S` is less than pivot and greater +than `0.5` otherwise. Scores are always `(0,1)`. + +If the rank feature has a negative score impact then the function will be +computed as `pivot / (S + pivot)`, which decreases when `S` increases. [source,js] -------------------------------------------------- -GET test/_search +GET /test/_search { "query": { "rank_feature": { @@ -154,16 +247,15 @@ GET test/_search } -------------------------------------------------- // CONSOLE -// TEST[continued] -If +pivot+ is not supplied then Elasticsearch will compute a default value that -will be approximately equal to the geometric mean of all feature values that -exist in the index. We recommend this if you haven't had the opportunity to -train a good pivot value. +If a `pivot` value is not provided, {es} computes a default value equal to the +approximate geometric mean of all rank feature values in the index. We recommend +using this default value if you haven't had the opportunity to train a good +pivot value. [source,js] -------------------------------------------------- -GET test/_search +GET /test/_search { "query": { "rank_feature": { @@ -174,20 +266,18 @@ GET test/_search } -------------------------------------------------- // CONSOLE -// TEST[continued] - -[float] -==== Logarithm -This function gives a score that is equal to `log(scaling_factor + S)` where -`S` is the value of the rank feature and `scaling_factor` is a configurable scaling -factor. Scores are unbounded. +[[rank-feature-query-logarithm]] +===== Logarithm +The `log` function gives a score equal to `log(scaling_factor + S)`, where `S` +is the value of the rank feature field and `scaling_factor` is a configurable +scaling factor. Scores are unbounded. This function only supports rank features that have a positive score impact. [source,js] -------------------------------------------------- -GET test/_search +GET /test/_search { "query": { "rank_feature": { @@ -200,23 +290,21 @@ GET test/_search } -------------------------------------------------- // CONSOLE -// TEST[continued] -[float] -==== Sigmoid - -This function is an extension of `saturation` which adds a configurable +[[rank-feature-query-sigmoid]] +===== Sigmoid +The `sigmoid` function is an extension of `saturation` which adds a configurable exponent. Scores are computed as `S^exp^ / (S^exp^ + pivot^exp^)`. Like for the -`saturation` function, `pivot` is the value of `S` that gives a score of +0.5+ -and scores are in +(0, 1)+. +`saturation` function, `pivot` is the value of `S` that gives a score of `0.5` +and scores are `(0,1)`. -`exponent` must be positive, but is typically in +[0.5, 1]+. A good value should -be computed via training. If you don't have the opportunity to do so, we recommend -that you stick to the `saturation` function instead. +The `exponent` must be positive and is typically in `[0.5, 1]`. 
A +good value should be computed via training. If you don't have the opportunity to +do so, we recommend you use the `saturation` function instead. [source,js] -------------------------------------------------- -GET test/_search +GET /test/_search { "query": { "rank_feature": { @@ -229,5 +317,4 @@ GET test/_search } } -------------------------------------------------- -// CONSOLE -// TEST[continued] +// CONSOLE \ No newline at end of file diff --git a/docs/reference/query-dsl/regexp-query.asciidoc b/docs/reference/query-dsl/regexp-query.asciidoc index b50b2ca8f5055..1feed72d45b25 100644 --- a/docs/reference/query-dsl/regexp-query.asciidoc +++ b/docs/reference/query-dsl/regexp-query.asciidoc @@ -1,98 +1,89 @@ [[query-dsl-regexp-query]] -=== Regexp Query +=== Regexp query +++++ +Regexp +++++ -The `regexp` query allows you to use regular expression term queries. -See <> for details of the supported regular expression language. -The "term queries" in that first sentence means that Elasticsearch will apply -the regexp to the terms produced by the tokenizer for that field, and not -to the original text of the field. +Returns documents that contain terms matching a +https://en.wikipedia.org/wiki/Regular_expression[regular expression]. -*Note*: The performance of a `regexp` query heavily depends on the -regular expression chosen. Matching everything like `.*` is very slow as -well as using lookaround regular expressions. If possible, you should -try to use a long prefix before your regular expression starts. Wildcard -matchers like `.*?+` will mostly lower performance. +A regular expression is a way to match patterns in data using placeholder +characters, called operators. For a list of operators supported by the +`regexp` query, see <>. -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "regexp":{ - "name.first": "s.*y" - } - } -} --------------------------------------------------- -// CONSOLE +[[regexp-query-ex-request]] +==== Example request -Boosting is also supported +The following search returns documents where the `user` field contains any term +that begins with `k` and ends with `y`. The `.*` operators match any +characters of any length, including no characters. Matching +terms can include `ky`, `kay`, and `kimchy`. [source,js] --------------------------------------------------- +---- GET /_search { "query": { - "regexp":{ - "name.first":{ - "value":"s.*y", - "boost":1.2 + "regexp": { + "user": { + "value": "k.*y", + "flags" : "ALL", + "max_determinized_states": 10000, + "rewrite": "constant_score" } } } } --------------------------------------------------- +---- // CONSOLE -You can also use special flags -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "regexp":{ - "name.first": { - "value": "s.*y", - "flags" : "INTERSECTION|COMPLEMENT|EMPTY" - } - } - } -} --------------------------------------------------- -// CONSOLE +[[regexp-top-level-params]] +==== Top-level parameters for `regexp` +``:: +(Required, object) Field you wish to search. -Possible flags are `ALL` (default), `ANYSTRING`, `COMPLEMENT`, -`EMPTY`, `INTERSECTION`, `INTERVAL`, or `NONE`. Please check the -http://lucene.apache.org/core/4_9_0/core/org/apache/lucene/util/automaton/RegExp.html[Lucene -documentation] for their meaning +[[regexp-query-field-params]] +==== Parameters for `` +`value`:: +(Required, string) Regular expression for terms you wish to find in the provided +``. For a list of supported operators, see <>. 
++ +-- +By default, regular expressions are limited to 1,000 characters. You can change +this limit using the <> +setting. -Regular expressions are dangerous because it's easy to accidentally -create an innocuous looking one that requires an exponential number of -internal determinized automaton states (and corresponding RAM and CPU) -for Lucene to execute. Lucene prevents these using the -`max_determinized_states` setting (defaults to 10000). You can raise -this limit to allow more complex regular expressions to execute. +[WARNING] +===== +The performance of the `regexp` query can vary based on the regular expression +provided. To improve performance, avoid using wildcard patterns, such as `.*` or +`.*?+`, without a prefix or suffix. +===== +-- -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "regexp":{ - "name.first": { - "value": "s.*y", - "flags" : "INTERSECTION|COMPLEMENT|EMPTY", - "max_determinized_states": 20000 - } - } - } -} --------------------------------------------------- -// CONSOLE +`flags`:: +(Optional, string) Enables optional operators for the regular expression. For +valid values and more information, see <>. + +`max_determinized_states`:: ++ +-- +(Optional, integer) Maximum number of +https://en.wikipedia.org/wiki/Deterministic_finite_automaton[automaton states] +required for the query. Default is `10000`. + +{es} uses https://lucene.apache.org/core/[Apache Lucene] internally to parse +regular expressions. Lucene converts each regular expression to a finite +automaton containing a number of determinized states. -NOTE: By default the maximum length of regex string allowed in a Regexp Query -is limited to 1000. You can update the `index.max_regex_length` index setting -to bypass this limit. +You can use this parameter to prevent that conversion from unintentionally +consuming too many resources. You may need to increase this limit to run complex +regular expressions. +-- -include::regexp-syntax.asciidoc[] +`rewrite`:: +(Optional, string) Method used to rewrite the query. For valid values and more +information, see the <>. diff --git a/docs/reference/query-dsl/regexp-syntax.asciidoc b/docs/reference/query-dsl/regexp-syntax.asciidoc index 6a929ba98d502..cd8e24661728a 100644 --- a/docs/reference/query-dsl/regexp-syntax.asciidoc +++ b/docs/reference/query-dsl/regexp-syntax.asciidoc @@ -1,286 +1,224 @@ [[regexp-syntax]] -==== Regular expression syntax +== Regular expression syntax -Regular expression queries are supported by the `regexp` and the `query_string` -queries. The Lucene regular expression engine -is not Perl-compatible but supports a smaller range of operators. +A https://en.wikipedia.org/wiki/Regular_expression[regular expression] is a way to +match patterns in data using placeholder characters, called operators. -[NOTE] -===== -We will not attempt to explain regular expressions, but -just explain the supported operators. -===== +{es} supports regular expressions in the following queries: -===== Standard operators +* <> +* <> -Anchoring:: -+ --- - -Most regular expression engines allow you to match any part of a string. -If you want the regexp pattern to start at the beginning of the string or -finish at the end of the string, then you have to _anchor_ it specifically, -using `^` to indicate the beginning or `$` to indicate the end. - -Lucene's patterns are always anchored. The pattern provided must match -the entire string. 
For string `"abcde"`: - - ab.* # match - abcd # no match - --- - -Allowed characters:: -+ --- +{es} uses https://lucene.apache.org/core/[Apache Lucene]'s regular expression +engine to parse these queries. -Any Unicode characters may be used in the pattern, but certain characters -are reserved and must be escaped. The standard reserved characters are: +[float] +[[regexp-reserved-characters]] +=== Reserved characters +Lucene's regular expression engine supports all Unicode characters. However, the +following characters are reserved as operators: .... . ? + * | { } [ ] ( ) " \ .... -If you enable optional features (see below) then these characters may -also be reserved: +Depending on the <> enabled, the +following characters may also be reserved: - # @ & < > ~ - -Any reserved character can be escaped with a backslash `"\*"` including -a literal backslash character: `"\\"` +.... +# @ & < > ~ +.... -Additionally, any characters (except double quotes) are interpreted literally -when surrounded by double quotes: +To use one of these characters literally, escape it with a preceding +backslash or surround it with double quotes. For example: - john"@smith.com" +.... +\@ # renders as a literal '@' +\\ # renders as a literal '\' +"john@smith.com" # renders as 'john@smith.com' +.... + +[float] +[[regexp-standard-operators]] +=== Standard operators --- +Lucene's regular expression engine does not use the +https://en.wikipedia.org/wiki/Perl_Compatible_Regular_Expressions[Perl +Compatible Regular Expressions (PCRE)] library, but it does support the +following standard operators. -Match any character:: +`.`:: + -- +Matches any character. For example: -The period `"."` can be used to represent any character. For string `"abcde"`: - - ab... # match - a.c.e # match - +.... +ab. # matches 'aba', 'abb', 'abz', etc. +.... -- -One-or-more:: +`?`:: + -- +Repeat the preceding character zero or one times. Often used to make the +preceding character optional. For example: -The plus sign `"+"` can be used to repeat the preceding shortest pattern -once or more times. For string `"aaabbb"`: - - a+b+ # match - aa+bb+ # match - a+.+ # match - aa+bbb+ # match - +.... +abc? # matches 'ab' and 'abc' +.... -- -Zero-or-more:: +`+`:: + -- +Repeat the preceding character one or more times. For example: -The asterisk `"*"` can be used to match the preceding shortest pattern -zero-or-more times. For string `"aaabbb`": - - a*b* # match - a*b*c* # match - .*bbb.* # match - aaa*bbb* # match - +.... +ab+ # matches 'abb', 'abbb', 'abbbb', etc. +.... -- -Zero-or-one:: +`*`:: + -- +Repeat the preceding character zero or more times. For example: -The question mark `"?"` makes the preceding shortest pattern optional. It -matches zero or one times. For string `"aaabbb"`: - - aaa?bbb? # match - aaaa?bbbb? # match - .....?.? # match - aa?bb? # no match - +.... +ab* # matches 'ab', 'abb', 'abbb', 'abbbb', etc. +.... -- -Min-to-max:: +`{}`:: + -- +Minimum and maximum number of times the preceding character can repeat. For +example: -Curly brackets `"{}"` can be used to specify a minimum and (optionally) -a maximum number of times the preceding shortest pattern can repeat. The -allowed forms are: - - {5} # repeat exactly 5 times - {2,5} # repeat at least twice and at most 5 times - {2,} # repeat at least twice - -For string `"aaabbb"`: - - a{3}b{3} # match - a{2,4}b{2,4} # match - a{2,}b{2,} # match - .{3}.{3} # match - a{4}b{4} # no match - a{4,6}b{4,6} # no match - a{4,}b{4,} # no match - +.... 
+a{2} # matches 'aa' +a{2,4} # matches 'aa', 'aaa', and 'aaaa' +a{2,} # matches 'a` repeated two or more times +.... -- -Grouping:: +`|`:: + -- - -Parentheses `"()"` can be used to form sub-patterns. The quantity operators -listed above operate on the shortest previous pattern, which can be a group. -For string `"ababab"`: - - (ab)+ # match - ab(ab)+ # match - (..)+ # match - (...)+ # no match - (ab)* # match - abab(ab)? # match - ab(ab)? # no match - (ab){3} # match - (ab){1,2} # no match - +OR operator. The match will succeed if the longest pattern on either the left +side OR the right side matches. For example: +.... +abc|xyz # matches 'abc' and 'xyz' +.... -- -Alternation:: +`( … )`:: + -- +Forms a group. You can use a group to treat part of the expression as a single +character. For example: -The pipe symbol `"|"` acts as an OR operator. The match will succeed if -the pattern on either the left-hand side OR the right-hand side matches. -The alternation applies to the _longest pattern_, not the shortest. -For string `"aabb"`: - - aabb|bbaa # match - aacc|bb # no match - aa(cc|bb) # match - a+|b+ # no match - a+b+|b+a+ # match - a+(b|c)+ # match - +.... +abc(def)? # matches 'abc' and 'abcdef' but not 'abcd' +.... -- -Character classes:: +`[ … ]`:: + -- +Match one of the characters in the brackets. For example: -Ranges of potential characters may be represented as character classes -by enclosing them in square brackets `"[]"`. A leading `^` -negates the character class. The allowed forms are: - - [abc] # 'a' or 'b' or 'c' - [a-c] # 'a' or 'b' or 'c' - [-abc] # '-' or 'a' or 'b' or 'c' - [abc\-] # '-' or 'a' or 'b' or 'c' - [^abc] # any character except 'a' or 'b' or 'c' - [^a-c] # any character except 'a' or 'b' or 'c' - [^-abc] # any character except '-' or 'a' or 'b' or 'c' - [^abc\-] # any character except '-' or 'a' or 'b' or 'c' +.... +[abc] # matches 'a', 'b', 'c' +.... -Note that the dash `"-"` indicates a range of characters, unless it is -the first character or if it is escaped with a backslash. +Inside the brackets, `-` indicates a range unless `-` is the first character or +escaped. For example: -For string `"abcd"`: +.... +[a-c] # matches 'a', 'b', or 'c' +[-abc] # '-' is first character. Matches '-', 'a', 'b', or 'c' +[abc\-] # Escapes '-'. Matches 'a', 'b', 'c', or '-' +.... - ab[cd]+ # match - [a-d]+ # match - [^a-d]+ # no match +A `^` before a character in the brackets negates the character or range. For +example: +.... +[^abc] # matches any character except 'a', 'b', or 'c' +[^a-c] # matches any character except 'a', 'b', or 'c' +[^-abc] # matches any character except '-', 'a', 'b', or 'c' +[^abc\-] # matches any character except 'a', 'b', 'c', or '-' +.... -- -===== Optional operators - -These operators are available by default as the `flags` parameter defaults to `ALL`. -Different flag combinations (concatenated with `"|"`) can be used to enable/disable -specific operators: +[float] +[[regexp-optional-operators]] +=== Optional operators - { - "regexp": { - "username": { - "value": "john~athon<1-5>", - "flags": "COMPLEMENT|INTERVAL" - } - } - } +You can use the `flags` parameter to enable more optional operators for +Lucene's regular expression engine. -Complement:: -+ --- - -The complement is probably the most useful option. The shortest pattern that -follows a tilde `"~"` is negated. For instance, `"ab~cd" means: +To enable multiple operators, use a `|` separator. For example, a `flags` value +of `COMPLEMENT|INTERVAL` enables the `COMPLEMENT` and `INTERVAL` operators. 
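+
+As a quick sketch, a `regexp` query might enable just these two operators with
+a combined pattern. The `username` field and the pattern value here are purely
+illustrative:
+
+[source,js]
+----
+GET /_search
+{
+    "query": {
+        "regexp": {
+            "username": {
+                "value": "john~athon<1-5>",
+                "flags": "COMPLEMENT|INTERVAL"
+            }
+        }
+    }
+}
+----
+// CONSOLE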
-* Starts with `a` -* Followed by `b` -* Followed by a string of any length that it anything but `c` -* Ends with `d` +[float] +==== Valid values -For the string `"abcdef"`: +`ALL` (Default):: +Enables all optional operators. - ab~df # match - ab~cf # match - ab~cdef # no match - a~(cb)def # match - a~(bc)def # no match - -Enabled with the `COMPLEMENT` or `ALL` flags. +`COMPLEMENT`:: ++ +-- +Enables the `~` operator. You can use `~` to negate the shortest following +pattern. For example: +.... +a~bc # matches 'adc' and 'aec' but not 'abc' +.... -- -Interval:: +`INTERVAL`:: + -- +Enables the `<>` operators. You can use `<>` to match a numeric range. For +example: -The interval option enables the use of numeric ranges, enclosed by angle -brackets `"<>"`. For string: `"foo80"`: - - foo<1-100> # match - foo<01-100> # match - foo<001-100> # no match - -Enabled with the `INTERVAL` or `ALL` flags. - - +.... +foo<1-100> # matches 'foo1', 'foo2' ... 'foo99', 'foo100' +foo<01-100> # matches 'foo01', 'foo02' ... 'foo99', 'foo100' +.... -- -Intersection:: +`INTERSECTION`:: + -- +Enables the `&` operator, which acts as an AND operator. The match will succeed +if patterns on both the left side AND the right side matches. For example: -The ampersand `"&"` joins two patterns in a way that both of them have to -match. For string `"aaabbb"`: - - aaa.+&.+bbb # match - aaa&bbb # no match - -Using this feature usually means that you should rewrite your regular -expression. - -Enabled with the `INTERSECTION` or `ALL` flags. - +.... +aaa.+&.+bbb # matches 'aaabbb' +.... -- -Any string:: +`ANYSTRING`:: + -- +Enables the `@` operator. You can use `@` to match any entire +string. -The at sign `"@"` matches any string in its entirety. This could be combined -with the intersection and complement above to express ``everything except''. -For instance: +You can combine the `@` operator with `&` and `~` operators to create an +"everything except" logic. For example: - @&~(foo.+) # anything except string beginning with "foo" - -Enabled with the `ANYSTRING` or `ALL` flags. +.... +@&~(abc.+) # matches everything except terms beginning with 'abc' +.... -- + +[float] +[[regexp-unsupported-operators]] +=== Unsupported operators +Lucene's regular expression engine does not support anchor operators, such as +`^` (beginning of line) or `$` (end of line). To match a term, the regular +expression must match the entire string. \ No newline at end of file diff --git a/docs/reference/query-dsl/script-query.asciidoc b/docs/reference/query-dsl/script-query.asciidoc index 917991e3211c6..e8c349ea81d4a 100644 --- a/docs/reference/query-dsl/script-query.asciidoc +++ b/docs/reference/query-dsl/script-query.asciidoc @@ -1,12 +1,18 @@ [[query-dsl-script-query]] -=== Script Query +=== Script query +++++ +Script +++++ -A query allowing to define -<> as queries. They are typically used in a filter -context, for example: +Filters documents based on a provided <>. The +`script` query is typically used in a <>. + + +[[script-query-ex-request]] +==== Example request [source,js] ----------------------------------------------- +---- GET /_search { "query": { @@ -22,18 +28,29 @@ GET /_search } } } ----------------------------------------------- +---- // CONSOLE -[float] -==== Custom Parameters -Scripts are compiled and cached for faster execution. 
If the same script -can be used, just with different parameters provider, it is preferable -to use the ability to pass parameters to the script itself, for example: +[[script-top-level-params]] +==== Top-level parameters for `script` + +`script`:: +(Required, <>) Contains a script to run +as a query. This script must return a boolean value, `true` or `false`. + +[[script-query-notes]] +==== Notes + +[[script-query-custom-params]] +===== Custom Parameters + +Like <>, scripts are cached for faster execution. +If you frequently change the arguments of a script, we recommend you store them +in the script's `params` parameter. For example: [source,js] ----------------------------------------------- +---- GET /_search { "query": { @@ -52,6 +69,5 @@ GET /_search } } } ----------------------------------------------- -// CONSOLE - +---- +// CONSOLE \ No newline at end of file diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index cdcfd0f0a5032..c2cd3b4c9e093 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-script-score-query]] -=== Script Score Query +=== Script score query +++++ +Script score +++++ experimental[] @@ -131,7 +134,7 @@ these unique values need to be loaded into memory. // NOTCONSOLE -[[decay-functions]] +[[decay-functions-numeric-fields]] ===== Decay functions for numeric fields You can read more about decay functions {ref}/query-dsl-function-score-query.html#function-decay[here]. @@ -221,10 +224,12 @@ Script Score Query will be a substitute for it. Here we describe how Function Score Query's functions can be equivalently implemented in Script Score Query: +[[script-score]] ===== `script_score` What you used in `script_score` of the Function Score query, you can copy into the Script Score query. No changes here. +[[weight]] ===== `weight` `weight` function can be implemented in the Script Score query through the following script: @@ -240,12 +245,13 @@ the following script: -------------------------------------------------- // NOTCONSOLE +[[random-score]] ===== `random_score` Use `randomReproducible` and `randomNotReproducible` functions as described in <>. - +[[field-value-factor]] ===== `field_value_factor` `field_value_factor` function can be easily implemented through script: @@ -295,7 +301,7 @@ through a script: | `reciprocal` | `1.0 / doc['f'].value` |======================================================================= - +[[decay-functions]] ===== `decay functions` Script Score query has equivalent <> that can be used in script. diff --git a/docs/reference/query-dsl/simple-query-string-query.asciidoc b/docs/reference/query-dsl/simple-query-string-query.asciidoc index 113db56072df6..44f811007a60b 100644 --- a/docs/reference/query-dsl/simple-query-string-query.asciidoc +++ b/docs/reference/query-dsl/simple-query-string-query.asciidoc @@ -1,10 +1,24 @@ [[query-dsl-simple-query-string-query]] -=== Simple Query String Query +=== Simple query string query +++++ +Simple query string +++++ -A query that uses the SimpleQueryParser to parse its context. Unlike the -regular `query_string` query, the `simple_query_string` query will never -throw an exception, and discards invalid parts of the query. Here is -an example: +Returns documents based on a provided query string, using a parser with a +limited but fault-tolerant syntax. 
+ +This query uses a <> to parse and +split the provided query string into terms based on special operators. The query +then <> each term independently before returning matching +documents. + +While its syntax is more limited than the +<>, the `simple_query_string` +query does not return errors for invalid syntax. Instead, it ignores any invalid +parts of the query string. + +[[simple-query-string-query-ex-request]] +==== Example request [source,js] -------------------------------------------------- @@ -21,72 +35,108 @@ GET /_search -------------------------------------------------- // CONSOLE -The `simple_query_string` top level parameters include: -[cols="<,<",options="header",] -|======================================================================= -|Parameter |Description -|`query` |The actual query to be parsed. See below for syntax. +[[simple-query-string-top-level-params]] +==== Top-level parameters for `simple_query_string` + +`query`:: +(Required, string) Query string you wish to parse and use for search. See <>. + +`fields`:: ++ +-- +(Optional, array of strings) Array of fields you wish to search. + +This field accepts wildcard expressions. You also can boost relevance scores for +matches to particular fields using a caret (`^`) notation. See +<> for examples. + +Defaults to the `index.query.default_field` index setting, which has a default +value of `*`. The `*` value extracts all fields that are eligible to term +queries and filters the metadata fields. All extracted fields are then combined +to build a query if no `prefix` is specified. -|`fields` |The fields to perform the parsed query against. Defaults to the -`index.query.default_field` index settings, which in turn defaults to `*`. `*` -extracts all fields in the mapping that are eligible to term queries and filters -the metadata fields. +WARNING: There is a limit on the number of fields that can be queried at once. +It is defined by the `indices.query.bool.max_clause_count` +<>, which defaults to `1024`. +-- -WARNING: There is a limit on the number of fields that can be queried -at once. It is defined by the `indices.query.bool.max_clause_count` <> -which defaults to 1024. +`default_operator`:: ++ +-- +(Optional, string) Default boolean logic used to interpret text in the query +string if no operators are specified. Valid values are: -|`default_operator` |The default operator used if no explicit operator -is specified. For example, with a default operator of `OR`, the query -`capital of Hungary` is translated to `capital OR of OR Hungary`, and -with default operator of `AND`, the same query is translated to -`capital AND of AND Hungary`. The default value is `OR`. +`OR` (Default):: +For example, a query string of `capital of Hungary` is interpreted as `capital +OR of OR Hungary`. -|`analyzer` |Force the analyzer to use to analyze each term of the query when -creating composite queries. +`AND`:: +For example, a query string of `capital of Hungary` is interpreted as `capital +AND of AND Hungary`. +-- -|`flags` |A set of <> specifying which features of the -`simple_query_string` to enable. Defaults to `ALL`. +`all_fields`:: +deprecated:[6.0.0, set `fields` to `*` instead](Optional, boolean) If `true`, +search all searchable fields in the index's field mapping. -|`analyze_wildcard` | Whether terms of prefix queries should be automatically -analyzed or not. If `true` a best effort will be made to analyze the prefix. However, -some analyzers will be not able to provide a meaningful results -based just on the prefix of a term. 
Defaults to `false`. +`analyze_wildcard`:: +(Optional, boolean) If `true`, the query attempts to analyze wildcard terms in +the query string. Defaults to `false`. -|`lenient` | If set to `true` will cause format based failures -(like providing text to a numeric field) to be ignored. +`analyzer`:: +(Optional, string) <> used to convert text in the +query string into tokens. Defaults to the +<> mapped for the +`default_field`. If no analyzer is mapped, the index's default analyzer is used. -|`minimum_should_match` | The minimum number of clauses that must match for a - document to be returned. See the - <> documentation for the - full list of options. +`auto_generate_synonyms_phrase_query`:: +(Optional, boolean) If `true`, <> +queries are automatically created for multi-term synonyms. Defaults to `true`. +See <> for an example. -|`quote_field_suffix` | A suffix to append to fields for quoted parts of -the query string. This allows to use a field that has a different analysis chain -for exact matching. Look <> for a -comprehensive example. +`flags`:: +(Optional, string) List of enabled operators for the +<>. Defaults to `ALL` +(all operators). See <> for valid values. -|`auto_generate_synonyms_phrase_query` |Whether phrase queries should be automatically generated for multi terms synonyms. -Defaults to `true`. +`fuzzy_max_expansions`:: +(Optional, integer) Maximum number of terms to which the query expands for fuzzy +matching. Defaults to `50`. -|`all_fields` | deprecated[6.0.0, set `fields` to `*` instead] -Perform the query on all fields detected in the mapping that can -be queried. +`fuzzy_prefix_length`:: +(Optional, integer) Number of beginning characters left unchanged for fuzzy +matching. Defaults to `0`. -|`fuzzy_prefix_length` |Set the prefix length for fuzzy queries. Default -is `0`. +`fuzzy_transpositions`:: +(Optional, boolean) If `true`, edits for fuzzy matching include +transpositions of two adjacent characters (ab → ba). Defaults to `true`. -|`fuzzy_max_expansions` |Controls the number of terms fuzzy queries will -expand to. Defaults to `50` +`lenient`:: +(Optional, boolean) If `true`, format-based errors, such as providing a text +value for a <> field, are ignored. Defaults to `false`. -|`fuzzy_transpositions` |Set to `false` to disable fuzzy transpositions (`ab` -> `ba`). -Default is `true`. -|======================================================================= +`minimum_should_match`:: +(Optional, string) Minimum number of clauses that must match for a document to +be returned. See the <> for valid values and more information. -[float] -===== Simple Query String Syntax -The `simple_query_string` supports the following special characters: +`quote_field_suffix`:: ++ +-- +(Optional, string) Suffix appended to quoted text in the query string. + +You can use this suffix to use a different analysis method for exact matches. +See <>. +-- + + +[[simple-query-string-query-notes]] +==== Notes + +[[simple-query-string-syntax]] +===== Simple query string syntax +The `simple_query_string` query supports the following operators: * `+` signifies AND operation * `|` signifies OR operation @@ -97,11 +147,11 @@ The `simple_query_string` supports the following special characters: * `~N` after a word signifies edit distance (fuzziness) * `~N` after a phrase signifies slop amount -In order to search for any of these special characters, they will need to -be escaped with `\`. +To use one of these characters literally, escape it with a preceding backslash +(`\`). 
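+
+For example, the following sketch escapes the `+` character so it is treated as
+ordinary text rather than as the AND operator. The `body` field and query text
+are illustrative, and note that the backslash itself must be escaped a second
+time inside the JSON string:
+
+[source,js]
+----
+GET /_search
+{
+    "query": {
+        "simple_query_string": {
+            "query": "foo\\+bar",
+            "fields": [ "body" ]
+        }
+    }
+}
+----
+// CONSOLE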
-Be aware that this syntax may have a different behavior depending on the -`default_operator` value. For example, consider the following query: +The behavior of these operators may differ depending on the `default_operator` +value. For example: [source,js] -------------------------------------------------- @@ -117,26 +167,20 @@ GET /_search -------------------------------------------------- // CONSOLE -You may expect that documents containing only "foo" or "bar" will be returned, -as long as they do not contain "baz", however, due to the `default_operator` -being OR, this really means "match documents that contain "foo" or documents -that contain "bar", or documents that don't contain "baz". If this is unintended -then the query can be switched to `"foo bar +-baz"` which will not return -documents that contain "baz". - -[float] -==== Default Field -When not explicitly specifying the field to search on in the query -string syntax, the `index.query.default_field` will be used to derive -which fields to search on. It defaults to `*` and the query will automatically -attempt to determine the existing fields in the index's mapping that are queryable, -and perform the search on those fields. - -[float] -==== Multi Field -The fields parameter can also include pattern based field names, -allowing to automatically expand to the relevant fields (dynamically -introduced fields included). For example: +This search is intended to only return documents containing `foo` or `bar` that +also do **not** contain `baz`. However because of a `default_operator` of `OR`, +this search actually returns documents that contain `foo` or `bar` and any +documents that don't contain `baz`. To return documents as intended, change the +query string to `foo bar +-baz`. + +[[supported-flags]] +===== Limit operators +You can use the `flags` parameter to limit the supported operators for the +simple query string syntax. + +To explicitly enable only specific operators, use a `|` separator. For example, +a `flags` value of `OR|AND|PREFIX` disables all operators except `OR`, `AND`, +and `PREFIX`. [source,js] -------------------------------------------------- @@ -144,57 +188,100 @@ GET /_search { "query": { "simple_query_string" : { - "fields" : ["content", "name.*^5"], - "query" : "foo bar baz" + "query" : "foo | bar + baz*", + "flags" : "OR|AND|PREFIX" } } } -------------------------------------------------- // CONSOLE -[float] -[[supported-flags]] -==== Flags -`simple_query_string` support multiple flags to specify which parsing features -should be enabled. It is specified as a `|`-delimited string with the -`flags` parameter: +[[supported-flags-values]] +====== Valid values +The available flags are: + +`ALL` (Default):: +Enables all optional operators. + +`AND`:: +Enables the `+` AND operator. + +`ESCAPE`:: +Enables `\` as an escape character. + +`FUZZY`:: +Enables the `~N` operator after a word, where `N` is an integer denoting the +allowed edit distance for matching. See <>. + +`NEAR`:: +Enables the `~N` operator, after a phrase where `N` is the maximum number of +positions allowed between matching tokens. Synonymous to `SLOP`. + +`NONE`:: +Disables all operators. + +`NOT`:: +Enables the `-` NOT operator. + +`OR`:: +Enables the `\|` OR operator. + +`PHRASE`:: +Enables the `"` quotes operator used to search for phrases. + +`PRECEDENCE`:: +Enables the `(` and `)` operators to control operator precedence. + +`PREFIX`:: +Enables the `*` prefix operator. 
+ +`SLOP`:: +Enables the `~N` operator, after a phrase where `N` is maximum number of +positions allowed between matching tokens. Synonymous to `NEAR`. + +`WHITESPACE`:: +Enables whitespace as split characters. + +[[simple-query-string-boost]] +===== Wildcards and per-field boosts in the `fields` parameter + +Fields can be specified with wildcards, eg: [source,js] -------------------------------------------------- GET /_search { - "query": { - "simple_query_string" : { - "query" : "foo | bar + baz*", - "flags" : "OR|AND|PREFIX" - } + "query": { + "simple_query_string" : { + "query": "Will Smith", + "fields": [ "title", "*_name" ] <1> } + } } -------------------------------------------------- // CONSOLE +<1> Query the `title`, `first_name` and `last_name` fields. -The available flags are: +Individual fields can be boosted with the caret (`^`) notation: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "simple_query_string" : { + "query" : "this is a test", + "fields" : [ "subject^3", "message" ] <1> + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> The `subject` field is three times as important as the `message` field. -[cols="<,<",options="header",] -|======================================================================= -|Flag |Description -|`ALL` |Enables all parsing features. This is the default. -|`NONE` |Switches off all parsing features. -|`AND` |Enables the `+` AND operator. -|`OR` |Enables the `\|` OR operator. -|`NOT` |Enables the `-` NOT operator. -|`PREFIX` |Enables the `*` Prefix operator. -|`PHRASE` |Enables the `"` quotes operator used to search for phrases. -|`PRECEDENCE` |Enables the `(` and `)` operators to control operator precedence. -|`ESCAPE` |Enables `\` as the escape character. -|`WHITESPACE` |Enables whitespaces as split characters. -|`FUZZY` |Enables the `~N` operator after a word where N is an integer denoting the allowed edit distance for matching (see <>). -|`SLOP` |Enables the `~N` operator after a phrase where N is an integer denoting the slop amount. -|`NEAR` |Synonymous to `SLOP`. -|======================================================================= - -[float] -==== Synonyms +[[simple-query-string-synonyms]] +===== Synonyms The `simple_query_string` query supports multi-terms synonym expansion with the <> token filter. When this filter is used, the parser creates a phrase query for each multi-terms synonyms. diff --git a/docs/reference/query-dsl/span-containing-query.asciidoc b/docs/reference/query-dsl/span-containing-query.asciidoc index 638c699923305..7b5fb0ba7aeae 100644 --- a/docs/reference/query-dsl/span-containing-query.asciidoc +++ b/docs/reference/query-dsl/span-containing-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-containing-query]] -=== Span Containing Query +=== Span containing query +++++ +Span containing +++++ Returns matches which enclose another span query. The span containing query maps to Lucene `SpanContainingQuery`. 
Here is an example: diff --git a/docs/reference/query-dsl/span-field-masking-query.asciidoc b/docs/reference/query-dsl/span-field-masking-query.asciidoc index d9e96635a29c4..67ef67e595187 100644 --- a/docs/reference/query-dsl/span-field-masking-query.asciidoc +++ b/docs/reference/query-dsl/span-field-masking-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-field-masking-query]] -=== Span Field Masking Query +=== Span field masking query +++++ +Span field masking +++++ Wrapper to allow span queries to participate in composite single-field span queries by 'lying' about their search field. The span field masking query maps to Lucene's `SpanFieldMaskingQuery` diff --git a/docs/reference/query-dsl/span-first-query.asciidoc b/docs/reference/query-dsl/span-first-query.asciidoc index dba7932661deb..a5d23071f796a 100644 --- a/docs/reference/query-dsl/span-first-query.asciidoc +++ b/docs/reference/query-dsl/span-first-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-first-query]] -=== Span First Query +=== Span first query +++++ +Span first +++++ Matches spans near the beginning of a field. The span first query maps to Lucene `SpanFirstQuery`. Here is an example: diff --git a/docs/reference/query-dsl/span-multi-term-query.asciidoc b/docs/reference/query-dsl/span-multi-term-query.asciidoc index f79283b7fa4f9..c645d45e237dd 100644 --- a/docs/reference/query-dsl/span-multi-term-query.asciidoc +++ b/docs/reference/query-dsl/span-multi-term-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-multi-term-query]] -=== Span Multi Term Query +=== Span multi-term query +++++ +Span multi-term +++++ The `span_multi` query allows you to wrap a `multi term query` (one of wildcard, fuzzy, prefix, range or regexp query) as a `span query`, so diff --git a/docs/reference/query-dsl/span-near-query.asciidoc b/docs/reference/query-dsl/span-near-query.asciidoc index e69be783e3d60..acb94a318815b 100644 --- a/docs/reference/query-dsl/span-near-query.asciidoc +++ b/docs/reference/query-dsl/span-near-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-near-query]] -=== Span Near Query +=== Span near query +++++ +Span near +++++ Matches spans which are near one another. One can specify _slop_, the maximum number of intervening unmatched positions, as well as whether diff --git a/docs/reference/query-dsl/span-not-query.asciidoc b/docs/reference/query-dsl/span-not-query.asciidoc index 29a803a74767b..561f4eb1eb1ed 100644 --- a/docs/reference/query-dsl/span-not-query.asciidoc +++ b/docs/reference/query-dsl/span-not-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-not-query]] -=== Span Not Query +=== Span not query +++++ +Span not +++++ Removes matches which overlap with another span query or which are within x tokens before (controlled by the parameter `pre`) or y tokens diff --git a/docs/reference/query-dsl/span-or-query.asciidoc b/docs/reference/query-dsl/span-or-query.asciidoc index 470935d6f5c4a..e1c0c9263a80f 100644 --- a/docs/reference/query-dsl/span-or-query.asciidoc +++ b/docs/reference/query-dsl/span-or-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-or-query]] -=== Span Or Query +=== Span or query +++++ +Span or +++++ Matches the union of its span clauses. The span or query maps to Lucene `SpanOrQuery`. 
Here is an example: diff --git a/docs/reference/query-dsl/span-queries.asciidoc b/docs/reference/query-dsl/span-queries.asciidoc index 7dc65433432ec..cc14b0ee4935a 100644 --- a/docs/reference/query-dsl/span-queries.asciidoc +++ b/docs/reference/query-dsl/span-queries.asciidoc @@ -15,62 +15,55 @@ Span queries cannot be mixed with non-span queries (with the exception of the `s The queries in this group are: -<>:: +<>:: +Accepts a list of span queries, but only returns those spans which also match a second span query. -The equivalent of the <> but for use with -other span queries. +<>:: +Allows queries like `span-near` or `span-or` across different fields. -<>:: +<>:: +Accepts another span query whose matches must appear within the first N +positions of the field. +<>:: Wraps a <>, <>, <>, <>, <>, or <> query. -<>:: - -Accepts another span query whose matches must appear within the first N -positions of the field. - <>:: - Accepts multiple span queries whose matches must be within the specified distance of each other, and possibly in the same order. -<>:: +<>:: +Wraps another span query, and excludes any documents which match that query. +<>:: Combines multiple span queries -- returns documents which match any of the specified queries. -<>:: - -Wraps another span query, and excludes any documents which match that query. - -<>:: +<>:: -Accepts a list of span queries, but only returns those spans which also match a second span query. +The equivalent of the <> but for use with +other span queries. <>:: - The result from a single span query is returned as long is its span falls within the spans returned by a list of other span queries. -<>:: - -Allows queries like `span-near` or `span-or` across different fields. -include::span-term-query.asciidoc[] +include::span-containing-query.asciidoc[] -include::span-multi-term-query.asciidoc[] +include::span-field-masking-query.asciidoc[] include::span-first-query.asciidoc[] -include::span-near-query.asciidoc[] +include::span-multi-term-query.asciidoc[] -include::span-or-query.asciidoc[] +include::span-near-query.asciidoc[] include::span-not-query.asciidoc[] -include::span-containing-query.asciidoc[] +include::span-or-query.asciidoc[] -include::span-within-query.asciidoc[] +include::span-term-query.asciidoc[] -include::span-field-masking-query.asciidoc[] +include::span-within-query.asciidoc[] \ No newline at end of file diff --git a/docs/reference/query-dsl/span-term-query.asciidoc b/docs/reference/query-dsl/span-term-query.asciidoc index 1b12a3c35f796..ba31b471ef2ae 100644 --- a/docs/reference/query-dsl/span-term-query.asciidoc +++ b/docs/reference/query-dsl/span-term-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-term-query]] -=== Span Term Query +=== Span term query +++++ +Span term +++++ Matches spans containing a term. The span term query maps to Lucene `SpanTermQuery`. Here is an example: diff --git a/docs/reference/query-dsl/span-within-query.asciidoc b/docs/reference/query-dsl/span-within-query.asciidoc index b70835c4134b4..f344f32b21854 100644 --- a/docs/reference/query-dsl/span-within-query.asciidoc +++ b/docs/reference/query-dsl/span-within-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-span-within-query]] -=== Span Within Query +=== Span within query +++++ +Span within +++++ Returns matches which are enclosed inside another span query. The span within query maps to Lucene `SpanWithinQuery`. 
Here is an example: diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index 04ab2d53f6d35..f93b439a66771 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -5,41 +5,35 @@ This group contains queries which do not fit into the other groups: <>:: - This query finds documents which are similar to the specified text, document, or collection of documents. -<>:: - -This query allows a script to act as a filter. Also see the -<>. - -<>:: - -A query that allows to modify the score of a sub-query with a script. - <>:: - This query finds queries that are stored as documents that match with the specified document. <>:: - A query that computes scores based on the values of numeric features and is able to efficiently skip non-competitive hits. -<>:: +<>:: +This query allows a script to act as a filter. Also see the +<>. +<>:: +A query that allows to modify the score of a sub-query with a script. + +<>:: A query that accepts other queries as json or yaml string. include::mlt-query.asciidoc[] -include::script-query.asciidoc[] - -include::script-score-query.asciidoc[] - include::percolate-query.asciidoc[] include::rank-feature-query.asciidoc[] -include::wrapper-query.asciidoc[] +include::script-query.asciidoc[] + +include::script-score-query.asciidoc[] + +include::wrapper-query.asciidoc[] \ No newline at end of file diff --git a/docs/reference/query-dsl/term-level-queries.asciidoc b/docs/reference/query-dsl/term-level-queries.asciidoc index f4e185ba9597a..f2d45c097fc14 100644 --- a/docs/reference/query-dsl/term-level-queries.asciidoc +++ b/docs/reference/query-dsl/term-level-queries.asciidoc @@ -1,73 +1,75 @@ [[term-level-queries]] -== Term level queries +== Term-level queries -While the <> will analyze the query -string before executing, the _term-level queries_ operate on the exact terms -that are stored in the inverted index, and will normalize terms before executing -only for <> fields with <> property. +You can use **term-level queries** to find documents based on precise values in +structured data. Examples of structured data include date ranges, IP addresses, +prices, or product IDs. -These queries are usually used for structured data like numbers, dates, and -enums, rather than full text fields. Alternatively, they allow you to craft -low-level queries, foregoing the analysis process. +Unlike <>, term-level queries do not +analyze search terms. Instead, term-level queries match the exact terms stored +in a field. -The queries in this group are: -<>:: +[NOTE] +==== +Term-level queries still normalize search terms for `keyword` fields with the +`normalizer` property. For more details, see <>. +==== - Find documents which contain the exact term specified in the field - specified. +[float] +[[term-level-query-types]] +=== Types of term-level queries -<>:: +<>:: +Returns documents that contain any indexed value for a field. - Find documents which contain any of the exact terms specified in the field - specified. +<>:: +Returns documents that contain terms similar to the search term. {es} measures +similarity, or fuzziness, using a +http://en.wikipedia.org/wiki/Levenshtein_distance[Levenshtein edit distance]. -<>:: +<>:: +Returns documents based on their <>. - Find documents which match with one or more of the specified terms. The - number of terms that must match depend on the specified minimum should - match field or script. 
+<>:: +Returns documents that contain a specific prefix in a provided field. <>:: +Returns documents that contain terms within a provided range. - Find documents where the field specified contains values (dates, numbers, - or strings) in the range specified. +<>:: +Returns documents that contain terms matching a +https://en.wikipedia.org/wiki/Regular_expression[regular expression]. -<>:: +<>:: +Returns documents that contain an exact term in a provided field. - Find documents where the field specified contains any non-null value. +<>:: +Returns documents that contain one or more exact terms in a provided field. -<>:: +<>:: +Returns documents that contain a minimum number of exact terms in a provided +field. You can define the minimum number of matching terms using a field or +script. - Find documents where the field specified contains terms which begin with - the exact prefix specified. +<>:: +Returns documents of the specified type. <>:: +Returns documents that contain terms matching a wildcard pattern. - Find documents where the field specified contains terms which match the - pattern specified, where the pattern supports single character wildcards - (`?`) and multi-character wildcards (`*`) - -<>:: - - Find documents where the field specified contains terms which match the - <> specified. - -<>:: - Find documents where the field specified contains terms which are fuzzily - similar to the specified term. Fuzziness is measured as a - http://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance[Levenshtein edit distance] - of 1 or 2. +include::exists-query.asciidoc[] -<>:: +include::fuzzy-query.asciidoc[] - Find documents of the specified type. +include::ids-query.asciidoc[] -<>:: +include::prefix-query.asciidoc[] - Find documents with the specified type and IDs. +include::range-query.asciidoc[] +include::regexp-query.asciidoc[] include::term-query.asciidoc[] @@ -75,18 +77,6 @@ include::terms-query.asciidoc[] include::terms-set-query.asciidoc[] -include::range-query.asciidoc[] - -include::exists-query.asciidoc[] - -include::prefix-query.asciidoc[] - -include::wildcard-query.asciidoc[] - -include::regexp-query.asciidoc[] - -include::fuzzy-query.asciidoc[] - include::type-query.asciidoc[] -include::ids-query.asciidoc[] +include::wildcard-query.asciidoc[] diff --git a/docs/reference/query-dsl/term-query.asciidoc b/docs/reference/query-dsl/term-query.asciidoc index 910123bbe6177..a80f065dcd781 100644 --- a/docs/reference/query-dsl/term-query.asciidoc +++ b/docs/reference/query-dsl/term-query.asciidoc @@ -1,168 +1,223 @@ [[query-dsl-term-query]] -=== Term Query +=== Term query +++++ +Term +++++ -The `term` query finds documents that contain the *exact* term specified -in the inverted index. For instance: +Returns documents that contain an *exact* term in a provided field. -[source,js] --------------------------------------------------- -POST _search -{ - "query": { - "term" : { "user" : "Kimchy" } <1> - } -} --------------------------------------------------- -// CONSOLE -<1> Finds documents which contain the exact term `Kimchy` in the inverted index - of the `user` field. +You can use the `term` query to find documents based on a precise value such as +a price, a product ID, or a username. + +[WARNING] +==== +Avoid using the `term` query for <> fields. + +By default, {es} changes the values of `text` fields as part of <>. This can make finding exact matches for `text` field values +difficult. 
-A `boost` parameter can be specified to give this `term` query a higher -relevance score than another query, for instance: +To search `text` field values, use the <> query +instead. +==== + +[[term-query-ex-request]] +==== Example request [source,js] --------------------------------------------------- -GET _search +---- +GET /_search { - "query": { - "bool": { - "should": [ - { - "term": { - "status": { - "value": "urgent", - "boost": 2.0 <1> + "query": { + "term": { + "user": { + "value": "Kimchy", + "boost": 1.0 } - } - }, - { - "term": { - "status": "normal" <2> - } } - ] } - } } --------------------------------------------------- +---- // CONSOLE -<1> The `urgent` query clause has a boost of `2.0`, meaning it is twice as important - as the query clause for `normal`. -<2> The `normal` clause has the default neutral boost of `1.0`. - -A `term` query can also match against <>. - -.Why doesn't the `term` query match my document? -************************************************** - -String fields can be of type `text` (treated as full text, like the body of an -email), or `keyword` (treated as exact values, like an email address or a -zip code). Exact values (like numbers, dates, and keywords) have -the exact value specified in the field added to the inverted index in order -to make them searchable. - -However, `text` fields are `analyzed`. This means that their -values are first passed through an <> to produce a list of -terms, which are then added to the inverted index. - -There are many ways to analyze text: the default -<> drops most punctuation, -breaks up text into individual words, and lower cases them. For instance, -the `standard` analyzer would turn the string ``Quick Brown Fox!'' into the -terms [`quick`, `brown`, `fox`]. - -This analysis process makes it possible to search for individual words -within a big block of full text. - -The `term` query looks for the *exact* term in the field's inverted index -- -it doesn't know anything about the field's analyzer. This makes it useful for -looking up values in keyword fields, or in numeric or date -fields. When querying full text fields, use the -<> instead, which understands how the field -has been analyzed. - - -To demonstrate, try out the example below. First, create an index, specifying the field mappings, and index a document: +[[term-top-level-params]] +==== Top-level parameters for `term` +``:: +(Required, object) Field you wish to search. + +[[term-field-params]] +==== Parameters for `` +`value`:: +(Required, string) Term you wish to find in the provided ``. To return a +document, the term must exactly match the field value, including whitespace and +capitalization. + +`boost`:: +(Optional, float) Floating point number used to decrease or increase the +<> of a query. Defaults to `1.0`. ++ +You can use the `boost` parameter to adjust relevance scores for searches +containing two or more queries. ++ +Boost values are relative to the default value of `1.0`. A boost value between +`0` and `1.0` decreases the relevance score. A value greater than `1.0` +increases the relevance score. + +[[term-query-notes]] +==== Notes + +[[avoid-term-query-text-fields]] +===== Avoid using the `term` query for `text` fields +By default, {es} changes the values of `text` fields during analysis. 
For +example, the default <> changes +`text` field values as follows: + +* Removes most punctuation +* Divides the remaining content into individual words, called +<> +* Lowercases the tokens + +To better search `text` fields, the `match` query also analyzes your provided +search term before performing a search. This means the `match` query can search +`text` fields for analyzed tokens rather than an exact term. + +The `term` query does *not* analyze the search term. The `term` query only +searches for the *exact* term you provide. This means the `term` query may +return poor or no results when searching `text` fields. + +To see the difference in search results, try the following example. + +. Create an index with a `text` field called `full_text`. ++ +-- [source,js] --------------------------------------------------- +---- PUT my_index { - "mappings": { - "properties": { - "full_text": { - "type": "text" <1> - }, - "exact_value": { - "type": "keyword" <2> - } + "mappings" : { + "properties" : { + "full_text" : { "type" : "text" } + } } - } } +---- +// CONSOLE + +-- +. Index a document with a value of `Quick Brown Foxes!` in the `full_text` +field. ++ +-- + +[source,js] +---- PUT my_index/_doc/1 { - "full_text": "Quick Foxes!", <3> - "exact_value": "Quick Foxes!" <4> + "full_text": "Quick Brown Foxes!" } --------------------------------------------------- +---- // CONSOLE +// TEST[continued] + +Because `full_text` is a `text` field, {es} changes `Quick Brown Foxes!` to +`[quick, brown, fox]` during analysis. -<1> The `full_text` field is of type `text` and will be analyzed. -<2> The `exact_value` field is of type `keyword` and will NOT be analyzed. -<3> The `full_text` inverted index will contain the terms: [`quick`, `foxes`]. -<4> The `exact_value` inverted index will contain the exact term: [`Quick Foxes!`]. +-- -Now, compare the results for the `term` query and the `match` query: +. Use the `term` query to search for `Quick Brown Foxes!` in the `full_text` +field. Include the `pretty` parameter so the response is more readable. ++ +-- [source,js] --------------------------------------------------- -GET my_index/_search +---- +GET my_index/_search?pretty { "query": { "term": { - "exact_value": "Quick Foxes!" <1> + "full_text": "Quick Brown Foxes!" } } } +---- +// CONSOLE +// TEST[continued] -GET my_index/_search -{ - "query": { - "term": { - "full_text": "Quick Foxes!" <2> - } - } -} +Because the `full_text` field no longer contains the *exact* term `Quick Brown +Foxes!`, the `term` query search returns no results. -GET my_index/_search -{ - "query": { - "term": { - "full_text": "foxes" <3> - } - } -} +-- + +. Use the `match` query to search for `Quick Brown Foxes!` in the `full_text` +field. ++ +-- + +//// -GET my_index/_search +[source,js] +---- +POST my_index/_refresh +---- +// CONSOLE +// TEST[continued] + +//// + +[source,js] +---- +GET my_index/_search?pretty { "query": { "match": { - "full_text": "Quick Foxes!" <4> + "full_text": "Quick Brown Foxes!" } } } --------------------------------------------------- +---- // CONSOLE // TEST[continued] -<1> This query matches because the `exact_value` field contains the exact - term `Quick Foxes!`. -<2> This query does not match, because the `full_text` field only contains - the terms `quick` and `foxes`. It does not contain the exact term - `Quick Foxes!`. -<3> A `term` query for the term `foxes` matches the `full_text` field. 
-<4> This `match` query on the `full_text` field first analyzes the query string, - then looks for documents containing `quick` or `foxes` or both. -************************************************** +Unlike the `term` query, the `match` query analyzes your provided search term, +`Quick Brown Foxes!`, before performing a search. The `match` query then returns +any documents containing the `quick`, `brown`, or `fox` tokens in the +`full_text` field. + +Here's the response for the `match` query search containing the indexed document +in the results. + +[source,js] +---- +{ + "took" : 1, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 0.8630463, + "hits" : [ + { + "_index" : "my_index", + "_type" : "_doc", + "_id" : "1", + "_score" : 0.8630463, + "_source" : { + "full_text" : "Quick Brown Foxes!" + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took" : 1/"took" : $body.took/] +-- \ No newline at end of file diff --git a/docs/reference/query-dsl/terms-query.asciidoc b/docs/reference/query-dsl/terms-query.asciidoc index db4597fbea504..26d9435991aa7 100644 --- a/docs/reference/query-dsl/terms-query.asciidoc +++ b/docs/reference/query-dsl/terms-query.asciidoc @@ -1,121 +1,255 @@ [[query-dsl-terms-query]] -=== Terms Query +=== Terms query +++++ +Terms +++++ -Filters documents that have fields that match any of the provided terms -(*not analyzed*). For example: +Returns documents that contain one or more *exact* terms in a provided field. + +The `terms` query is the same as the <>, +except you can search for multiple values. + +[[terms-query-ex-request]] +==== Example request + +The following search returns documents where the `user` field contains `kimchy` +or `elasticsearch`. [source,js] --------------------------------------------------- +---- GET /_search { - "query": { - "terms" : { "user" : ["kimchy", "elasticsearch"]} + "query" : { + "terms" : { + "user" : ["kimchy", "elasticsearch"], + "boost" : 1.0 + } } } --------------------------------------------------- +---- // CONSOLE -NOTE: Highlighting `terms` queries is best-effort only, so terms of a `terms` -query might not be highlighted depending on the highlighter implementation that -is selected and on the number of terms in the `terms` query. +[[terms-top-level-params]] +==== Top-level parameters for `terms` +``:: ++ +-- +(Optional, object) Field you wish to search. + +The value of this parameter is an array of terms you wish to find in the +provided field. To return a document, one or more terms must exactly match a +field value, including whitespace and capitalization. + +By default, {es} limits the `terms` query to a maximum of 65,536 +terms. You can change this limit using the <> setting. + +[NOTE] +To use the field values of an existing document as search terms, use the +<> parameters. +-- + +`boost`:: ++ +-- +(Optional, float) Floating point number used to decrease or increase the +<> of a query. Defaults to `1.0`. + +You can use the `boost` parameter to adjust relevance scores for searches +containing two or more queries. + +Boost values are relative to the default value of `1.0`. A boost value between +`0` and `1.0` decreases the relevance score. A value greater than `1.0` +increases the relevance score. +-- + +[[terms-query-notes]] +==== Notes + +[[query-dsl-terms-query-highlighting]] +===== Highlighting `terms` queries +<> is best-effort only. 
{es} may not +return highlight results for `terms` queries depending on: + +* Highlighter type +* Number of terms in the query -[float] [[query-dsl-terms-lookup]] -===== Terms lookup mechanism +===== Terms lookup +Terms lookup fetches the field values of an existing document. {es} then uses +those values as search terms. This can be helpful when searching for a large set +of terms. -When it's needed to specify a `terms` filter with a lot of terms it can -be beneficial to fetch those term values from a document in an index. A -concrete example would be to filter tweets tweeted by your followers. -Potentially the amount of user ids specified in the terms filter can be -a lot. In this scenario it makes sense to use the terms filter's terms -lookup mechanism. +Because terms lookup fetches values from a document, the <> mapping field must be enabled to use terms lookup. The `_source` +field is enabled by default. -The terms lookup mechanism supports the following options: +[NOTE] +By default, {es} limits the `terms` query to a maximum of 65,536 +terms. This includes terms fetched using terms lookup. You can change +this limit using the <> setting. -[horizontal] +To perform a terms lookup, use the following parameters. + +[[query-dsl-terms-lookup-params]] +====== Terms lookup parameters `index`:: - The index to fetch the term values from. +(Optional, string) Name of the index from which to fetch field values. `id`:: - The id of the document to fetch the term values from. +(Optional, string) <> of the document from which to fetch +field values. `path`:: - The field specified as path to fetch the actual values for the - `terms` filter. ++ +-- +(Optional, string) Name of the field from which to fetch field values. {es} uses +these values as search terms for the query. + +If the field values include an array of nested inner objects, you can access +those objects using dot notation syntax. +-- `routing`:: - A custom routing value to be used when retrieving the - external terms doc. - -The values for the `terms` filter will be fetched from a field in a -document with the specified id in the specified type and index. -Internally a get request is executed to fetch the values from the -specified path. At the moment for this feature to work the `_source` -needs to be stored. - -Also, consider using an index with a single shard and fully replicated -across all nodes if the "reference" terms data is not large. The lookup -terms filter will prefer to execute the get request on a local node if -possible, reducing the need for networking. - -[WARNING] -Executing a Terms Query request with a lot of terms can be quite slow, -as each additional term demands extra processing and memory. -To safeguard against this, the maximum number of terms that can be used -in a Terms Query both directly or through lookup has been limited to `65536`. -This default maximum can be changed for a particular index with the index setting - `index.max_terms_count`. - -[float] -===== Terms lookup twitter example -At first we index the information for user with id 2, specifically, its -followers, then index a tweet from user with id 1. Finally we search on -all the tweets that match the followers of user 2. +(Optional, string) Custom <> of the +document from which to fetch term values. If a custom routing value was provided +when the document was indexed, this parameter is required. + +[[query-dsl-terms-lookup-example]] +====== Terms lookup example + +To see how terms lookup works, try the following example. + +. 
Create an index with a `keyword` field named `color`. ++ +-- [source,js] --------------------------------------------------- -PUT /users/_doc/2 +---- +PUT my_index { - "followers" : ["1", "3"] + "mappings" : { + "properties" : { + "color" : { "type" : "keyword" } + } + } } +---- +// CONSOLE +-- -PUT /tweets/_doc/1 +. Index a document with an ID of 1 and values of `["blue", "green"]` in the +`color` field. ++ +-- + +[source,js] +---- +PUT my_index/_doc/1 { - "user" : "1" + "color": ["blue", "green"] } +---- +// CONSOLE +// TEST[continued] +-- -GET /tweets/_search +. Index another document with an ID of 2 and value of `blue` in the `color` +field. ++ +-- + +[source,js] +---- +PUT my_index/_doc/2 { - "query" : { - "terms" : { - "user" : { - "index" : "users", - "id" : "2", - "path" : "followers" - } - } - } + "color": "blue" } --------------------------------------------------- +---- +// CONSOLE +// TEST[continued] +-- + +. Use the `terms` query with terms lookup parameters to find documents +containing one or more of the same terms as document 2. Include the `pretty` +parameter so the response is more readable. ++ +-- + +//// + +[source,js] +---- +POST my_index/_refresh +---- // CONSOLE +// TEST[continued] -The structure of the external terms document can also include an array of -inner objects, for example: +//// [source,js] --------------------------------------------------- -PUT /users/_doc/2 +---- +GET my_index/_search?pretty { - "followers" : [ - { - "id" : "1" - }, - { - "id" : "2" - } - ] + "query": { + "terms": { + "color" : { + "index" : "my_index", + "id" : "2", + "path" : "color" + } + } + } } --------------------------------------------------- +---- // CONSOLE +// TEST[continued] + +Because document 2 and document 1 both contain `blue` as a value in the `color` +field, {es} returns both documents. -In which case, the lookup path will be `followers.id`. +[source,js] +---- +{ + "took" : 17, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 2, + "relation" : "eq" + }, + "max_score" : 1.0, + "hits" : [ + { + "_index" : "my_index", + "_type" : "_doc", + "_id" : "1", + "_score" : 1.0, + "_source" : { + "color" : [ + "blue", + "green" + ] + } + }, + { + "_index" : "my_index", + "_type" : "_doc", + "_id" : "2", + "_score" : 1.0, + "_source" : { + "color" : "blue" + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took" : 17/"took" : $body.took/] +-- \ No newline at end of file diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc index 3ebfb672e205f..0dfc09d2e3b32 100644 --- a/docs/reference/query-dsl/terms-set-query.asciidoc +++ b/docs/reference/query-dsl/terms-set-query.asciidoc @@ -1,121 +1,234 @@ [[query-dsl-terms-set-query]] -=== Terms Set Query +=== Terms set query +++++ +Terms set +++++ -Returns any documents that match with at least one or more of the -provided terms. The terms are not analyzed and thus must match exactly. -The number of terms that must match varies per document and is either -controlled by a minimum should match field or computed per document in -a minimum should match script. +Returns documents that contain a minimum number of *exact* terms in a provided +field. -The field that controls the number of required terms that must match must -be a number field: +The `terms_set` query is the same as the <>, except you can define the number of matching terms required to +return a document. 
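As a minimal sketch of the query shape (not part of this change, using hypothetical `codes` and `required_matches` fields), a `terms_set` query pairs a list of `terms` with a `minimum_should_match_field`; a fuller worked example with an index setup follows below.

[source,js]
----
GET /_search
{
    "query": {
        "terms_set": {
            "codes": {
                "terms": ["abc", "def", "ghi"],
                "minimum_should_match_field": "required_matches"
            }
        }
    }
}
----
// CONSOLE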
For example: + +* A field, `programming_languages`, contains a list of known programming +languages, such as `c++`, `java`, or `php` for job candidates. You can use the +`terms_set` query to return documents that match at least two of these +languages. + +* A field, `permissions`, contains a list of possible user permissions for an +application. You can use the `terms_set` query to return documents that +match a subset of these permissions. + +[[terms-set-query-ex-request]] +==== Example request + +[[terms-set-query-ex-request-index-setup]] +===== Index setup +In most cases, you'll need to include a <> field mapping in +your index to use the `terms_set` query. This numeric field contains the +number of matching terms required to return a document. + +To see how you can set up an index for the `terms_set` query, try the +following example. + +. Create an index, `job-candidates`, with the following field mappings: ++ +-- + +* `name`, a <> field. This field contains the name of the +job candidate. + +* `programming_languages`, a <> field. This field contains +programming languages known by the job candidate. + +* `required_matches`, a <> `long` field. This field contains +the number of matching terms required to return a document. [source,js] --------------------------------------------------- -PUT /my-index +---- +PUT /job-candidates { "mappings": { "properties": { + "name": { + "type": "keyword" + }, + "programming_languages": { + "type": "keyword" + }, "required_matches": { "type": "long" } } } } +---- +// CONSOLE +// TESTSETUP + +-- -PUT /my-index/_doc/1?refresh +. Index a document with an ID of `1` and the following values: ++ +-- + +* `Jane Smith` in the `name` field. + +* `["c++", "java"]` in the `programming_languages` field. + +* `2` in the `required_matches` field. + +Include the `?refresh` parameter so the document is immediately available for +search. + +[source,js] +---- +PUT /job-candidates/_doc/1?refresh { - "codes": ["ghi", "jkl"], + "name": "Jane Smith", + "programming_languages": ["c++", "java"], "required_matches": 2 } +---- +// CONSOLE + +-- + +. Index another document with an ID of `2` and the following values: ++ +-- + +* `Jason Response` in the `name` field. + +* `["java", "php"]` in the `programming_languages` field. + +* `2` in the `required_matches` field. -PUT /my-index/_doc/2?refresh +[source,js] +---- +PUT /job-candidates/_doc/2?refresh { - "codes": ["def", "ghi"], + "name": "Jason Response", + "programming_languages": ["java", "php"], "required_matches": 2 } --------------------------------------------------- +---- // CONSOLE -// TESTSETUP -An example that uses the minimum should match field: +-- + +You can now use the `required_matches` field value as the number of +matching terms required to return a document in the `terms_set` query. + +[[terms-set-query-ex-request-query]] +===== Example query + +The following search returns documents where the `programming_languages` field +contains at least two of the following terms: + +* `c++` +* `java` +* `php` + +The `minimum_should_match_field` is `required_matches`. This means the +number of matching terms required is `2`, the value of the `required_matches` +field. 
[source,js] --------------------------------------------------- -GET /my-index/_search +---- +GET /job-candidates/_search { "query": { "terms_set": { - "codes" : { - "terms" : ["abc", "def", "ghi"], + "programming_languages": { + "terms": ["c++", "java", "php"], "minimum_should_match_field": "required_matches" } } } } --------------------------------------------------- +---- // CONSOLE -Response: +[[terms-set-top-level-params]] +==== Top-level parameters for `terms_set` -[source,js] --------------------------------------------------- -{ - "took": 13, - "timed_out": false, - "_shards": { - "total": 1, - "successful": 1, - "skipped" : 0, - "failed": 0 - }, - "hits": { - "total" : { - "value": 1, - "relation": "eq" - }, - "max_score": 0.87546873, - "hits": [ - { - "_index": "my-index", - "_type": "_doc", - "_id": "2", - "_score": 0.87546873, - "_source": { - "codes": ["def", "ghi"], - "required_matches": 2 - } - } - ] - } -} --------------------------------------------------- -// TESTRESPONSE[s/"took": 13,/"took": "$body.took",/] +``:: +(Required, object) Field you wish to search. + +[[terms-set-field-params]] +==== Parameters for `` -Scripts can also be used to control how many terms are required to match -in a more dynamic way. For example a create date or a popularity field -can be used as basis for the number of required terms to match. +`terms`:: ++ +-- +(Required, array of strings) Array of terms you wish to find in the provided +``. To return a document, a required number of terms must exactly match +the field values, including whitespace and capitalization. -Also the `params.num_terms` parameter is available in the script to indicate the -number of terms that have been specified. +The required number of matching terms is defined in the +`minimum_should_match_field` or `minimum_should_match_script` parameter. +-- -An example that always limits the number of required terms to match to never -become larger than the number of terms specified: +`minimum_should_match_field`:: +(Optional, string) <> field containing the number of matching +terms required to return a document. + +`minimum_should_match_script`:: ++ +-- +(Optional, string) Custom script containing the number of matching terms +required to return a document. + +For parameters and valid values, see <>. + +For an example query using the `minimum_should_match_script` parameter, see +<>. +-- + +[[terms-set-query-notes]] +==== Notes + +[[terms-set-query-script]] +===== How to use the `minimum_should_match_script` parameter +You can use `minimum_should_match_script` to define the required number of +matching terms using a script. This is useful if you need to set the number of +required terms dynamically. + +[[terms-set-query-script-ex]] +====== Example query using `minimum_should_match_script` + +The following search returns documents where the `programming_languages` field +contains at least two of the following terms: + +* `c++` +* `java` +* `php` + +The `source` parameter of this query indicates: + +* The required number of terms to match cannot exceed `params.num_terms`, the +number of terms provided in the `terms` field. +* The required number of terms to match is `2`, the value of the +`required_matches` field. 
[source,js] --------------------------------------------------- -GET /my-index/_search +---- +GET /job-candidates/_search { "query": { "terms_set": { - "codes" : { - "terms" : ["abc", "def", "ghi"], + "programming_languages": { + "terms": ["c++", "java", "php"], "minimum_should_match_script": { "source": "Math.min(params.num_terms, doc['required_matches'].value)" - } + }, + "boost": 1.0 } } } } --------------------------------------------------- -// CONSOLE +---- +// CONSOLE \ No newline at end of file diff --git a/docs/reference/query-dsl/type-query.asciidoc b/docs/reference/query-dsl/type-query.asciidoc index 9825c68c74f73..4364d1e14e90d 100644 --- a/docs/reference/query-dsl/type-query.asciidoc +++ b/docs/reference/query-dsl/type-query.asciidoc @@ -1,7 +1,7 @@ [[query-dsl-type-query]] === Type Query -deprecated[7.0.0, Types are being removed, prefer filtering on a field instead. For more information, please see <>.] +deprecated[7.0.0,Types and the `type` query are deprecated and in the process of being removed. See <>.] Filters documents matching the provided document / mapping type. diff --git a/docs/reference/query-dsl/wildcard-query.asciidoc b/docs/reference/query-dsl/wildcard-query.asciidoc index ba1c72bb1e53b..95e52352a7696 100644 --- a/docs/reference/query-dsl/wildcard-query.asciidoc +++ b/docs/reference/query-dsl/wildcard-query.asciidoc @@ -1,51 +1,71 @@ [[query-dsl-wildcard-query]] -=== Wildcard Query +=== Wildcard query +++++ +Wildcard +++++ -Matches documents that have fields matching a wildcard expression (*not -analyzed*). Supported wildcards are `*`, which matches any character -sequence (including the empty one), and `?`, which matches any single -character. Note that this query can be slow, as it needs to iterate over many -terms. In order to prevent extremely slow wildcard queries, a wildcard -term should not start with one of the wildcards `*` or `?`. The wildcard -query maps to Lucene `WildcardQuery`. +Returns documents that contain terms matching a wildcard pattern. -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "wildcard" : { "user" : "ki*y" } - } -} --------------------------------------------------- -// CONSOLE +A wildcard operator is a placeholder that matches one or more characters. For +example, the `*` wildcard operator matches zero or more characters. You can +combine wildcard operators with other characters to create a wildcard pattern. -A boost can also be associated with the query: +[[wildcard-query-ex-request]] +==== Example request + +The following search returns documents where the `user` field contains a term +that begins with `ki` and ends with `y`. These matching terms can include `kiy`, +`kity`, or `kimchy`. [source,js] --------------------------------------------------- +---- GET /_search { "query": { - "wildcard" : { "user" : { "value" : "ki*y", "boost" : 2.0 } } + "wildcard": { + "user": { + "value": "ki*y", + "boost": 1.0, + "rewrite": "constant_score" + } + } } } --------------------------------------------------- +---- // CONSOLE -Or : +[[wildcard-top-level-params]] +==== Top-level parameters for `wildcard` +``:: +(Required, object) Field you wish to search. 
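A minimal sketch of this top-level structure (not part of this change, using a hypothetical `user` field and the `?` single-character operator described below):

[source,js]
----
GET /_search
{
    "query": {
        "wildcard": {
            "user": {
                "value": "kimch?"
            }
        }
    }
}
----
// CONSOLE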
-[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "wildcard" : { "user" : { "wildcard" : "ki*y", "boost" : 2.0 } } - } -} --------------------------------------------------- -// CONSOLE +[[wildcard-query-field-params]] +==== Parameters for `` +`value`:: +(Required, string) Wildcard pattern for terms you wish to find in the provided +``. ++ +-- +This parameter supports two wildcard operators: + +* `?`, which matches any single character +* `*`, which can match zero or more characters, including an empty one + +WARNING: Avoid beginning patterns with `*` or `?`. This can increase +the iterations needed to find matching terms and slow search performance. +-- + +`boost`:: +(Optional, float) Floating point number used to decrease or increase the +<> of a query. Defaults to `1.0`. ++ +You can use the `boost` parameter to adjust relevance scores for searches +containing two or more queries. ++ +Boost values are relative to the default value of `1.0`. A boost value between +`0` and `1.0` decreases the relevance score. A value greater than `1.0` +increases the relevance score. -This multi term query allows to control how it gets rewritten using the -<> -parameter. +`rewrite`:: +(Optional, string) Method used to rewrite the query. For valid values and more information, see the +<>. \ No newline at end of file diff --git a/docs/reference/query-dsl/wrapper-query.asciidoc b/docs/reference/query-dsl/wrapper-query.asciidoc index 4ffef5bfc6bcc..010e086056d8f 100644 --- a/docs/reference/query-dsl/wrapper-query.asciidoc +++ b/docs/reference/query-dsl/wrapper-query.asciidoc @@ -1,5 +1,8 @@ [[query-dsl-wrapper-query]] -=== Wrapper Query +=== Wrapper query +++++ +Wrapper +++++ A query that accepts any other query as base64 encoded string. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index fe2954b015a02..cc1f9174799c6 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -16,6 +16,19 @@ command line option to <>. The `_upgrade` API is no longer useful and will be removed. Instead, see <>. +[role="exclude",id="migration-api-assistance"] +=== Migration Assistance API + +The Migration Assistance API has been replaced with the +<>. + +[role="exclude",id="migration-api-upgrade"] +=== Migration Upgrade API + +The Migration Upgrade API has been removed. Use the +{kibana-ref}/upgrade-assistant.html[{kib} Upgrade Assistant] or +<> instead. + [role="exclude",id="docs-bulk-udp"] === Bulk UDP API @@ -576,3 +589,74 @@ See <>. Zen discovery is replaced by the <>. + +[role="exclude",id="settings-xpack"] +=== {xpack} settings in {es} + +include::{asciidoc-dir}/../../shared/settings.asciidoc[] + +[role="exclude",id="_faster_phrase_queries_with_literal_index_phrases_literal"] + +See <>. + +[role="exclude",id="_faster_prefix_queries_with_literal_index_prefixes_literal.html"] + +See <>. + +[role="exclude",id="getting-started-explore"] +=== Exploring your cluster +See <>. + +[role="exclude",id="getting-started-cluster-health"] +=== Cluster health +See <>. + +[role="exclude", id="getting-started-list-indices"] +=== List all indices +See <>. + +[role="exclude", id="getting-started-create-index"] +=== Create an index +See <>. + +[role="exclude", id="getting-started-query-document"] +=== Index and query a document +See <>. + +[role="exclude", id="getting-started-delete-index"] +=== Delete an index +See <>. + +[role="exclude", id="getting-started-modify-data"] +== Modifying your data +See <>. 
+ +[role="exclude", id="indexing-replacing-documents"] +=== Indexing/replacing documents +See <>. + +[role="exclude", id="getting-started-explore-data"] +=== Exploring your data +See <>. + +[role="exclude", id="getting-started-search-API"] +=== Search API +See <>. + +[role="exclude", id="getting-started-conclusion"] +=== Conclusion +See <>. + +[role="exclude",id="ccs-reduction"] +=== {ccs-cap} reduction +See <>. + +[role="exclude",id="es-monitoring"] +=== Monitoring {es} + +See <>. + +[role="exclude",id="configuring-monitoring"] +=== Configuring monitoring in {es} + +See <>. diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 1c31e1cb3db47..a50e065152078 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -6,10 +6,22 @@ This section summarizes the changes in each release. +* <> +* <> +* <> +* <> +* <> +* <> +* <> * <> * <> -- +include::release-notes/7.1.asciidoc[] +include::release-notes/7.0.asciidoc[] +include::release-notes/7.0.0-rc2.asciidoc[] +include::release-notes/7.0.0-rc1.asciidoc[] +include::release-notes/7.0.0-beta1.asciidoc[] include::release-notes/7.0.0-alpha2.asciidoc[] include::release-notes/7.0.0-alpha1.asciidoc[] diff --git a/docs/reference/release-notes/7.0.0-alpha1.asciidoc b/docs/reference/release-notes/7.0.0-alpha1.asciidoc index 758e88d34024c..2d01200928ac3 100644 --- a/docs/reference/release-notes/7.0.0-alpha1.asciidoc +++ b/docs/reference/release-notes/7.0.0-alpha1.asciidoc @@ -7,26 +7,453 @@ The changes listed below have been released for the first time in Elasticsearch [float] === Breaking changes -Core:: -* Tribe node has been removed in favor of Cross-Cluster-Search +Aggregations:: +* Remove support for deprecated params._agg/_aggs for scripted metric aggregations {pull}32979[#32979] (issues: {issue}29328[#29328], {issue}31597[#31597]) +* Percentile/Ranks should return null instead of NaN when empty {pull}30460[#30460] (issue: {issue}29066[#29066]) +* Render sum as zero if count is zero for stats aggregation {pull}27193[#27193] (issue: {issue}26893[#26893]) + +Analysis:: +* Remove `delimited_payload_filter` {pull}27705[#27705] (issues: {issue}26625[#26625], {issue}27704[#27704]) +* Limit the number of tokens produced by _analyze {pull}27529[#27529] (issue: {issue}27038[#27038]) +* Add limits for ngram and shingle settings {pull}27211[#27211] (issue: {issue}25887[#25887]) + +Audit:: +* Logfile auditing settings remove after deprecation {pull}35205[#35205] + +Authentication:: +* Security: remove wrapping in put user response {pull}33512[#33512] (issue: {issue}32332[#32332]) + +Authorization:: +* Remove aliases resolution limitations when security is enabled {pull}31952[#31952] (issue: {issue}31516[#31516]) + +CRUD:: +* Version conflict exception message enhancement {pull}29432[#29432] (issue: {issue}21278[#21278]) +* Using ObjectParser in UpdateRequest {pull}29293[#29293] (issue: {issue}28740[#28740]) + +Distributed:: +* Remove undocumented action.master.force_local setting {pull}29351[#29351] +* Remove tribe node support {pull}28443[#28443] +* Forbid negative values for index.unassigned.node_left.delayed_timeout {pull}26828[#26828] + +Features/Indices APIs:: +* Indices Exists API should return 404 for empty wildcards {pull}34499[#34499] +* Default to one shard {pull}30539[#30539] +* Limit the number of nested documents {pull}27405[#27405] (issue: {issue}26962[#26962]) + +Features/Ingest:: +* INGEST: Add Configuration Except. 
Data to Metdata {pull}32322[#32322] (issue: {issue}27728[#27728]) + +Features/Stats:: +* Remove the suggest metric from stats APIs {pull}29635[#29635] (issue: {issue}29589[#29589]) +* Align cat thread pool info to thread pool config {pull}29195[#29195] (issue: {issue}29123[#29123]) +* Align thread pool info to thread pool configuration {pull}29123[#29123] (issue: {issue}29113[#29113]) + +Geo:: +* Use geohash cell instead of just a corner in geo_bounding_box {pull}30698[#30698] (issue: {issue}25154[#25154]) + +Infra/Circuit Breakers:: +* Introduce durability of circuit breaking exception {pull}34460[#34460] (issue: {issue}31986[#31986]) +* Circuit-break based on real memory usage {pull}31767[#31767] + +Infra/Core:: +* Core: Default node.name to the hostname {pull}33677[#33677] +* Remove bulk fallback for write thread pool {pull}29609[#29609] +* CCS: Drop http address from remote cluster info {pull}29568[#29568] (issue: {issue}29207[#29207]) +* Remove the index thread pool {pull}29556[#29556] +* Main response should not have status 503 when okay {pull}29045[#29045] (issue: {issue}8902[#8902]) +* Automatically prepare indices for splitting {pull}27451[#27451] +* Don't refresh on `_flush` `_force_merge` and `_upgrade` {pull}27000[#27000] (issue: {issue}26972[#26972]) + +Infra/Packaging:: +* Packaging: Remove windows bin files from the tar distribution {pull}30596[#30596] + +Infra/REST API:: +* REST: Remove GET support for clear cache indices {pull}29525[#29525] +* REST : Clear Indices Cache API remove deprecated url params {pull}29068[#29068] -Cross-Cluster-Search:: -* `http_addresses` has been removed from the <> API - because it is expensive to fetch and no longer needed by Kibana. +Infra/Scripting:: +* Remove support for deprecated StoredScript contexts {pull}31394[#31394] (issues: {issue}27612[#27612], {issue}28939[#28939]) +* Scripting: Remove getDate methods from ScriptDocValues {pull}30690[#30690] +* Handle missing and multiple values in script {pull}29611[#29611] (issue: {issue}29286[#29286]) +* Drop `ScriptDocValues#date` and `ScriptDocValues#dates` in 7.0.0 [ISSUE] {pull}23008[#23008] -Rest API:: -* The Clear Cache API only supports `POST` as HTTP method -* `CircuitBreakingException` was previously mapped to HTTP status code 503 and is now - mapped as HTTP status code 429. +Infra/Settings:: +* Remove config prompting for secrets and text {pull}27216[#27216] + +Mapping:: +* Match phrase queries against non-indexed fields should throw an exception {pull}31060[#31060] +* Remove legacy mapping code. {pull}29224[#29224] +* Reject updates to the `_default_` mapping. {pull}29165[#29165] (issues: {issue}15613[#15613], {issue}28248[#28248]) +* Remove the `update_all_types` option. {pull}28288[#28288] +* Remove the `_default_` mapping. {pull}28248[#28248] +* Reject the `index_options` parameter for numeric fields {pull}26668[#26668] (issue: {issue}21475[#21475]) + +Network:: +* Network: Remove http.enabled setting {pull}29601[#29601] (issue: {issue}12792[#12792]) +* Remove HTTP max content length leniency {pull}29337[#29337] + +Percolator:: +* remove deprecated percolator map_unmapped_fields_as_string setting {pull}28060[#28060] + +Ranking:: +* Add minimal sanity checks to custom/scripted similarities. 
{pull}33564[#33564] (issue: {issue}33309[#33309]) +* Scroll queries asking for rescore are considered invalid {pull}32918[#32918] (issue: {issue}31775[#31775]) + +Search:: +* Remove deprecated url parameters `_source_include` and `_source_exclude` {pull}35097[#35097] (issues: {issue}22792[#22792], {issue}33475[#33475]) +* Disallow negative query boost {pull}34486[#34486] (issue: {issue}33309[#33309]) +* Forbid negative `weight` in Function Score Query {pull}33390[#33390] (issue: {issue}31927[#31927]) +* In the field capabilities API, remove support for providing fields in the request body. {pull}30185[#30185] +* Remove deprecated options for query_string {pull}29203[#29203] (issue: {issue}25551[#25551]) +* Fix Laplace scorer to multiply by alpha (and not add) {pull}27125[#27125] +* Remove _primary and _replica shard preferences {pull}26791[#26791] (issue: {issue}26335[#26335]) +* Limit the number of expanded fields it query_string and simple_query_string {pull}26541[#26541] (issue: {issue}25105[#25105]) +* Make purely negative queries return scores of 0. {pull}26015[#26015] (issue: {issue}23449[#23449]) + +Snapshot/Restore:: +* Include size of snapshot in snapshot metadata {pull}30890[#30890] (issue: {issue}18543[#18543]) +* Remove azure deprecated settings {pull}26099[#26099] (issue: {issue}23405[#23405]) + +Store:: +* drop elasticsearch-translog for 7.0 {pull}33373[#33373] (issues: {issue}31389[#31389], {issue}32281[#32281]) +* completely drop `index.shard.check_on_startup: fix` for 7.0 {pull}33194[#33194] + +Suggesters:: +* Fix threshold frequency computation in Suggesters {pull}34312[#34312] (issue: {issue}34282[#34282]) +* Make Geo Context Mapping Parsing More Strict {pull}32821[#32821] (issues: {issue}32202[#32202], {issue}32412[#32412]) +* Make Geo Context Parsing More Strict {pull}32412[#32412] (issue: {issue}32202[#32202]) +* Remove the ability to index or query context suggestions without context {pull}31007[#31007] (issue: {issue}30712[#30712]) + + + +[[breaking-java-7.0.0-alpha1]] +[float] +=== Breaking Java changes Aggregations:: -* The Percentiles and PercentileRanks aggregations now return `null` in the REST response, - instead of `NaN`. This makes it consistent with the rest of the aggregations. 
Note: - this only applies to the REST response, the java objects continue to return `NaN` (also - consistent with other aggregations) +* Change GeoHashGrid.Bucket#getKey() to return String {pull}31748[#31748] (issue: {issue}30320[#30320]) + +Analysis:: +* Remove deprecated AnalysisPlugin#requriesAnalysisSettings method {pull}32037[#32037] (issue: {issue}32025[#32025]) + +Features/Java High Level REST Client:: +* API: Drop deprecated methods from Retry {pull}33925[#33925] +* REST hl client: cluster health to default to cluster level {pull}31268[#31268] (issue: {issue}29331[#29331]) +* REST high-level Client: remove deprecated API methods {pull}31200[#31200] (issue: {issue}31069[#31069]) + +Features/Java Low Level REST Client:: +* LLREST: Drop deprecated methods {pull}33223[#33223] (issues: {issue}29623[#29623], {issue}30315[#30315]) + +Geo:: +* [Geo] Decouple geojson parse logic from ShapeBuilders {pull}27212[#27212] + +Infra/Core:: +* Core: Remove RequestBuilder from Action {pull}30966[#30966] + +Infra/Transport API:: +* Java api clean up: remove deprecated `isShardsAcked` {pull}28311[#28311] (issues: {issue}27784[#27784], {issue}27819[#27819]) + +[[deprecation-7.0.0-alpha1]] +[float] +=== Deprecations + +Analysis:: +* Replace parameter unicodeSetFilter with unicode_set_filter {pull}29215[#29215] (issue: {issue}22823[#22823]) +* Replace delimited_payload_filter by delimited_payload {pull}26625[#26625] (issue: {issue}21978[#21978]) + +Features/Indices APIs:: +* Default copy settings to true and deprecate on the REST layer {pull}30598[#30598] + +Infra/Transport API:: +* Deprecate the transport client in favour of the high-level REST client {pull}27085[#27085] + +Mapping:: +* Deprecate type exists requests. {pull}34663[#34663] + +Search:: +* Deprecate filtering on `_type`. {pull}29468[#29468] (issue: {issue}15613[#15613]) + + + +[[feature-7.0.0-alpha1]] +[float] +=== New features + +Analysis:: +* Relax TermVectors API to work with textual fields other than TextFieldType {pull}31915[#31915] (issue: {issue}31902[#31902]) + +CCR:: +* Generalize search.remote settings to cluster.remote {pull}33413[#33413] + +Distributed:: +* log messages from allocation commands {pull}25955[#25955] (issues: {issue}22821[#22821], {issue}25325[#25325]) + +Features/Ingest:: +* Revert "Introduce a Hashing Processor (#31087)" {pull}32178[#32178] +* Add ingest-attachment support for per document `indexed_chars` limit {pull}28977[#28977] (issue: {issue}28942[#28942]) + +Features/Java High Level REST Client:: +* GraphClient for the high level REST client and associated tests {pull}32366[#32366] + +Features/Monitoring:: +* [Elasticsearch Monitoring] Collect only display_name (for now) {pull}35265[#35265] (issue: {issue}8445[#8445]) + +Infra/Core:: +* Skip shard refreshes if shard is `search idle` {pull}27500[#27500] + +Infra/Logging:: +* Logging: Unify log rotation for index/search slow log {pull}27298[#27298] + +Infra/Plugins:: +* Reload secure settings for plugins {pull}31383[#31383] (issue: {issue}29135[#29135]) + +Infra/REST API:: +* Add an `include_type_name` option. {pull}29453[#29453] (issue: {issue}15613[#15613]) + +Machine Learning:: +* [ML] Filter undefined job groups from update job calendar actions {pull}30757[#30757] + +Mapping:: +* Add a `feature_vector` field. {pull}31102[#31102] (issue: {issue}27552[#27552]) +* Expose Lucene's FeatureField. 
{pull}30618[#30618] + +Ranking:: +* Add ranking evaluation API {pull}27478[#27478] (issue: {issue}19195[#19195]) + +Recovery:: +* Allow to trim all ops above a certain seq# with a term lower than X, … {pull}31211[#31211] (issue: {issue}10708[#10708]) + +SQL:: +* SQL: Add basic support for ST_AsWKT geo function {pull}34205[#34205] +* SQL: Add support for SYS GEOMETRY_COLUMNS {pull}30496[#30496] (issue: {issue}29872[#29872]) + +Search:: +* Add “took” timing info to response for _msearch/template API {pull}30961[#30961] (issue: {issue}30957[#30957]) +* Expose the lucene Matches API to searches [ISSUE] {pull}29631[#29631] +* Add allow_partial_search_results flag to search requests with default setting true {pull}28440[#28440] (issue: {issue}27435[#27435]) +* Enable adaptive replica selection by default {pull}26522[#26522] (issue: {issue}24915[#24915]) Suggesters:: -* Plugins that register suggesters can now define their own types of suggestions and must - explicitly indicate the type of suggestion that they produce. Existing plugins will - require changes to their plugin registration. See the `custom-suggester` example - plugin {pull}30284[#30284] +* serialize suggestion responses as named writeables {pull}30284[#30284] (issue: {issue}26585[#26585]) + + + +[[enhancement-7.0.0-alpha1]] +[float] +=== Enhancements + +Aggregations:: +* Uses MergingDigest instead of AVLDigest in percentiles agg {pull}28702[#28702] (issue: {issue}19528[#19528]) + +Discovery-Plugins:: +* Rename discovery.zen.minimum_master_nodes [ISSUE] {pull}14058[#14058] + +Engine:: +* Remove versionType from translog {pull}31945[#31945] +* do retry if primary fails on AsyncAfterWriteAction {pull}31857[#31857] (issues: {issue}31716[#31716], {issue}31755[#31755]) +* handle AsyncAfterWriteAction exception before listener is registered {pull}31755[#31755] (issue: {issue}31716[#31716]) +* Use IndexWriter#flushNextBuffer to free memory {pull}27753[#27753] +* Remove pre 6.0.0 support from InternalEngine {pull}27720[#27720] + +Features/Indices APIs:: +* Add cluster-wide shard limit {pull}32856[#32856] (issue: {issue}20705[#20705]) +* Remove RestGetAllAliasesAction {pull}31308[#31308] (issue: {issue}31129[#31129]) +* Add rollover-creation-date setting to rolled over index {pull}31144[#31144] (issue: {issue}30887[#30887]) +* add is-write-index flag to aliases {pull}30942[#30942] +* Make index and bulk APIs work without types. 
{pull}29479[#29479] + +Features/Ingest:: +* ingest: Add ignore_missing property to foreach filter (#22147) {pull}31578[#31578] (issue: {issue}22147[#22147]) + +Features/Java High Level REST Client:: +* HLRC API for _termvectors {pull}32610[#32610] (issue: {issue}27205[#27205]) + +Features/Stats:: +* Stats to record how often the ClusterState diff mechanism is used successfully {pull}26973[#26973] + +Features/Watcher:: +* Watcher: Validate email adresses when storing a watch {pull}34042[#34042] (issue: {issue}33980[#33980]) + +Infra/Circuit Breakers:: +* Have circuit breaker succeed on unknown mem usage {pull}33125[#33125] (issue: {issue}31767[#31767]) +* Account for XContent overhead in in-flight breaker {pull}31613[#31613] +* Script Stats: Add compilation limit counter to stats {pull}26387[#26387] + +Infra/Core:: +* Add RunOnce utility class that executes a Runnable exactly once {pull}35484[#35484] +* Improved IndexNotFoundException's default error message {pull}34649[#34649] (issue: {issue}34628[#34628]) +* Set a bounded default for http.max_warning_header_count [ISSUE] {pull}33479[#33479] + +Infra/Packaging:: +* Choose JVM options ergonomically {pull}30684[#30684] + +Infra/REST API:: +* Remove hand-coded XContent duplicate checks {pull}34588[#34588] (issues: {issue}22073[#22073], {issue}22225[#22225], {issue}22253[#22253]) +* Add the `include_type_name` option to the search and document APIs. {pull}29506[#29506] (issue: {issue}15613[#15613]) +* Validate `op_type` for `_create` {pull}27483[#27483] + +Infra/Scripting:: +* Tests: Add support for custom contexts to mock scripts {pull}34100[#34100] +* Scripting: Reflect factory signatures in painless classloader {pull}34088[#34088] +* Handle missing values in painless {pull}32207[#32207] (issue: {issue}29286[#29286]) + +Infra/Settings:: +* Settings: Add keystore creation to add commands {pull}26126[#26126] + +Infra/Transport API:: +* Change BWC version for VerifyRepositoryResponse {pull}30796[#30796] (issue: {issue}30762[#30762]) + +Network:: +* Add cors support to NioHttpServerTransport {pull}30827[#30827] (issue: {issue}28898[#28898]) +* Reintroduce mandatory http pipelining support {pull}30820[#30820] +* Make http pipelining support mandatory {pull}30695[#30695] (issues: {issue}28898[#28898], {issue}29500[#29500]) +* Add nio http server transport {pull}29587[#29587] (issue: {issue}28898[#28898]) +* Add class for serializing message to bytes {pull}29384[#29384] (issue: {issue}28898[#28898]) +* Selectors operate on channel contexts {pull}28468[#28468] (issue: {issue}27260[#27260]) +* Unify nio read / write channel contexts {pull}28160[#28160] (issue: {issue}27260[#27260]) +* Create nio-transport plugin for NioTransport {pull}27949[#27949] (issue: {issue}27260[#27260]) +* Add elasticsearch-nio jar for base nio classes {pull}27801[#27801] (issue: {issue}27802[#27802]) + +Ranking:: +* Add k parameter to PrecisionAtK metric {pull}27569[#27569] + +SQL:: +* SQL: Introduce support for NULL values {pull}34573[#34573] (issue: {issue}32079[#32079]) + +Search:: +* Make limit on number of expanded fields configurable {pull}35284[#35284] (issues: {issue}26541[#26541], {issue}34778[#34778]) +* Search: Simply SingleFieldsVisitor {pull}34052[#34052] +* Don't count hits via the collector if the hit count can be computed from index stats. {pull}33701[#33701] +* Limit the number of concurrent requests per node {pull}31206[#31206] (issue: {issue}31192[#31192]) +* Default max concurrent search req. 
numNodes * 5 {pull}31171[#31171] (issues: {issue}30783[#30783], {issue}30994[#30994]) +* Change ScriptException status to 400 (bad request) {pull}30861[#30861] (issue: {issue}12315[#12315]) +* Change default value to true for transpositions parameter of fuzzy query {pull}26901[#26901] +* Introducing "took" time (in ms) for `_msearch` {pull}23767[#23767] (issue: {issue}23131[#23131]) + +Snapshot/Restore:: +* #31608 Add S3 Setting to Force Path Type Access {pull}34721[#34721] (issue: {issue}31608[#31608]) + +Store:: +* add RemoveCorruptedShardDataCommand {pull}32281[#32281] (issues: {issue}31389[#31389], {issue}32279[#32279]) + +ZenDiscovery:: +* [Zen2] Introduce vote withdrawal {pull}35446[#35446] +* Zen2: Add basic Zen1 transport-level BWC {pull}35443[#35443] +* Zen2: Add diff-based publishing {pull}35290[#35290] +* [Zen2] Introduce auto_shrink_voting_configuration setting {pull}35217[#35217] +* Introduce transport API for cluster bootstrapping {pull}34961[#34961] +* [Zen2] Reconfigure cluster as its membership changes {pull}34592[#34592] (issue: {issue}33924[#33924]) +* Zen2: Fail fast on disconnects {pull}34503[#34503] +* [Zen2] Add storage-layer disruptions to CoordinatorTests {pull}34347[#34347] +* [Zen2] Add low-level bootstrap implementation {pull}34345[#34345] +* [Zen2] Gather votes from all nodes {pull}34335[#34335] +* Zen2: Add Cluster State Applier {pull}34257[#34257] +* [Zen2] Add safety phase to CoordinatorTests {pull}34241[#34241] +* [Zen2] Integrate FollowerChecker with Coordinator {pull}34075[#34075] +* Integrate LeaderChecker with Coordinator {pull}34049[#34049] +* Zen2: Trigger join when active master detected {pull}34008[#34008] +* Zen2: Update PeerFinder term on term bump {pull}33992[#33992] +* [Zen2] Calculate optimal cluster configuration {pull}33924[#33924] +* [Zen2] Introduce FollowersChecker {pull}33917[#33917] +* Zen2: Integrate publication pipeline into Coordinator {pull}33771[#33771] +* Zen2: Add DisruptableMockTransport {pull}33713[#33713] +* [Zen2] Implement basic cluster formation {pull}33668[#33668] +* [Zen2] Introduce LeaderChecker {pull}33024[#33024] +* Zen2: Add leader-side join handling logic {pull}33013[#33013] +* [Zen2] Add PeerFinder#onFoundPeersUpdated {pull}32939[#32939] +* [Zen2] Introduce PreVoteCollector {pull}32847[#32847] +* [Zen2] Introduce ElectionScheduler {pull}32846[#32846] +* [Zen2] Introduce ElectionScheduler {pull}32709[#32709] +* [Zen2] Add HandshakingTransportAddressConnector {pull}32643[#32643] (issue: {issue}32246[#32246]) +* [Zen2] Add UnicastConfiguredHostsResolver {pull}32642[#32642] (issue: {issue}32246[#32246]) +* Zen2: Cluster state publication pipeline {pull}32584[#32584] (issue: {issue}32006[#32006]) +* [Zen2] Introduce gossip-like discovery of master nodes {pull}32246[#32246] +* Add core coordination algorithm for cluster state publishing {pull}32171[#32171] (issue: {issue}32006[#32006]) +* Add term and config to cluster state {pull}32100[#32100] (issue: {issue}32006[#32006]) + + + +[[bug-7.0.0-alpha1]] +[float] +=== Bug fixes + +Aggregations:: +* Fix InternalAutoDateHistogram reproducible failure {pull}32723[#32723] (issue: {issue}32215[#32215]) + +Analysis:: +* Close #26771: beider_morse phonetic encoder failure when languageset unspecified {pull}26848[#26848] (issue: {issue}26771[#26771]) + +Authorization:: +* Empty GetAliases authorization fix {pull}34444[#34444] (issue: {issue}31952[#31952]) + +Docs Infrastructure:: +* Docs build fails due to missing nexus.png [ISSUE] {pull}33101[#33101] + +Features/Indices APIs:: +* 
Validate top-level keys for create index request (#23755) {pull}23869[#23869] (issue: {issue}23755[#23755]) + +Features/Ingest:: +* INGEST: Fix Deprecation Warning in Script Proc. {pull}32407[#32407] + +Features/Java High Level REST Client:: +* HLRC: Drop extra level from user parser {pull}34932[#34932] + +Features/Java Low Level REST Client:: +* Remove I/O pool blocking sniffing call from onFailure callback, add some logic around host exclusion {pull}27985[#27985] (issue: {issue}27984[#27984]) + +Features/Watcher:: +* Watcher: Ignore system locale/timezone in croneval CLI tool {pull}33215[#33215] + +Geo:: +* [build] Test `GeoShapeQueryTests#testPointsOnly` fails [ISSUE] {pull}27454[#27454] + +Infra/Core:: +* Ensure shard is refreshed once it's inactive {pull}27559[#27559] (issue: {issue}27500[#27500]) + +Infra/Settings:: +* Change format how settings represent lists / array {pull}26723[#26723] + +Infra/Transport API:: +* Remove version read/write logic in Verify Response {pull}30879[#30879] (issue: {issue}30807[#30807]) +* Enable muted Repository test {pull}30875[#30875] (issue: {issue}30807[#30807]) +* Bad regex in CORS settings should throw a nicer error {pull}29108[#29108] + +License:: +* Update versions for start_trial after backport {pull}30218[#30218] (issue: {issue}30135[#30135]) + +Mapping:: +* Ensure that field aliases cannot be used in multi-fields. {pull}32219[#32219] + +Network:: +* Adjust SSLDriver behavior for JDK11 changes {pull}32145[#32145] (issues: {issue}32122[#32122], {issue}32144[#32144]) +* Netty4SizeHeaderFrameDecoder error {pull}31057[#31057] +* Fix memory leak in http pipelining {pull}30815[#30815] (issue: {issue}30801[#30801]) +* Fix issue with finishing handshake in ssl driver {pull}30580[#30580] + +Search:: +* Ensure realtime `_get` and `_termvectors` don't run on the network thread {pull}33814[#33814] (issue: {issue}27500[#27500]) +* [bug] fuzziness custom auto {pull}33462[#33462] (issue: {issue}33454[#33454]) +* Fix inner hits retrieval when stored fields are disabled (_none_) {pull}33018[#33018] (issue: {issue}32941[#32941]) +* Set maxScore for empty TopDocs to Nan rather than 0 {pull}32938[#32938] +* Handle leniency for cross_fields type in multi_match query {pull}27045[#27045] (issue: {issue}23210[#23210]) +* Raise IllegalArgumentException instead if query validation failed {pull}26811[#26811] (issue: {issue}26799[#26799]) + +Security:: +* Handle 6.4.0+ BWC for Application Privileges {pull}32929[#32929] + +ZenDiscovery:: +* [Zen2] Remove duplicate discovered peers {pull}35505[#35505] + + +[[upgrade-7.0.0-alpha1]] +[float] +=== Upgrades + +Geo:: +* Upgrade JTS to 1.14.0 {pull}29141[#29141] (issue: {issue}29122[#29122]) + +Infra/Core:: +* Upgrade to a Lucene 8 snapshot {pull}33310[#33310] (issues: {issue}32899[#32899], {issue}33028[#33028], {issue}33309[#33309]) + +Network:: +* NETWORKING: Fix Netty Leaks by upgrading to 4.1.28 {pull}32511[#32511] (issue: {issue}32487[#32487]) diff --git a/docs/reference/release-notes/7.0.0-beta1.asciidoc b/docs/reference/release-notes/7.0.0-beta1.asciidoc new file mode 100644 index 0000000000000..660289a0f0a33 --- /dev/null +++ b/docs/reference/release-notes/7.0.0-beta1.asciidoc @@ -0,0 +1,686 @@ +[[release-notes-7.0.0-beta1]] +== {es} version 7.0.0-beta1 + +Also see <>. 
+ +[[breaking-7.0.0-beta1]] +[float] +=== Breaking changes + +Audit:: +* Remove index audit output type {pull}37707[#37707] (issues: {issue}29881[#29881], {issue}37301[#37301]) + +Authentication:: +* Remove bwc logic for token invalidation {pull}36893[#36893] (issue: {issue}36727[#36727]) + +Authorization:: +* Remove implicit index monitor privilege {pull}37774[#37774] + +CCR:: +* Follow stats api should return a 404 when requesting stats for a non existing index {pull}37220[#37220] (issue: {issue}37021[#37021]) + +CRUD:: +* Remove support for internal versioning for concurrency control {pull}38254[#38254] (issue: {issue}1078[#1078]) + +Features/Ingest:: +* Add ECS schema for user-agent ingest processor (#37727) {pull}37984[#37984] (issues: {issue}37329[#37329], {issue}37727[#37727]) +* Remove special handling for ingest plugins {pull}36967[#36967] (issues: {issue}36898[#36898], {issue}36956[#36956]) + +Features/Java Low Level REST Client:: +* Drop support for the low-level REST client on JDK 7 {pull}38540[#38540] (issue: {issue}29607[#29607]) + +Features/Watcher:: +* Remove Watcher Account "unsecure" settings {pull}36736[#36736] (issue: {issue}36403[#36403]) + +Infra/Logging:: +* Elasticsearch json logging {pull}36833[#36833] (issue: {issue}32850[#32850]) + +Infra/Packaging:: +* Package ingest-user-agent as a module {pull}36956[#36956] +* Package ingest-geoip as a module {pull}36898[#36898] + +Machine Learning:: +* [ML] Remove types from datafeed {pull}36538[#36538] (issue: {issue}34265[#34265]) + +Mapping:: +* Make sure to reject mappings with type _doc when include_type_name is false. {pull}38270[#38270] (issue: {issue}38266[#38266]) +* Update the default for include_type_name to false. {pull}37285[#37285] +* Support 'include_type_name' in RestGetIndicesAction {pull}37149[#37149] + +Network:: +* Remove TLS 1.0 as a default SSL protocol {pull}37512[#37512] (issue: {issue}36021[#36021]) +* Security: remove SSL settings fallback {pull}36846[#36846] (issue: {issue}29797[#29797]) + +Ranking:: +* Forbid negative field boosts in analyzed queries {pull}37930[#37930] (issue: {issue}33309[#33309]) + +Search:: +* Track total hits up to 10,000 by default {pull}37466[#37466] (issue: {issue}33028[#33028]) +* Use mappings to format doc-value fields by default. 
{pull}30831[#30831] (issues: {issue}26948[#26948], {issue}29639[#29639]) + +Security:: +* Remove heuristics that enable security on trial licenses {pull}38075[#38075] (issue: {issue}38009[#38009]) + +ZenDiscovery:: +* Remove DiscoveryPlugin#getDiscoveryTypes {pull}38414[#38414] (issue: {issue}38410[#38410]) + + + +[[breaking-java-7.0.0-beta1]] +[float] +=== Breaking Java changes + +Features/Java Low Level REST Client:: +* Remove support for maxRetryTimeout from low-level REST client {pull}38085[#38085] (issues: {issue}25951[#25951], {issue}31834[#31834], {issue}33342[#33342]) + +Infra/Core:: +* Handle scheduler exceptions {pull}38014[#38014] (issues: {issue}28667[#28667], {issue}36137[#36137], {issue}37708[#37708]) + + + +[[deprecation-7.0.0-beta1]] +[float] +=== Deprecations + +Aggregations:: +* Deprecate dots in aggregation names {pull}31468[#31468] (issues: {issue}17600[#17600], {issue}19040[#19040]) + +Analysis:: +* [Analysis] Deprecate Standard Html Strip Analyzer in master {pull}26719[#26719] (issue: {issue}4704[#4704]) + +Audit:: +* Deprecate index audit output type {pull}37301[#37301] (issue: {issue}29881[#29881]) + +Features/Indices APIs:: +* Reject setting index.optimize_auto_generated_id after version 7.0.0 {pull}28895[#28895] (issue: {issue}27600[#27600]) + +Features/Ingest:: +* Deprecate `_type` in simulate pipeline requests {pull}37949[#37949] (issue: {issue}37731[#37731]) + +Features/Java High Level REST Client:: +* Deprecate HLRC security methods {pull}37883[#37883] (issues: {issue}36938[#36938], {issue}37540[#37540]) +* Deprecate HLRC EmptyResponse used by security {pull}37540[#37540] (issue: {issue}36938[#36938]) + +Features/Watcher:: +* Deprecate xpack.watcher.history.cleaner_service.enabled {pull}37782[#37782] (issue: {issue}32041[#32041]) +* deprecate types for watcher {pull}37594[#37594] (issue: {issue}35190[#35190]) + +Infra/Core:: +* Core: Deprecate negative epoch timestamps {pull}36793[#36793] +* Core: Deprecate use of scientific notation in epoch time parsing {pull}36691[#36691] + +Infra/Scripting:: +* Add types deprecation to script contexts {pull}37554[#37554] +* Deprecate _type from LeafDocLookup {pull}37491[#37491] +* Scripting: Remove deprecated params.ctx {pull}36848[#36848] (issue: {issue}34059[#34059]) + +Machine Learning:: +* Adding ml_settings entry to HLRC and Docs for deprecation_info {pull}38118[#38118] +* [ML] Datafeed deprecation checks {pull}38026[#38026] (issue: {issue}37932[#37932]) +* [ML] Remove "8" prefixes from file structure finder timestamp formats {pull}38016[#38016] +* [ML] Adjust structure finder for Joda to Java time migration {pull}37306[#37306] +* [ML] Resolve 7.0.0 TODOs in ML code {pull}36842[#36842] (issue: {issue}29963[#29963]) + +Mapping:: +* Deprecate types in rollover index API {pull}38039[#38039] (issue: {issue}35190[#35190]) +* Deprecate types in get field mapping API {pull}37667[#37667] (issue: {issue}35190[#35190]) +* Deprecate types in the put mapping API. {pull}37280[#37280] (issues: {issue}29453[#29453], {issue}37285[#37285]) +* Support include_type_name in the field mapping and index template APIs. {pull}37210[#37210] +* Deprecate types in create index requests. {pull}37134[#37134] (issues: {issue}29453[#29453], {issue}37285[#37285]) +* Deprecate use of the _type field in aggregations. {pull}37131[#37131] (issue: {issue}36802[#36802]) +* Deprecate reference to _type in lookup queries {pull}37016[#37016] (issue: {issue}35190[#35190]) +* Deprecate the document create endpoint. 
{pull}36863[#36863] +* Deprecate types in index API {pull}36575[#36575] (issues: {issue}35190[#35190], {issue}35790[#35790]) +* Deprecate types in update APIs {pull}36225[#36225] + +Search:: +* Deprecate use of type in reindex request body {pull}36823[#36823] +* Add typless endpoints for get_source and exist_source {pull}36426[#36426] + + + +[[feature-7.0.0-beta1]] +[float] +=== New features + +Authentication:: +* Add support for API keys to access Elasticsearch {pull}38291[#38291] (issue: {issue}34383[#34383]) +* OIDC realm authentication flows {pull}37787[#37787] +* [WIP] OIDC Realm JWT+JWS related functionality {pull}37272[#37272] (issues: {issue}35339[#35339], {issue}37009[#37009]) +* OpenID Connect Realm base functionality {pull}37009[#37009] (issue: {issue}35339[#35339]) + +Authorization:: +* Allow custom authorization with an authorization engine {pull}38358[#38358] (issues: {issue}32435[#32435], {issue}36245[#36245], {issue}37328[#37328], {issue}37495[#37495], {issue}37785[#37785], {issue}38137[#38137], {issue}38219[#38219]) +* WIldcard IndicesPermissions don't cover .security {pull}36765[#36765] + +CCR:: +* Add ccr follow info api {pull}37408[#37408] (issue: {issue}37127[#37127]) + +Features/ILM:: +* [ILM] Add unfollow action {pull}36970[#36970] (issue: {issue}34648[#34648]) + +Geo:: +* geotile_grid implementation {pull}37842[#37842] (issue: {issue}30240[#30240]) +* [GEO] Fork Lucene's LatLonShape Classes to local lucene package {pull}36794[#36794] +* [Geo] Integrate Lucene's LatLonShape (BKD Backed GeoShapes) as default `geo_shape` indexing approach {pull}36751[#36751] (issue: {issue}35320[#35320]) +* [Geo] Integrate Lucene's LatLonShape (BKD Backed GeoShapes) as default `geo_shape` indexing approach {pull}35320[#35320] (issue: {issue}32039[#32039]) + +Machine Learning:: +* ML: Adds set_upgrade_mode API endpoint {pull}37837[#37837] + +Mapping:: +* Give precedence to index creation when mixing typed templates with typeless index creation and vice-versa. {pull}37871[#37871] (issue: {issue}37773[#37773]) +* Add nanosecond field mapper {pull}37755[#37755] (issues: {issue}27330[#27330], {issue}32601[#32601]) + +SQL:: +* SQL: Allow sorting of groups by aggregates {pull}38042[#38042] (issue: {issue}35118[#35118]) +* SQL: Implement FIRST/LAST aggregate functions {pull}37936[#37936] (issue: {issue}35639[#35639]) +* SQL: Introduce SQL DATE data type {pull}37693[#37693] (issue: {issue}37340[#37340]) + +Search:: +* Introduce ability to minimize round-trips in CCS {pull}37828[#37828] (issues: {issue}32125[#32125], {issue}37566[#37566]) +* Add script filter to intervals {pull}36776[#36776] +* Add the ability to set the number of hits to track accurately {pull}36357[#36357] (issue: {issue}33028[#33028]) +* Add a maximum search request size. 
{pull}26423[#26423] + + + +[[enhancement-7.0.0-beta1]] +[float] +=== Enhancements + +Aggregations:: +* Add Composite to AggregationBuilders {pull}38207[#38207] (issue: {issue}38020[#38020]) +* Allow nested fields in the composite aggregation {pull}37178[#37178] (issue: {issue}28611[#28611]) +* Remove single shard optimization when suggesting shard_size {pull}37041[#37041] (issue: {issue}32125[#32125]) +* Use List instead of priority queue for stable sorting in bucket sort aggregator {pull}36748[#36748] (issue: {issue}36322[#36322]) +* Keys are compared in BucketSortPipelineAggregation so making key type… {pull}36407[#36407] + +Allocation:: +* Fail start on obsolete indices documentation {pull}37786[#37786] (issue: {issue}27073[#27073]) +* Fail start on invalid index metadata {pull}37748[#37748] (issue: {issue}27073[#27073]) +* Fail start of non-data node if node has data {pull}37347[#37347] (issue: {issue}27073[#27073]) + +Analysis:: +* Allow word_delimiter_graph_filter to not adjust internal offsets {pull}36699[#36699] (issues: {issue}33710[#33710], {issue}34741[#34741]) + +Audit:: +* Security Audit includes HTTP method for requests {pull}37322[#37322] (issue: {issue}29765[#29765]) +* Add X-Forwarded-For to the logfile audit {pull}36427[#36427] + +Authentication:: +* Security: propagate auth result to listeners {pull}36900[#36900] (issue: {issue}30794[#30794]) +* Security: reorder realms based on last success {pull}36878[#36878] +* Improve error message for 6.x style realm settings {pull}36876[#36876] (issues: {issue}30241[#30241], {issue}36026[#36026]) +* Change missing authn message to not mention tokens {pull}36750[#36750] +* Invalidate Token API enhancements - HLRC {pull}36362[#36362] (issue: {issue}35388[#35388]) +* Enhance Invalidate Token API {pull}35388[#35388] (issues: {issue}34556[#34556], {issue}35115[#35115]) + +Authorization:: +* Add apm_user reserved role {pull}38206[#38206] +* Permission for restricted indices {pull}37577[#37577] (issue: {issue}34454[#34454]) +* Remove kibana_user and kibana_dashboard_only_user index privileges {pull}37441[#37441] +* Create snapshot role {pull}35820[#35820] (issue: {issue}34454[#34454]) + +CCR:: +* Concurrent file chunk fetching for CCR restore {pull}38495[#38495] +* Tighten mapping syncing in ccr remote restore {pull}38071[#38071] (issues: {issue}36879[#36879], {issue}37887[#37887]) +* Do not allow put mapping on follower {pull}37675[#37675] (issue: {issue}30086[#30086]) +* Added ccr to xpack usage infrastructure {pull}37256[#37256] (issue: {issue}37221[#37221]) +* [CCR] FollowingEngine should fail with 403 if operation has no seqno assigned {pull}37213[#37213] +* [CCR] Added auto_follow_exception.timestamp field to auto follow stats {pull}36947[#36947] +* [CCR] Add time since last auto follow fetch to auto follow stats {pull}36542[#36542] (issues: {issue}33007[#33007], {issue}35895[#35895]) + +CRUD:: +* Add Seq# based optimistic concurrency control to UpdateRequest {pull}37872[#37872] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Introduce ssl settings to reindex from remote {pull}37527[#37527] (issues: {issue}29755[#29755], {issue}37287[#37287]) +* Use Sequence number powered OCC for processing updates {pull}37308[#37308] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Document Seq No powered optimistic concurrency control {pull}37284[#37284] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Enable IPv6 URIs in reindex from remote {pull}36874[#36874] +* Rename seq# powered optimistic concurrency control parameters 
to ifSeqNo/ifPrimaryTerm {pull}36757[#36757] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Expose Sequence Number based Optimistic Concurrency Control in the rest layer {pull}36721[#36721] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Add doc's sequence number + primary term to GetResult and use it for updates {pull}36680[#36680] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Add seq no powered optimistic locking support to the index and delete transport actions {pull}36619[#36619] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Set acking timeout to 0 on dynamic mapping update {pull}31140[#31140] (issues: {issue}30672[#30672], {issue}30844[#30844]) + +Distributed:: +* Recover retention leases during peer recovery {pull}38435[#38435] (issue: {issue}37165[#37165]) +* Lift retention lease expiration to index shard {pull}38380[#38380] (issues: {issue}37165[#37165], {issue}37963[#37963], {issue}38070[#38070]) +* Introduce retention lease background sync {pull}38262[#38262] (issue: {issue}37165[#37165]) +* Allow shards of closed indices to be replicated as regular shards {pull}38024[#38024] (issue: {issue}33888[#33888]) +* Expose retention leases in shard stats {pull}37991[#37991] (issue: {issue}37165[#37165]) +* Introduce retention leases versioning {pull}37951[#37951] (issue: {issue}37165[#37165]) +* Soft-deletes policy should always fetch latest leases {pull}37940[#37940] (issues: {issue}37165[#37165], {issue}37375[#37375]) +* Sync retention leases on expiration {pull}37902[#37902] (issue: {issue}37165[#37165]) +* Ignore shard started requests when primary term does not match {pull}37899[#37899] (issue: {issue}33888[#33888]) +* Move update and delete by query to use seq# for optimistic concurrency control {pull}37857[#37857] (issues: {issue}10708[#10708], {issue}36148[#36148], {issue}37639[#37639]) +* Introduce retention lease serialization {pull}37447[#37447] (issues: {issue}37165[#37165], {issue}37398[#37398]) +* Add run under primary permit method {pull}37440[#37440] (issue: {issue}37398[#37398]) +* Introduce retention lease syncing {pull}37398[#37398] (issue: {issue}37165[#37165]) +* Introduce retention lease persistence {pull}37375[#37375] (issue: {issue}37165[#37165]) +* Add validation for retention lease construction {pull}37312[#37312] (issue: {issue}37165[#37165]) +* Introduce retention lease expiration {pull}37195[#37195] (issue: {issue}37165[#37165]) +* Introduce shard history retention leases {pull}37167[#37167] (issue: {issue}37165[#37165]) +* [Close Index API] Add unique UUID to ClusterBlock {pull}36775[#36775] +* [Close Index API] Mark shard copy as stale if needed during shard verification {pull}36755[#36755] +* [Close Index API] Propagate tasks ids between Freeze, Close and Verify Shard actions {pull}36630[#36630] +* Always initialize the global checkpoint {pull}34381[#34381] + +Engine:: +* Specialize pre-closing checks for engine implementations {pull}38702[#38702] +* Ensure that max seq # is equal to the global checkpoint when creating ReadOnlyEngines {pull}37426[#37426] +* Enable Bulk-Merge if all source remains {pull}37269[#37269] +* Rename setting to enable mmap {pull}37070[#37070] (issue: {issue}36668[#36668]) +* Add hybridfs store type {pull}36668[#36668] +* Introduce time-based retention policy for soft-deletes {pull}34943[#34943] (issue: {issue}34908[#34908]) +* handle AsyncAfterWriteAction failure on primary in the same way as failures on replicas {pull}31969[#31969] (issues: {issue}31716[#31716], {issue}31755[#31755]) + 
+Features/CAT APIs:: +* Expose `search.throttled` on `_cat/indices` {pull}37073[#37073] (issue: {issue}34352[#34352]) + +Features/Features:: +* Run Node deprecation checks locally (#38065) {pull}38250[#38250] (issue: {issue}38065[#38065]) + +Features/ILM:: +* Ensure ILM policies run safely on leader indices {pull}38140[#38140] (issue: {issue}34648[#34648]) +* Skip Shrink when numberOfShards not changed {pull}37953[#37953] (issue: {issue}33275[#33275]) +* Inject Unfollow before Rollover and Shrink {pull}37625[#37625] (issue: {issue}34648[#34648]) +* Add set_priority action to ILM {pull}37397[#37397] (issue: {issue}36905[#36905]) +* [ILM] Add Freeze Action {pull}36910[#36910] (issue: {issue}34630[#34630]) + +Features/Indices APIs:: +* New mapping signature and mapping string source fixed. {pull}37401[#37401] + +Features/Ingest:: +* ingest: compile mustache template only if field includes '{{'' {pull}37207[#37207] (issue: {issue}37120[#37120]) +* Move ingest-geoip default databases out of config {pull}36949[#36949] (issue: {issue}36898[#36898]) +* Make the ingest-geoip databases even lazier to load {pull}36679[#36679] +* Updates the grok patterns to be consistent with the logstash {pull}27181[#27181] + +Features/Java High Level REST Client:: +* HLRC: Fix strict setting exception handling {pull}37247[#37247] (issue: {issue}37090[#37090]) +* HLRC: Use nonblocking entity for requests {pull}32249[#32249] + +Features/Monitoring:: +* Adding mapping for hostname field {pull}37288[#37288] + +Features/Stats:: +* Stats: Add JVM dns cache expiration config to JvmInfo {pull}36372[#36372] + +Features/Watcher:: +* Move watcher to use seq# and primary term for concurrency control {pull}37977[#37977] (issues: {issue}10708[#10708], {issue}37872[#37872]) +* Use ILM for Watcher history deletion {pull}37443[#37443] (issue: {issue}32041[#32041]) +* Watcher: Add whitelist to HttpClient {pull}36817[#36817] (issue: {issue}29937[#29937]) + +Infra/Core:: +* fix a few versionAdded values in ElasticsearchExceptions {pull}37877[#37877] +* Add simple method to write collection of writeables {pull}37448[#37448] (issue: {issue}37398[#37398]) +* Date/Time parsing: Use java time API instead of exception handling {pull}37222[#37222] +* [API] spelling: interruptible {pull}37049[#37049] (issue: {issue}37035[#37035]) + +Infra/Logging:: +* Trim the JSON source in indexing slow logs {pull}38081[#38081] (issue: {issue}38080[#38080]) +* Optimize warning header de-duplication {pull}37725[#37725] (issues: {issue}35754[#35754], {issue}37530[#37530], {issue}37597[#37597], {issue}37622[#37622]) +* Remove warn-date from warning headers {pull}37622[#37622] (issues: {issue}35754[#35754], {issue}37530[#37530], {issue}37597[#37597]) +* Add some deprecation optimizations {pull}37597[#37597] (issues: {issue}35754[#35754], {issue}37530[#37530]) +* Only update response headers if we have a new one {pull}37590[#37590] (issues: {issue}35754[#35754], {issue}37530[#37530]) + +Infra/Packaging:: +* Add OS/architecture classifier to distributions {pull}37881[#37881] +* Change file descriptor limit to 65535 {pull}37537[#37537] (issue: {issue}35839[#35839]) +* Exit batch files explictly using ERRORLEVEL {pull}29583[#29583] (issue: {issue}29582[#29582]) + +Infra/Scripting:: +* Add getZone to JodaCompatibleZonedDateTime {pull}37084[#37084] +* [Painless] Add boxed type to boxed type casts for method/return {pull}36571[#36571] + +Infra/Settings:: +* Separate out validation of groups of settings {pull}34184[#34184] + +License:: +* Handle malformed license 
signatures {pull}37137[#37137] (issue: {issue}35340[#35340]) + +Machine Learning:: +* Move ML Optimistic Concurrency Control to Seq No {pull}38278[#38278] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* [ML] Add explanation so far to file structure finder exceptions {pull}38191[#38191] (issue: {issue}29821[#29821]) +* ML: Add reason field in JobTaskState {pull}38029[#38029] (issue: {issue}34431[#34431]) +* [ML] Add _meta information to all ML indices {pull}37964[#37964] +* ML: Add upgrade mode docs, hlrc, and fix bug {pull}37942[#37942] +* [ML] Tighten up use of aliases rather than concrete indices {pull}37874[#37874] +* ML: Add support for single bucket aggs in Datafeeds {pull}37544[#37544] (issue: {issue}36838[#36838]) +* [ML] Create the ML annotations index {pull}36731[#36731] (issues: {issue}26034[#26034], {issue}33376[#33376]) +* [ML] Merge the Jindex master feature branch {pull}36702[#36702] (issue: {issue}32905[#32905]) +* [FEATURE][ML] Add cluster setting to enable/disable config migration {pull}36700[#36700] (issue: {issue}32905[#32905]) + +Mapping:: +* Log document id when MapperParsingException occurs {pull}37800[#37800] (issue: {issue}37658[#37658]) +* [API] spelling: unknown {pull}37056[#37056] (issue: {issue}37035[#37035]) +* Make SourceToParse immutable {pull}36971[#36971] +* Use index-prefix fields for terms of length min_chars - 1 {pull}36703[#36703] + +Network:: +* Enable TLSv1.3 by default for JDKs with support {pull}38103[#38103] (issue: {issue}32276[#32276]) + +Recovery:: +* SyncedFlushService.getShardRoutingTable() should use metadata to check for index existence {pull}37691[#37691] (issue: {issue}33888[#33888]) +* Make prepare engine step of recovery source non-blocking {pull}37573[#37573] (issue: {issue}37174[#37174]) +* Make recovery source send operations non-blocking {pull}37503[#37503] (issue: {issue}37458[#37458]) +* Prepare to make send translog of recovery non-blocking {pull}37458[#37458] (issue: {issue}37291[#37291]) +* Make finalize step of recovery source non-blocking {pull}37388[#37388] (issue: {issue}37291[#37291]) +* Make recovery source partially non-blocking {pull}37291[#37291] (issue: {issue}36195[#36195]) +* Do not mutate RecoveryResponse {pull}37204[#37204] (issue: {issue}37174[#37174]) +* Don't block on peer recovery on the target side {pull}37076[#37076] (issue: {issue}36195[#36195]) +* Reduce recovery time with compress or secure transport {pull}36981[#36981] (issue: {issue}33844[#33844]) +* Translog corruption marker {pull}33415[#33415] (issue: {issue}31389[#31389]) + +Rollup:: +* Replace the TreeMap in the composite aggregation {pull}36675[#36675] + +SQL:: +* SQL: Allow look-ahead resolution of aliases for WHERE clause {pull}38450[#38450] (issue: {issue}29983[#29983]) +* SQL: Implement CURRENT_DATE {pull}38175[#38175] (issue: {issue}38160[#38160]) +* SQL: Generate relevant error message when grouping functions are not used in GROUP BY {pull}38017[#38017] (issue: {issue}37952[#37952]) +* SQL: Skip the nested and object field types in case of an ODBC request {pull}37948[#37948] (issue: {issue}37801[#37801]) +* SQL: Add protocol tests and remove jdbc_type from drivers response {pull}37516[#37516] (issues: {issue}36635[#36635], {issue}36882[#36882]) +* SQL: Remove slightly used meta commands {pull}37506[#37506] (issue: {issue}37409[#37409]) +* SQL: Describe aliases as views {pull}37496[#37496] (issue: {issue}37422[#37422]) +* SQL: Make `FULL` non-reserved keyword in the grammar {pull}37377[#37377] (issue: {issue}37376[#37376]) +* SQL: Use 
declared source for error messages {pull}37161[#37161] +* SQL: Improve error message when unable to translate to ES query DSL {pull}37129[#37129] (issue: {issue}37040[#37040]) +* [API] spelling: subtract {pull}37055[#37055] (issue: {issue}37035[#37035]) +* [API] spelling: similar {pull}37054[#37054] (issue: {issue}37035[#37035]) +* [API] spelling: input {pull}37048[#37048] (issue: {issue}37035[#37035]) +* SQL: Enhance message for PERCENTILE[_RANK] with field as 2nd arg {pull}36933[#36933] (issue: {issue}36903[#36903]) +* SQL: Preserve original source for each expression {pull}36912[#36912] (issue: {issue}36894[#36894]) +* SQL: Extend the ODBC metric by differentiating between 32 and 64bit platforms {pull}36753[#36753] (issue: {issue}36740[#36740]) +* SQL: Fix wrong appliance of StackOverflow limit for IN {pull}36724[#36724] (issue: {issue}36592[#36592]) + +Search:: +* Tie break on cluster alias when merging shard search failures {pull}38715[#38715] (issue: {issue}38672[#38672]) +* Add finalReduce flag to SearchRequest {pull}38104[#38104] (issues: {issue}37000[#37000], {issue}37838[#37838]) +* Streamline skip_unavailable handling {pull}37672[#37672] (issue: {issue}32125[#32125]) +* Expose sequence number and primary terms in search responses {pull}37639[#37639] +* Add support for merging multiple search responses into one {pull}37566[#37566] (issue: {issue}32125[#32125]) +* Allow field types to optimize phrase prefix queries {pull}37436[#37436] (issue: {issue}31921[#31921]) +* Add support for providing absolute start time to SearchRequest {pull}37142[#37142] (issue: {issue}32125[#32125]) +* Ensure that local cluster alias is never treated as remote {pull}37121[#37121] (issues: {issue}32125[#32125], {issue}36997[#36997]) +* [API] spelling: cacheable {pull}37047[#37047] (issue: {issue}37035[#37035]) +* Add ability to suggest shard_size on coord node rewrite {pull}37017[#37017] (issues: {issue}32125[#32125], {issue}36997[#36997], {issue}37000[#37000]) +* Skip final reduction if SearchRequest holds a cluster alias {pull}37000[#37000] (issues: {issue}32125[#32125], {issue}36997[#36997]) +* Add support for local cluster alias to SearchRequest {pull}36997[#36997] (issue: {issue}32125[#32125]) +* Use SearchRequest copy constructor in ExpandSearchPhase {pull}36772[#36772] (issue: {issue}36641[#36641]) +* Add raw sort values to SearchSortValues transport serialization {pull}36617[#36617] (issue: {issue}32125[#32125]) + +Security:: +* Move CAS operations in TokenService to sequence numbers {pull}38311[#38311] (issues: {issue}10708[#10708], {issue}37872[#37872]) +* Cleanup construction of interceptors {pull}38294[#38294] +* Add passphrase support to elasticsearch-keystore {pull}37472[#37472] (issue: {issue}32691[#32691]) + +Snapshot/Restore:: +* RestoreService should update primary terms when restoring shards of existing indices {pull}38177[#38177] (issue: {issue}33888[#33888]) +* Allow open indices to be restored {pull}37733[#37733] +* Create specific exception for when snapshots are in progress {pull}37550[#37550] (issue: {issue}37541[#37541]) +* SNAPSHOT: Make Atomic Blob Writes Mandatory {pull}37168[#37168] (issues: {issue}37011[#37011], {issue}37066[#37066]) +* SNAPSHOT: Speed up HDFS Repository Writes {pull}37069[#37069] +* Implement Atomic Blob Writes for HDFS Repository {pull}37066[#37066] (issue: {issue}37011[#37011]) +* [API] spelling: repositories {pull}37053[#37053] (issue: {issue}37035[#37035]) +* SNAPSHOT: Use CancellableThreads to Abort {pull}35901[#35901] (issue: {issue}21759[#21759]) 
+* WIP: S3 client encryption {pull}30513[#30513] (issues: {issue}11128[#11128], {issue}16843[#16843]) + +Suggesters:: +* Remove unused empty constructors from suggestions classes {pull}37295[#37295] +* [API] spelling: likelihood {pull}37052[#37052] (issue: {issue}37035[#37035]) + +ZenDiscovery:: +* Add elasticsearch-node detach-cluster tool {pull}37979[#37979] +* Deprecate minimum_master_nodes {pull}37868[#37868] +* Step down as master when configured out of voting configuration {pull}37802[#37802] (issue: {issue}37712[#37712]) +* Enforce cluster UUIDs {pull}37775[#37775] +* Bubble exceptions up in ClusterApplierService {pull}37729[#37729] +* Use m_m_nodes from Zen1 master for Zen2 bootstrap {pull}37701[#37701] +* Add tool elasticsearch-node unsafe-bootstrap {pull}37696[#37696] +* Report terms and version if cluster does not form {pull}37473[#37473] +* Bootstrap a Zen2 cluster once quorum is discovered {pull}37463[#37463] +* Zen2: Add join validation {pull}37203[#37203] +* Publish cluster states in chunks {pull}36973[#36973] + + + +[[bug-7.0.0-beta1]] +[float] +=== Bug fixes + +Aggregations:: +* Don't load global ordinals with the `map` execution_hint {pull}37833[#37833] (issue: {issue}37705[#37705]) +* Issue #37303 - Invalid variance fix {pull}37384[#37384] (issue: {issue}37303[#37303]) + +Allocation:: +* Fix _host based require filters {pull}38173[#38173] +* ALLOC: Fail Stale Primary Alloc. Req. without Data {pull}37226[#37226] (issue: {issue}37098[#37098]) + +Audit:: +* Fix NPE in Logfile Audit Filter {pull}38120[#38120] (issue: {issue}38097[#38097]) + +Authentication:: +* Enhance parsing of StatusCode in SAML Responses {pull}38628[#38628] +* Limit token expiry to 1 hour maximum {pull}38244[#38244] +* Fix expired token message in Exception header {pull}37196[#37196] +* Fix NPE in CachingUsernamePasswordRealm {pull}36953[#36953] (issue: {issue}36951[#36951]) + +CCR:: +* Prevent CCR recovery from missing documents {pull}38237[#38237] +* Fix file reading in ccr restore service {pull}38117[#38117] +* Correct argument names in update mapping/settings from leader {pull}38063[#38063] +* Ensure changes requests return the latest mapping version {pull}37633[#37633] +* Do not set fatal exception when shard follow task is stopped. {pull}37603[#37603] +* Add fatal_exception field for ccr stats in monitoring mapping {pull}37563[#37563] +* Do not add index event listener if CCR disabled {pull}37432[#37432] +* When removing an AutoFollower also mark it as removed. {pull}37402[#37402] (issue: {issue}36761[#36761]) +* [CCR] Make shard follow tasks more resilient for restarts {pull}37239[#37239] (issue: {issue}37231[#37231]) +* [CCR] Resume follow Api should not require a request body {pull}37217[#37217] (issue: {issue}37022[#37022]) +* [CCR] Report error if auto follower tries auto follow a leader index with soft deletes disabled {pull}36886[#36886] (issue: {issue}33007[#33007]) +* Remote cluster license checker and no license info. 
{pull}36837[#36837] (issue: {issue}36815[#36815]) +* Make CCR resilient against missing remote cluster connections {pull}36682[#36682] (issues: {issue}36255[#36255], {issue}36667[#36667]) +* [CCR] AutoFollowCoordinator and follower index already created {pull}36540[#36540] (issue: {issue}33007[#33007]) + +CRUD:: +* Fix Reindex from remote query logic {pull}36908[#36908] +* Synchronize WriteReplicaResult callbacks {pull}36770[#36770] + +Distributed:: +* TransportVerifyShardBeforeCloseAction should force a flush {pull}38401[#38401] (issues: {issue}33888[#33888], {issue}37961[#37961]) +* Fix limit on retaining sequence number {pull}37992[#37992] (issue: {issue}37165[#37165]) +* Close Index API should force a flush if a sync is needed {pull}37961[#37961] (issues: {issue}33888[#33888], {issue}37426[#37426]) +* Force Refresh Listeners when Acquiring all Operation Permits {pull}36835[#36835] +* Replaced the word 'shards' with 'replicas' in an error message. (#36234) {pull}36275[#36275] (issue: {issue}36234[#36234]) + +Engine:: +* Subclass NIOFSDirectory instead of using FileSwitchDirectory {pull}37140[#37140] (issues: {issue}36668[#36668], {issue}37111[#37111]) + +Features/ILM:: +* Preserve ILM operation mode when creating new lifecycles {pull}38134[#38134] (issues: {issue}38229[#38229], {issue}38230[#38230]) +* Retry ILM steps that fail due to SnapshotInProgressException {pull}37624[#37624] (issues: {issue}37541[#37541], {issue}37552[#37552]) +* Remove `indexing_complete` when removing policy {pull}36620[#36620] + +Features/Indices APIs:: +* Reject delete index requests with a body {pull}37501[#37501] (issue: {issue}8217[#8217]) +* Fix duplicate phrase in shrink/split error message {pull}36734[#36734] (issue: {issue}36729[#36729]) +* Get Aliases with wildcard exclusion expression {pull}34230[#34230] (issues: {issue}33518[#33518], {issue}33805[#33805], {issue}34144[#34144]) + +Features/Ingest:: +* Support unknown fields in ingest pipeline map configuration {pull}38352[#38352] (issue: {issue}36938[#36938]) +* Ingest node - user_agent, move device parsing to an object {pull}38115[#38115] (issues: {issue}37329[#37329], {issue}38094[#38094]) +* ingest: fix on_failure with Drop processor {pull}36686[#36686] (issue: {issue}36151[#36151]) +* ingest: support default pipelines + bulk upserts {pull}36618[#36618] (issue: {issue}36219[#36219]) + +Features/Java High Level REST Client:: +* Update IndexTemplateMetaData to allow unknown fields {pull}38448[#38448] (issue: {issue}36938[#36938]) +* `if_seq_no` and `if_primary_term` parameters aren't wired correctly in REST Client's CRUD API {pull}38411[#38411] +* Update Rollup Caps to allow unknown fields {pull}38339[#38339] (issue: {issue}36938[#36938]) +* Fix ILM explain response to allow unknown fields {pull}38054[#38054] (issue: {issue}36938[#36938]) +* Fix ILM status to allow unknown fields {pull}38043[#38043] (issue: {issue}36938[#36938]) +* Fix ILM Lifecycle Policy to allow unknown fields {pull}38041[#38041] (issue: {issue}36938[#36938]) +* Update authenticate to allow unknown fields {pull}37713[#37713] (issue: {issue}36938[#36938]) +* Update verify repository to allow unknown fields {pull}37619[#37619] (issue: {issue}36938[#36938]) +* Update get users to allow unknown fields {pull}37593[#37593] (issue: {issue}36938[#36938]) +* Update Execute Watch to allow unknown fields {pull}37498[#37498] (issue: {issue}36938[#36938]) +* Update Put Watch to allow unknown fields {pull}37494[#37494] (issue: {issue}36938[#36938]) +* Update Delete Watch to allow unknown 
fields {pull}37435[#37435] (issue: {issue}36938[#36938]) +* Fix rest reindex test for IPv4 addresses {pull}37310[#37310] +* Fix weighted_avg parser not found for RestHighLevelClient {pull}37027[#37027] (issue: {issue}36861[#36861]) + +Features/Java Low Level REST Client:: +* Fix potential IllegalCapacityException in LLRC when selecting nodes {pull}37821[#37821] + +Features/Monitoring:: +* Allow built-in monitoring_user role to call GET _xpack API {pull}38060[#38060] (issue: {issue}37970[#37970]) + +Features/Watcher:: +* Support merge nested Map in list for JIRA configurations {pull}37634[#37634] (issue: {issue}30068[#30068]) +* Watcher accounts constructed lazily {pull}36656[#36656] +* Ensures watch definitions are valid json {pull}30692[#30692] (issue: {issue}29746[#29746]) + +Geo:: +* Fix GeoHash PrefixTree BWC {pull}38584[#38584] (issue: {issue}38494[#38494]) +* Geo: Do not normalize the longitude with value -180 for Lucene shapes {pull}37299[#37299] (issue: {issue}37297[#37297]) + +Infra/Core:: +* Bubble-up exceptions from scheduler {pull}38317[#38317] (issue: {issue}38014[#38014]) +* Core: Revert back to joda's multi date formatters {pull}36814[#36814] (issues: {issue}36447[#36447], {issue}36602[#36602]) +* Propagate Errors in executors to uncaught exception handler {pull}36137[#36137] (issue: {issue}28667[#28667]) + +Infra/Packaging:: +* Remove NOREPLACE for /etc/elasticsearch in rpm and deb {pull}37839[#37839] +* Packaging: Update marker used to allow ELASTIC_PASSWORD {pull}37243[#37243] (issue: {issue}37240[#37240]) +* Packaging: Remove permission editing in postinst {pull}37242[#37242] (issue: {issue}37143[#37143]) + +Infra/REST API:: +* Reject all requests that have an unconsumed body {pull}37504[#37504] (issues: {issue}30792[#30792], {issue}37501[#37501], {issue}8217[#8217]) + +Infra/Scripting:: +* Fix Painless void return bug {pull}38046[#38046] + +Infra/Settings:: +* Fix setting by time unit {pull}37192[#37192] +* Fix handling of fractional byte size value settings {pull}37172[#37172] +* Fix handling of fractional time value settings {pull}37171[#37171] + +Machine Learning:: +* [ML] Report index unavailable instead of waiting for lazy node {pull}38423[#38423] +* ML: Fix error race condition on stop _all datafeeds and close _all jobs {pull}38113[#38113] (issue: {issue}37959[#37959]) +* [ML] Update ML results mappings on process start {pull}37706[#37706] (issue: {issue}37607[#37607]) +* [ML] Prevent submit after autodetect worker is stopped {pull}37700[#37700] (issue: {issue}37108[#37108]) +* [ML] Fix ML datafeed CCS with wildcarded cluster name {pull}37470[#37470] (issue: {issue}36228[#36228]) +* [ML] Update error message for process update {pull}37363[#37363] +* [ML] Wait for autodetect to be ready in the datafeed {pull}37349[#37349] (issues: {issue}36810[#36810], {issue}37227[#37227]) +* [ML] Stop datafeeds running when their jobs are stale {pull}37227[#37227] (issue: {issue}36810[#36810]) +* [ML] Order GET job stats response by job id {pull}36841[#36841] (issue: {issue}36683[#36683]) +* [ML] Make GetJobStats work with arbitrary wildcards and groups {pull}36683[#36683] (issue: {issue}34745[#34745]) + +Mapping:: +* Treat put-mapping calls with `_doc` as a top-level key as typed calls. {pull}38032[#38032] +* Correct deprec log in RestGetFieldMappingAction {pull}37843[#37843] (issue: {issue}37667[#37667]) +* Restore a noop _all metadata field for 6x indices {pull}37808[#37808] (issue: {issue}37429[#37429]) +* Make sure PutMappingRequest accepts content types other than JSON. 
{pull}37720[#37720] +* Make sure to use the resolved type in DocumentMapperService#extractMappings. {pull}37451[#37451] (issue: {issue}36811[#36811]) +* MAPPING: Improve Precision for scaled_float {pull}37169[#37169] (issue: {issue}32570[#32570]) +* Make sure to accept empty unnested mappings in create index requests. {pull}37089[#37089] +* Stop automatically nesting mappings in index creation requests. {pull}36924[#36924] +* Rewrite SourceToParse with resolved docType {pull}36921[#36921] (issues: {issue}35790[#35790], {issue}36769[#36769]) + +Network:: +* Reload SSL context on file change for LDAP {pull}36937[#36937] (issues: {issue}30509[#30509], {issue}36923[#36923]) +* Do not resolve addresses in remote connection info {pull}36671[#36671] (issue: {issue}35658[#35658]) + +Ranking:: +* QueryRescorer should keep the window size when rewriting {pull}36836[#36836] + +Recovery:: +* RecoveryMonitor#lastSeenAccessTime should be volatile {pull}36781[#36781] + +Rollup:: +* Fix Rollup's metadata parser {pull}36791[#36791] (issue: {issue}36726[#36726]) +* Fix rollup search statistics {pull}36674[#36674] + +SQL:: +* SQL: Prevent grouping over grouping functions {pull}38649[#38649] (issue: {issue}38308[#38308]) +* SQL: Relax StackOverflow circuit breaker for constants {pull}38572[#38572] (issue: {issue}38571[#38571]) +* SQL: Fix issue with IN not resolving to underlying keyword field {pull}38440[#38440] (issue: {issue}38424[#38424]) +* SQL: change the Intervals milliseconds precision to 3 digits {pull}38297[#38297] (issue: {issue}37423[#37423]) +* SQL: Fix esType for DATETIME/DATE and INTERVALS {pull}38179[#38179] (issue: {issue}38051[#38051]) +* SQL: Added SSL configuration options tests {pull}37875[#37875] (issue: {issue}37711[#37711]) +* SQL: Fix casting from date to numeric type to use millis {pull}37869[#37869] (issue: {issue}37655[#37655]) +* SQL: Fix BasicFormatter NPE {pull}37804[#37804] +* SQL: Return Intervals in SQL format for CLI {pull}37602[#37602] (issues: {issue}29970[#29970], {issue}36186[#36186], {issue}36432[#36432]) +* SQL: fix object extraction from sources {pull}37502[#37502] (issue: {issue}37364[#37364]) +* SQL: Fix issue with field names containing "." 
{pull}37364[#37364] (issue: {issue}37128[#37128]) +* SQL: Fix bug regarding alias fields with dots {pull}37279[#37279] (issue: {issue}37224[#37224]) +* SQL: Proper handling of COUNT(field_name) and COUNT(DISTINCT field_name) {pull}37254[#37254] (issue: {issue}30285[#30285]) +* SQL: fix COUNT DISTINCT filtering {pull}37176[#37176] (issue: {issue}37086[#37086]) +* SQL: Fix issue with wrong NULL optimization {pull}37124[#37124] (issue: {issue}35872[#35872]) +* SQL: Fix issue with complex expression as args of PERCENTILE/_RANK {pull}37102[#37102] (issue: {issue}37099[#37099]) +* SQL: Handle the bwc Joda ZonedDateTime scripting class in Painless {pull}37024[#37024] (issue: {issue}37023[#37023]) +* SQL: Fix bug regarding histograms usage in scripting {pull}36866[#36866] +* SQL: Fix issue with always false filter involving functions {pull}36830[#36830] (issue: {issue}35980[#35980]) +* SQL: protocol returns ISO 8601 String formatted dates instead of Long for JDBC/ODBC requests {pull}36800[#36800] (issue: {issue}36756[#36756]) +* SQL: Enhance Verifier to prevent aggregate or grouping functions from {pull}36799[#36799] (issue: {issue}36798[#36798]) +* SQL: Fix translation of LIKE/RLIKE keywords {pull}36672[#36672] (issues: {issue}36039[#36039], {issue}36584[#36584]) +* SQL: Scripting support for casting functions CAST and CONVERT {pull}36640[#36640] (issue: {issue}36061[#36061]) +* SQL: Concat should be always not nullable {pull}36601[#36601] (issue: {issue}36169[#36169]) +* SQL: Fix issue with complex HAVING and GROUP BY ordinal {pull}36594[#36594] (issue: {issue}36059[#36059]) + +Search:: +* Look up connection using the right cluster alias when releasing contexts {pull}38570[#38570] +* Fix fetch source option in expand search phase {pull}37908[#37908] (issue: {issue}23829[#23829]) +* Change `rational` to `saturation` in script_score {pull}37766[#37766] (issue: {issue}37714[#37714]) +* Throw if two inner_hits have the same name {pull}37645[#37645] (issue: {issue}37584[#37584]) +* Ensure either success or failure path for SearchOperationListener is called {pull}37467[#37467] (issue: {issue}37185[#37185]) +* `query_string` should use indexed prefixes {pull}36895[#36895] +* Avoid duplicate types deprecation messages in search-related APIs. 
{pull}36802[#36802] + +Security:: +* Fix exit code for Security CLI tools {pull}37956[#37956] (issue: {issue}37841[#37841]) +* Fix potential NPE in UsersTool {pull}37660[#37660] + +Snapshot/Restore:: +* Fix Concurrent Snapshot Ending And Stabilize Snapshot Finalization {pull}38368[#38368] (issue: {issue}38226[#38226]) +* Fix Two Races that Lead to Stuck Snapshots {pull}37686[#37686] (issues: {issue}32265[#32265], {issue}32348[#32348]) +* Fix Race in Concurrent Snapshot Delete and Create {pull}37612[#37612] (issue: {issue}37581[#37581]) +* Streamline S3 Repository- and Client-Settings {pull}37393[#37393] + +Suggesters:: +* Fix duplicate removal when merging completion suggestions {pull}36996[#36996] (issue: {issue}35836[#35836]) + +Task Management:: +* Un-assign persistent tasks as nodes exit the cluster {pull}37656[#37656] + +ZenDiscovery:: +* Fix size of rolling-upgrade bootstrap config {pull}38031[#38031] +* Always return metadata version if metadata is requested {pull}37674[#37674] +* [Zen2] Elect freshest master in upgrade {pull}37122[#37122] (issue: {issue}40[#40]) +* Fix cluster state persistence for single-node discovery {pull}36825[#36825] + + + +[[regression-7.0.0-beta1]] +[float] +=== Regressions + +Infra/Core:: +* Restore date aggregation performance in UTC case {pull}38221[#38221] (issue: {issue}37826[#37826]) +* Speed up converting of temporal accessor to zoned date time {pull}37915[#37915] (issue: {issue}37826[#37826]) + +Mapping:: +* Performance fix. Reduce deprecation calls for the same bulk request {pull}37415[#37415] (issue: {issue}37411[#37411]) + + + +[[upgrade-7.0.0-beta1]] +[float] +=== Upgrades + +Engine:: +* Upgrade to lucene-8.0.0-snapshot-83f9835. {pull}37668[#37668] + +Machine Learning:: +* [ML] No need to add state doc mapping on job open in 7.x {pull}37759[#37759] diff --git a/docs/reference/release-notes/7.0.0-rc1.asciidoc b/docs/reference/release-notes/7.0.0-rc1.asciidoc new file mode 100644 index 0000000000000..8d56952904630 --- /dev/null +++ b/docs/reference/release-notes/7.0.0-rc1.asciidoc @@ -0,0 +1,193 @@ +[[release-notes-7.0.0-rc1]] +== {es} version 7.0.0-rc1 + +Also see <>. 
+ +[[breaking-7.0.0-rc1]] +[float] +=== Breaking changes + +Distributed:: +* Remove cluster state size {pull}40061[#40061] (issues: {issue}39806[#39806], {issue}39827[#39827], {issue}39951[#39951], {issue}40016[#40016]) + +Features/Features:: +* Remove Migration Upgrade and Assistance APIs {pull}40075[#40075] (issue: {issue}40014[#40014]) + + + +[[deprecation-7.0.0-rc1]] +[float] +=== Deprecations + +Cluster Coordination:: +* Deprecate size in cluster state response {pull}39951[#39951] (issue: {issue}39806[#39806]) + +Infra/Packaging:: +* Deprecate fallback to java on PATH {pull}37990[#37990] + + + +[[feature-7.0.0-rc1]] +[float] +=== New features + +Allocation:: +* Node repurpose tool {pull}39403[#39403] (issues: {issue}37347[#37347], {issue}37748[#37748]) + +Security:: +* Switch internal security index to ".security-7" {pull}39337[#39337] (issue: {issue}39284[#39284]) + + + +[[enhancement-7.0.0-rc1]] +[float] +=== Enhancements + +CCR:: +* Reduce retention lease sync intervals {pull}40302[#40302] +* Renew retention leases while following {pull}39335[#39335] (issues: {issue}37165[#37165], {issue}38718[#38718]) +* Reduce refresh when lookup term in FollowingEngine {pull}39184[#39184] +* Integrate retention leases to recovery from remote {pull}38829[#38829] (issue: {issue}37165[#37165]) +* Enable removal of retention leases {pull}38751[#38751] (issue: {issue}37165[#37165]) + +Client:: +* Fixed required fields and paths list {pull}39358[#39358] + +Discovery-Plugins:: +* Adds connect and read timeouts to discovery-gce {pull}28193[#28193] (issue: {issue}24313[#24313]) + +Distributed:: +* Introduce retention lease actions {pull}38756[#38756] (issue: {issue}37165[#37165]) +* Add dedicated retention lease exceptions {pull}38754[#38754] (issue: {issue}37165[#37165]) +* Copy retention leases when trim unsafe commits {pull}37995[#37995] (issue: {issue}37165[#37165]) + +Docs Infrastructure:: +* Align generated release notes with doc standards {pull}39234[#39234] (issue: {issue}39155[#39155]) + +Engine:: +* Explicitly advance max_seq_no before indexing {pull}39473[#39473] (issue: {issue}38879[#38879]) + +Infra/Core:: +* Add details about what acquired the shard lock last {pull}38807[#38807] (issue: {issue}30290[#30290]) + +Infra/Packaging:: +* Use bundled JDK in Docker images {pull}40238[#40238] +* Upgrade bundled JDK and Docker images to JDK 12 {pull}40229[#40229] +* Bundle java in distributions {pull}38013[#38013] (issue: {issue}31845[#31845]) + +Infra/Settings:: +* Provide a clearer error message on keystore add {pull}39327[#39327] (issue: {issue}39324[#39324]) + +Percolator:: +* Make the `type` parameter optional when percolating existing documents. 
{pull}39987[#39987] (issue: {issue}39963[#39963]) +* Add support for selecting percolator query candidate matches containing geo_point based queries {pull}26040[#26040] + +SQL:: +* Enhance checks for inexact fields {pull}39427[#39427] (issue: {issue}38501[#38501]) +* Change the default precision for CURRENT_TIMESTAMP function {pull}39391[#39391] (issue: {issue}39288[#39288]) + + + +[[bug-7.0.0-rc1]] +[float] +=== Bug fixes + +Aggregations:: +* Skip sibling pipeline aggregators reduction during non-final reduce {pull}40101[#40101] (issue: {issue}40059[#40059]) +* Extend nextDoc to delegate to the wrapped doc-value iterator for date_nanos {pull}39176[#39176] (issue: {issue}39107[#39107]) +* Only create MatrixStatsResults on final reduction {pull}38130[#38130] (issue: {issue}37587[#37587]) + +Authentication:: +* Allow non super users to create API keys {pull}40028[#40028] (issue: {issue}40029[#40029]) +* Use consistent view of realms for authentication {pull}38815[#38815] (issue: {issue}30301[#30301]) + +CCR:: +* Safe publication of AutoFollowCoordinator {pull}40153[#40153] (issue: {issue}38560[#38560]) +* Enable reading auto-follow patterns from x-content {pull}40130[#40130] (issue: {issue}40128[#40128]) +* Stop auto-followers on shutdown {pull}40124[#40124] +* Protect against the leader index being removed {pull}39351[#39351] (issue: {issue}39308[#39308]) +* Handle the fact that `ShardStats` instance may have no commit or seqno stats {pull}38782[#38782] (issue: {issue}38779[#38779]) +* Fix LocalIndexFollowingIT#testRemoveRemoteConnection() test {pull}38709[#38709] (issue: {issue}38695[#38695]) + +CRUD:: +* Cascading primary failure lead to MSU too low {pull}40249[#40249] + +Cluster Coordination:: +* Fix node tool cleanup {pull}39389[#39389] +* Avoid serialising state if it was already serialised {pull}39179[#39179] + +Distributed:: +* Ignore waitForActiveShards when syncing leases {pull}39224[#39224] (issue: {issue}39089[#39089]) +* Fix synchronization in LocalCheckpointTracker#contains {pull}38755[#38755] (issues: {issue}33871[#33871], {issue}38633[#38633]) + +Engine:: +* Bubble up exception when processing NoOp {pull}39338[#39338] (issue: {issue}38898[#38898]) +* ReadOnlyEngine should update translog recovery state information {pull}39238[#39238] + +Features/Features:: +* Only count some fields types for deprecation check {pull}40166[#40166] + +Features/ILM:: +* Handle failure to release retention leases in ILM {pull}39281[#39281] (issue: {issue}39181[#39181]) + +Features/Watcher:: +* Use non-ILM template setting up watch history template & ILM disabled {pull}39325[#39325] (issue: {issue}38805[#38805]) +* Only flush Watcher's bulk processor if Watcher is enabled {pull}38803[#38803] (issue: {issue}38798[#38798]) + +Infra/Core:: +* Correct name of basic_date_time_no_millis {pull}39367[#39367] + +Infra/Packaging:: +* Some elasticsearch-cli tools could not be run not from ES_HOME {pull}39937[#39937] +* Obsolete pre 7.0 noarch package in rpm {pull}39472[#39472] (issue: {issue}39414[#39414]) +* Suppress error message when `/proc/sys/vm/max_map_count` is not exists. 
{pull}35933[#35933] + +Infra/REST API:: +* Fix #38623 remove xpack namespace REST API {pull}38625[#38625] +* Remove the "xpack" namespace from the REST API {pull}38623[#38623] + +Recovery:: +* Create retention leases file during recovery {pull}39359[#39359] (issue: {issue}37165[#37165]) + +SQL:: +* Add missing handling of IP field in JDBC {pull}40384[#40384] (issue: {issue}40358[#40358]) +* Fix metric aggs on date/time to not return double {pull}40377[#40377] (issues: {issue}39492[#39492], {issue}40376[#40376]) +* CAST supports both SQL and ES types {pull}40365[#40365] (issue: {issue}40282[#40282]) +* Fix RLIKE bug and improve testing for RLIKE statement {pull}40354[#40354] (issues: {issue}34609[#34609], {issue}39931[#39931]) +* Unwrap the first value in an array in case of array leniency {pull}40318[#40318] (issue: {issue}40296[#40296]) +* Preserve original source for cast/convert function {pull}40271[#40271] (issue: {issue}40239[#40239]) +* Fix LIKE function equality by considering its pattern as well {pull}40260[#40260] (issue: {issue}39931[#39931]) +* Fix issue with optimization on queries with ORDER BY/LIMIT {pull}40256[#40256] (issue: {issue}40211[#40211]) +* Rewrite ROUND and TRUNCATE functions with a different optional parameter handling method {pull}40242[#40242] (issue: {issue}40001[#40001]) +* Fix issue with getting DATE type in JDBC {pull}40207[#40207] +* Fix issue with date columns returned always in UTC {pull}40163[#40163] (issue: {issue}40152[#40152]) +* Add multi_value_field_leniency inside FieldHitExtractor {pull}40113[#40113] (issue: {issue}39700[#39700]) +* Fix incorrect ordering of groupings (GROUP BY) based on orderings (ORDER BY) {pull}40087[#40087] (issue: {issue}39956[#39956]) +* Fix bug with JDBC timezone setting and DATE type {pull}39978[#39978] (issue: {issue}39915[#39915]) +* Use underlying exact field for LIKE/RLIKE {pull}39443[#39443] (issue: {issue}39442[#39442]) + +Search:: +* Serialize top-level pipeline aggs as part of InternalAggregations {pull}40177[#40177] (issues: {issue}40059[#40059], {issue}40101[#40101]) +* CCS: Skip empty search hits when minimizing round-trips {pull}40098[#40098] (issues: {issue}32125[#32125], {issue}40067[#40067]) +* CCS: Disable minimizing round-trips when dfs is requested {pull}40044[#40044] (issue: {issue}32125[#32125]) + + + +[[upgrade-7.0.0-rc1]] +[float] +=== Upgrades + +Discovery-Plugins:: +* Bump jackson-databind version for AWS SDK {pull}39183[#39183] + +Engine:: +* Upgrade to Lucene 8.0.0-snapshot-ff9509a8df {pull}39350[#39350] +* Upgrade to Lucene 8.0.0 {pull}39992[#39992] (issue: {issue}39640[#39640]) + +Features/Ingest:: +* Bump jackson-databind version for ingest-geoip {pull}39182[#39182] + +Security:: +* Upgrade the bouncycastle dependency to 1.61 {pull}40017[#40017] (issue: {issue}40011[#40011]) + + diff --git a/docs/reference/release-notes/7.0.0-rc2.asciidoc b/docs/reference/release-notes/7.0.0-rc2.asciidoc new file mode 100644 index 0000000000000..84d3b845fba92 --- /dev/null +++ b/docs/reference/release-notes/7.0.0-rc2.asciidoc @@ -0,0 +1,218 @@ +[[release-notes-7.0.0-rc2]] +== {es} version 7.0.0-rc2 + +Also see <>. + +[[deprecation-7.0.0-rc2]] +[float] +=== Deprecations + +Analysis:: +* Remove `nGram` and `edgeNGram` token filter names (#38911) {pull}39070[#39070] (issues: {issue}30209[#30209], {issue}38911[#38911]) + +Graph:: +* Deprecate types in `_graph/explore` calls. 
{pull}40466[#40466] + + + +[[enhancement-7.0.0-rc2]] +[float] +=== Enhancements + +CCR:: +* Introduce forget follower API {pull}39718[#39718] (issue: {issue}37165[#37165]) + +Cluster Coordination:: +* Remove timeout task after completing cluster state publication {pull}40411[#40411] +* Use default discovery implementation for single-node discovery {pull}40036[#40036] +* Do not log unsuccessful join attempt each time {pull}39756[#39756] + +Distributed:: +* Allow retention lease operations under blocks {pull}39089[#39089] (issues: {issue}34648[#34648], {issue}37165[#37165]) +* Remove retention leases when unfollowing {pull}39088[#39088] (issues: {issue}34648[#34648], {issue}37165[#37165]) +* Introduce retention lease state file {pull}39004[#39004] (issues: {issue}37165[#37165], {issue}38588[#38588], {issue}39032[#39032]) +* Enable soft-deletes by default for 7.0+ indices {pull}38929[#38929] (issue: {issue}36141[#36141]) + +Engine:: +* Also mmap cfs files for hybridfs {pull}38940[#38940] (issue: {issue}36668[#36668]) + +Infra/Core:: +* Enhancements to IndicesQueryCache. {pull}39099[#39099] (issue: {issue}37117[#37117]) + +Infra/Packaging:: +* Add no-jdk distributions {pull}39882[#39882] + +Machine Learning:: +* [ML] Allow stop unassigned datafeed and relax unset upgrade mode wait {pull}39034[#39034] + +Mapping:: +* Introduce a parameter suppress_types_warnings. {pull}38923[#38923] + +Recovery:: +* Do not wait for advancement of checkpoint in recovery {pull}39006[#39006] (issues: {issue}38949[#38949], {issue}39000[#39000]) + +SQL:: +* SQL: add "fuzziness" option to QUERY and MATCH function predicates {pull}40529[#40529] (issue: {issue}40495[#40495]) +* SQL: add "validate.properties" property to JDBC's allowed list of settings {pull}39050[#39050] (issue: {issue}38068[#38068]) + +Search:: +* Avoid BytesRef's copying in ScriptDocValues's Strings {pull}29581[#29581] (issue: {issue}29567[#29567]) + +Security:: +* Types removal security index template {pull}39705[#39705] (issue: {issue}38637[#38637]) +* Types removal security index template {pull}39542[#39542] (issue: {issue}38637[#38637]) + +Snapshot/Restore:: +* Mark Deleted Snapshot Directories with Tombstones {pull}40228[#40228] (issue: {issue}39852[#39852]) + +Store:: +* Add option to force load term dict into memory {pull}39741[#39741] + +Features/Monitoring:: +* Remove types from internal monitoring templates and bump to api 7 {pull}39888[#39888] (issue: {issue}38637[#38637]) + +Features/Watcher:: +* Remove the index type from internal watcher indexes {pull}39761[#39761] (issue: {issue}38637[#38637]) + +Infra/Core:: +* Change zone formatting for all printers {pull}39568[#39568] (issue: {issue}38471[#38471]) + + +[[bug-7.0.0-rc2]] +[float] +=== Bug fixes + +Analysis:: +* Fix PreConfiguredTokenFilters getSynonymFilter() implementations {pull}38839[#38839] (issue: {issue}38793[#38793]) + +Audit:: +* LoggingAuditTrail correctly handle ReplicatedWriteRequest {pull}39925[#39925] (issue: {issue}39555[#39555]) + +Authentication:: +* Correct authenticate response for API key {pull}39684[#39684] +* Fix security index auto-create and state recovery race {pull}39582[#39582] + +CCR:: +* Fix shard follow task startup error handling {pull}39053[#39053] (issue: {issue}38779[#38779]) +* Filter out upgraded version index settings when starting index following {pull}38838[#38838] (issue: {issue}38835[#38835]) + +CRUD:: +* Store Pending Deletions Fix {pull}40345[#40345] (issue: {issue}40249[#40249]) +* ShardBulkAction ignore primary response on primary 
{pull}38901[#38901] + +Cluster Coordination:: +* Do not perform cleanup if Manifest write fails with dirty exception {pull}40519[#40519] (issue: {issue}39077[#39077]) +* Cache compressed cluster state size {pull}39827[#39827] (issue: {issue}39806[#39806]) +* Drop node if asymmetrically partitioned from master {pull}39598[#39598] +* Fixing the custom object serialization bug in diffable utils. {pull}39544[#39544] +* Clean GatewayAllocator when stepping down as master {pull}38885[#38885] + +Distributed:: +* Enforce retention leases require soft deletes {pull}39922[#39922] (issue: {issue}39914[#39914]) +* Treat TransportService stopped error as node is closing {pull}39800[#39800] (issue: {issue}39584[#39584]) +* Use cause to determine if node with primary is closing {pull}39723[#39723] (issue: {issue}39584[#39584]) +* Don’t ack if unable to remove failing replica {pull}39584[#39584] (issue: {issue}39467[#39467]) +* Fix NPE on Stale Index in IndicesService {pull}38891[#38891] (issue: {issue}38845[#38845]) + +Engine:: +* Advance max_seq_no before add operation to Lucene {pull}38879[#38879] (issue: {issue}31629[#31629]) + +Features/Features:: +* Deprecation check for indices with very large numbers of fields {pull}39869[#39869] (issue: {issue}39851[#39851]) + +Features/ILM:: +* Correct ILM metadata minimum compatibility version {pull}40569[#40569] (issue: {issue}40565[#40565]) +* Handle null retention leases in WaitForNoFollowersStep {pull}40477[#40477] + +Features/Ingest:: +* Ingest ingest then create index {pull}39607[#39607] (issues: {issue}32758[#32758], {issue}32786[#32786], {issue}36545[#36545]) + +Features/Monitoring:: +* Don't emit deprecation warnings on calls to the monitoring bulk API. {pull}39805[#39805] (issue: {issue}39336[#39336]) + +Features/Watcher:: +* Fix Watcher stats class cast exception {pull}39821[#39821] (issue: {issue}39780[#39780]) +* Use any index specified by .watches for Watcher {pull}39541[#39541] (issue: {issue}39478[#39478]) +* Resolve concurrency with watcher trigger service {pull}39092[#39092] (issue: {issue}39087[#39087]) + +Geo:: +* Geo Point parse error fix {pull}40447[#40447] (issue: {issue}17617[#17617]) + +Highlighting:: +* Bug fix for AnnotatedTextHighlighter - port of 39525 {pull}39750[#39750] (issue: {issue}39525[#39525]) + +Infra/Core:: +* Allow single digit milliseconds in strict date parsing {pull}40676[#40676] (issue: {issue}40403[#40403]) +* Parse composite patterns using ClassicFormat.parseObject {pull}40100[#40100] (issue: {issue}39916[#39916]) +* Bat scripts to work with JAVA_HOME with parantheses {pull}39712[#39712] (issues: {issue}30606[#30606], {issue}33405[#33405], {issue}38578[#38578], {issue}38624[#38624]) +* Change licence expiration date pattern {pull}39681[#39681] (issue: {issue}39136[#39136]) +* Fix DateFormatters.parseMillis when no timezone is given {pull}39100[#39100] (issue: {issue}39067[#39067]) +* Don't close caches while there might still be in-flight requests. 
{pull}38958[#38958] (issue: {issue}37117[#37117]) + +Infra/Packaging:: +* Use TAR instead of DOCKER build type before 6.7.0 {pull}40723[#40723] (issues: {issue}39378[#39378], {issue}40511[#40511]) + +Infra/REST API:: +* Update spec files that erroneously documented parts as optional {pull}39122[#39122] +* ilm.explain_lifecycle documents human again {pull}39113[#39113] +* Index on rollup.rollup_search.json is a list {pull}39097[#39097] + +MULTIPLE AREA LABELS:: +* metric on watcher stats is a list not an enum {pull}39114[#39114] + +Machine Learning:: +* [ML] Fix datafeed skipping first bucket after lookback when aggs are … {pull}39859[#39859] (issue: {issue}39842[#39842]) +* [ML] refactoring lazy query and agg parsing {pull}39776[#39776] (issue: {issue}39528[#39528]) +* [ML] Stop the ML memory tracker before closing node {pull}39111[#39111] (issue: {issue}37117[#37117]) + +Mapping:: +* Optimise rejection of out-of-range `long` values {pull}40325[#40325] (issues: {issue}26137[#26137], {issue}40323[#40323]) + +Recovery:: +* Recover peers from translog, ignoring soft deletes {pull}38904[#38904] (issue: {issue}37165[#37165]) +* Retain history for peer recovery using leases {pull}38855[#38855] + +Rollup:: +* Remove timezone validation on rollup range queries {pull}40647[#40647] + +SQL:: +* SQL: Fix display size for DATE/DATETIME {pull}40669[#40669] +* SQL: have LIKE/RLIKE use wildcard and regexp queries {pull}40628[#40628] (issue: {issue}40557[#40557]) +* SQL: Fix getTime() methods in JDBC {pull}40484[#40484] +* SQL: SYS TABLES: enumerate tables of requested types {pull}40535[#40535] (issue: {issue}40348[#40348]) +* SQL: passing an input to the CLI "freezes" the CLI after displaying an error message {pull}40164[#40164] (issue: {issue}40557[#40557]) +* SQL: Wrap ZonedDateTime parameters inside scripts {pull}39911[#39911] (issue: {issue}39877[#39877]) +* SQL: ConstantProcessor can now handle NamedWriteable {pull}39876[#39876] (issue: {issue}39875[#39875]) +* SQL: Extend the multi dot field notation extraction to lists of values {pull}39823[#39823] (issue: {issue}39738[#39738]) +* SQL: values in datetime script aggs should be treated as long {pull}39773[#39773] (issue: {issue}37042[#37042]) +* SQL: Don't allow inexact fields for MIN/MAX {pull}39563[#39563] (issue: {issue}39427[#39427]) +* SQL: Fix merging of incompatible multi-fields {pull}39560[#39560] (issue: {issue}39547[#39547]) +* SQL: fix COUNT DISTINCT column name {pull}39537[#39537] (issue: {issue}39511[#39511]) +* SQL: Enable accurate hit tracking on demand {pull}39527[#39527] (issue: {issue}37971[#37971]) +* SQL: ignore UNSUPPORTED fields for JDBC and ODBC modes in 'SYS COLUMNS' {pull}39518[#39518] (issue: {issue}39471[#39471]) +* SQL: enforce JDBC driver - ES server version parity {pull}38972[#38972] (issue: {issue}38775[#38775]) +* SQL: fall back to using the field name for column label {pull}38842[#38842] (issue: {issue}38831[#38831]) + +Search:: +* Fix Fuzziness#asDistance(String) {pull}39643[#39643] (issue: {issue}39614[#39614]) + +Security:: +* Remove dynamic objects from security index {pull}40499[#40499] (issue: {issue}35460[#35460]) +* Fix libs:ssl-config project setup {pull}39074[#39074] +* Do not create the missing index when invoking getRole {pull}39039[#39039] + +Snapshot/Restore:: +* Blob store compression fix {pull}39073[#39073] + + + +[[upgrade-7.0.0-rc2]] +[float] +=== Upgrades + +Snapshot/Restore:: +* plugins/repository-gcs: Update google-cloud-storage/core to 1.59.0 {pull}39748[#39748] (issue: {issue}39366[#39366]) + 
+Search:: +* Upgrade to Lucene 8.0.0 GA {pull}39992[#39992] (issue: {issue}39640[#39640]) + diff --git a/docs/reference/release-notes/7.0.asciidoc b/docs/reference/release-notes/7.0.asciidoc new file mode 100644 index 0000000000000..989c474d6ff51 --- /dev/null +++ b/docs/reference/release-notes/7.0.asciidoc @@ -0,0 +1,1740 @@ +[[release-notes-7.0.1]] +== {es} version 7.0.1 + +Also see <>. + +[[enhancement-7.0.1]] +[float] +=== Enhancements + +CCR:: +* Reduce security permissions in CCR plugin {pull}41391[#41391] + +Infra/Packaging:: +* Be lenient when parsing build flavor and type on the wire {pull}40734[#40734] (issues: {issue}39378[#39378], {issue}40723[#40723]) + +Machine Learning:: +* [ML] Allow xpack.ml.max_machine_memory_percent higher than 100% {pull}41193[#41193] + + + +[[bug-7.0.1]] +[float] +=== Bug fixes + +Allocation:: +* Short-circuit rebalancing when disabled {pull}40966[#40966] (issue: {issue}40942[#40942]) + +Authorization:: +* Fix role mapping DN field wildcards for users with NULL DNs {pull}41343[#41343] (issue: {issue}41305[#41305]) +* Fix Has Privilege API check on restricted indices {pull}41226[#41226] +* Fix put mapping authorization for aliases with a write-index and multiple read indices {pull}40834[#40834] (issue: {issue}40831[#40831]) +* Use alias name from rollover request to query indices stats {pull}40774[#40774] (issue: {issue}40771[#40771]) + +CCR:: +* Suppress lease background sync failures if stopping {pull}40902[#40902] + +Cluster Coordination:: +* Fix multi-node parsing in voting config exclusions REST API {pull}41588[#41588] (issue: {issue}41587[#41587]) +* Validate cluster UUID when joining Zen1 cluster {pull}41063[#41063] (issue: {issue}37775[#37775]) + +Engine:: +* Mark searcher as accessed in acquireSearcher {pull}41335[#41335] + +Features/Monitoring:: +* Properly handle all Monitoring exporters being disabled {pull}40920[#40920] (issue: {issue}40919[#40919]) + +Features/Watcher:: +* Use environment settings instead of state settings for Watcher config {pull}41087[#41087] (issue: {issue}41042[#41042]) + +Highlighting:: +* Unified highlighter should ignore terms that target the _id field {pull}41275[#41275] (issue: {issue}37525[#37525]) +* Unified highlighter should respect no_match_size with number_of_fragments set to 0 {pull}41069[#41069] (issue: {issue}41066[#41066]) + +Infra/Settings:: +* Always check for archiving broken index settings {pull}41209[#41209] + +Machine Learning:: +* [ML] Write header to autodetect before it is visible to other calls {pull}41085[#41085] (issue: {issue}40385[#40385]) +* Fix unsafe publication of invalid license enforcer {pull}40985[#40985] (issue: {issue}40957[#40957]) + +Mapping:: +* Fix error applying `ignore_malformed` to boolean values {pull}41261[#41261] (issue: {issue}11498[#11498]) + +SQL:: +* SQL: Fix bug with optimization of null related conditionals {pull}41355[#41355] +* SQL: Predicate diff takes into account all values {pull}41346[#41346] (issue: {issue}40835[#40835]) +* SQL: Fix LIMIT bug in agg sorting {pull}41258[#41258] (issue: {issue}40984[#40984]) +* SQL: Allow current_date/time/timestamp to be also used as a function escape pattern {pull}41254[#41254] (issue: {issue}41240[#41240]) +* SQL: Translate MIN/MAX on keyword fields as FIRST/LAST {pull}41247[#41247] (issues: {issue}37936[#37936], {issue}41195[#41195]) +* SQL: Tweak pattern matching in SYS TABLES {pull}41243[#41243] (issue: {issue}40775[#40775]) +* SQL: Change schema calls to empty set {pull}41034[#41034] (issue: {issue}41028[#41028]) +* SQL: 
Use ResultSets over exceptions in metadata {pull}40641[#40641] (issue: {issue}40533[#40533]) +* SQL: Fix catalog filtering in SYS COLUMNS {pull}40583[#40583] (issue: {issue}40582[#40582]) + +Search:: +* BlendedTermQuery should ignore fields that don't exist in the index {pull}41125[#41125] (issue: {issue}41118[#41118]) +* Full text queries should not always ignore unmapped fields {pull}41062[#41062] (issue: {issue}41022[#41022]) +* ProfileScorer should propagate `setMinCompetitiveScore`. {pull}40958[#40958] +* Fix rewrite of inner queries in DisMaxQueryBuilder {pull}40956[#40956] (issue: {issue}40953[#40953]) + +Security:: +* Fix unsafe publication in opt-out query cache {pull}40957[#40957] +* Remove dynamic objects from security index {pull}40499[#40499] (issue: {issue}35460[#35460]) + +Snapshot/Restore:: +* Fix Broken Index Shard Snapshot File Preventing Snapshot Creation {pull}41310[#41310] (issue: {issue}41304[#41304]) +* Do not create missing directories in readonly repo {pull}41249[#41249] (issues: {issue}26909[#26909], {issue}41009[#41009]) + + + +[[upgrade-7.0.1]] +[float] +=== Upgrades + +Infra/Packaging:: +* Bump the bundled JDK to 12.0.1 {pull}41627[#41627] + + + +[[release-notes-7.0.0]] +== {es} version 7.0.0 + +These release notes include all changes made in the alpha, beta, and RC +releases of 7.0.0. + +Also see <>. + +[[breaking-7.0.0]] +[float] +=== Breaking changes + +Aggregations:: +* Remove support for deprecated params._agg/_aggs for scripted metric aggregations {pull}32979[#32979] (issues: {issue}29328[#29328], {issue}31597[#31597]) +* Percentile/Ranks should return null instead of NaN when empty {pull}30460[#30460] (issue: {issue}29066[#29066]) +* Render sum as zero if count is zero for stats aggregation {pull}27193[#27193] (issue: {issue}26893[#26893]) + +Analysis:: +* Remove `delimited_payload_filter` {pull}27705[#27705] (issues: {issue}26625[#26625], {issue}27704[#27704]) +* Limit the number of tokens produced by _analyze {pull}27529[#27529] (issue: {issue}27038[#27038]) +* Add limits for ngram and shingle settings {pull}27211[#27211] (issue: {issue}25887[#25887]) + +Audit:: +* Logfile auditing settings remove after deprecation {pull}35205[#35205] +* Remove index audit output type {pull}37707[#37707] (issues: {issue}29881[#29881], {issue}37301[#37301]) + +Authentication:: +* Security: remove wrapping in put user response {pull}33512[#33512] (issue: {issue}32332[#32332]) +* Remove bwc logic for token invalidation {pull}36893[#36893] (issue: {issue}36727[#36727]) + +Authorization:: +* Remove aliases resolution limitations when security is enabled {pull}31952[#31952] (issue: {issue}31516[#31516]) +* Remove implicit index monitor privilege {pull}37774[#37774] + +Circuit Breakers:: +* Lower fielddata circuit breaker's default limit {pull}27162[#27162] (issue: {issue}27130[#27130]) + +CRUD:: +* Version conflict exception message enhancement {pull}29432[#29432] (issue: {issue}21278[#21278]) +* Using ObjectParser in UpdateRequest {pull}29293[#29293] (issue: {issue}28740[#28740]) +* Remove support for internal versioning for concurrency control {pull}38254[#38254] (issue: {issue}1078[#1078]) + +Distributed:: +* Remove undocumented action.master.force_local setting {pull}29351[#29351] +* Remove tribe node support {pull}28443[#28443] +* Forbid negative values for index.unassigned.node_left.delayed_timeout {pull}26828[#26828] +* Remove cluster state size {pull}40061[#40061] (issues: {issue}39806[#39806], {issue}39827[#39827], {issue}39951[#39951], {issue}40016[#40016]) + 
+Features/Features:: +* Remove Migration Upgrade and Assistance APIs {pull}40075[#40075] (issue: {issue}40014[#40014]) + +Features/Indices APIs:: +* Indices Exists API should return 404 for empty wildcards {pull}34499[#34499] +* Default to one shard {pull}30539[#30539] +* Limit the number of nested documents {pull}27405[#27405] (issue: {issue}26962[#26962]) + +Features/Ingest:: +* Add Configuration Except. Data to Metadata {pull}32322[#32322] (issue: {issue}27728[#27728]) +* Add ECS schema for user-agent ingest processor (#37727) {pull}37984[#37984] (issues: {issue}37329[#37329], {issue}37727[#37727]) +* Remove special handling for ingest plugins {pull}36967[#36967] (issues: {issue}36898[#36898], {issue}36956[#36956]) + +Features/Java Low Level REST Client:: +* Drop support for the low-level REST client on JDK 7 {pull}38540[#38540] (issue: {issue}29607[#29607]) + +Features/Watcher:: +* Remove Watcher Account "unsecure" settings {pull}36736[#36736] (issue: {issue}36403[#36403]) + +Features/Stats:: +* Remove the suggest metric from stats APIs {pull}29635[#29635] (issue: {issue}29589[#29589]) +* Align cat thread pool info to thread pool config {pull}29195[#29195] (issue: {issue}29123[#29123]) +* Align thread pool info to thread pool configuration {pull}29123[#29123] (issue: {issue}29113[#29113]) + +Geo:: +* Use geohash cell instead of just a corner in geo_bounding_box {pull}30698[#30698] (issue: {issue}25154[#25154]) + +Index APIs:: +* Always enforce cluster-wide shard limit {pull}34892[#34892] (issues: {issue}20705[#20705], {issue}34021[#34021]) + +Infra/Circuit Breakers:: +* Introduce durability of circuit breaking exception {pull}34460[#34460] (issue: {issue}31986[#31986]) +* Circuit-break based on real memory usage {pull}31767[#31767] + +Infra/Core:: +* Default node.name to the hostname {pull}33677[#33677] +* Remove bulk fallback for write thread pool {pull}29609[#29609] +* CCS: Drop http address from remote cluster info {pull}29568[#29568] (issue: {issue}29207[#29207]) +* Remove the index thread pool {pull}29556[#29556] +* Main response should not have status 503 when okay {pull}29045[#29045] (issue: {issue}8902[#8902]) +* Automatically prepare indices for splitting {pull}27451[#27451] +* Don't refresh on `_flush` `_force_merge` and `_upgrade` {pull}27000[#27000] (issue: {issue}26972[#26972]) + +Infra/Logging:: +* Elasticsearch json logging {pull}36833[#36833] (issue: {issue}32850[#32850]) + +Infra/Packaging:: +* Packaging: Remove windows bin files from the tar distribution {pull}30596[#30596] +* Package ingest-user-agent as a module {pull}36956[#36956] +* Package ingest-geoip as a module {pull}36898[#36898] + +Infra/REST API:: +* Remove GET support for clear cache indices {pull}29525[#29525] +* Clear Indices Cache API remove deprecated url params {pull}29068[#29068] + +Infra/Scripting:: +* Remove support for deprecated StoredScript contexts {pull}31394[#31394] (issues: {issue}27612[#27612], {issue}28939[#28939]) +* Remove getDate methods from ScriptDocValues {pull}30690[#30690] +* Drop `ScriptDocValues#date` and `ScriptDocValues#dates` in 7.0.0 {pull}30690[#30690] (issue: {issue}23008[#23008]) + +Infra/Settings:: +* Remove config prompting for secrets and text {pull}27216[#27216] + +Machine Learning:: +* Remove types from datafeed {pull}36538[#36538] (issue: {issue}34265[#34265]) + +Mapping:: +* Match phrase queries against non-indexed fields should throw an exception {pull}31060[#31060] +* Remove legacy mapping code. {pull}29224[#29224] +* Reject updates to the `_default_` mapping. 
{pull}29165[#29165] (issues: {issue}15613[#15613], {issue}28248[#28248]) +* Remove the `update_all_types` option. {pull}28288[#28288] +* Remove the `_default_` mapping. {pull}28248[#28248] +* Reject the `index_options` parameter for numeric fields {pull}26668[#26668] (issue: {issue}21475[#21475]) +* Update the default for include_type_name to false. {pull}37285[#37285] +* Support 'include_type_name' in RestGetIndicesAction {pull}37149[#37149] + +Network:: +* Remove http.enabled setting {pull}29601[#29601] (issue: {issue}12792[#12792]) +* Remove HTTP max content length leniency {pull}29337[#29337] +* Remove TLS 1.0 as a default SSL protocol {pull}37512[#37512] (issue: {issue}36021[#36021]) +* Security: remove SSL settings fallback {pull}36846[#36846] (issue: {issue}29797[#29797]) + +Percolator:: +* Remove deprecated percolator map_unmapped_fields_as_string setting {pull}28060[#28060] + +Ranking:: +* Add minimal sanity checks to custom/scripted similarities. {pull}33564[#33564] (issue: {issue}33309[#33309]) +* Scroll queries asking for rescore are considered invalid {pull}32918[#32918] (issue: {issue}31775[#31775]) +* Forbid negative scores in function_score query {pull}35709[#35709] (issue: {issue}33309[#33309]) +* Forbid negative field boosts in analyzed queries {pull}37930[#37930] (issue: {issue}33309[#33309]) + +Scripting:: +* Delete deprecated getValues from ScriptDocValues {pull}36183[#36183] (issue: {issue}22919[#22919]) + +Search:: +* Remove deprecated url parameters `_source_include` and `_source_exclude` {pull}35097[#35097] (issues: {issue}22792[#22792], {issue}33475[#33475]) +* Disallow negative query boost {pull}34486[#34486] (issue: {issue}33309[#33309]) +* Forbid negative `weight` in Function Score Query {pull}33390[#33390] (issue: {issue}31927[#31927]) +* In the field capabilities API, remove support for providing fields in the request body. {pull}30185[#30185] +* Remove deprecated options for query_string {pull}29203[#29203] (issue: {issue}25551[#25551]) +* Fix Laplace scorer to multiply by alpha (and not add) {pull}27125[#27125] +* Remove _primary and _replica shard preferences {pull}26791[#26791] (issue: {issue}26335[#26335]) +* Limit the number of expanded fields in query_string and simple_query_string {pull}26541[#26541] (issue: {issue}25105[#25105]) +* Make purely negative queries return scores of 0. {pull}26015[#26015] (issue: {issue}23449[#23449]) +* Remove the deprecated _termvector endpoint. {pull}36131[#36131] (issues: {issue}36098[#36098], {issue}8484[#8484]) +* Remove deprecated Graph endpoints {pull}35956[#35956] +* Validate metadata on `_msearch` {pull}35938[#35938] (issue: {issue}35869[#35869]) +* Make hits.total an object in the search response {pull}35849[#35849] (issue: {issue}33028[#33028]) +* Remove the distinction between query and filter context in QueryBuilders {pull}35354[#35354] (issue: {issue}35293[#35293]) +* Throw a parsing exception when boost is set in span_or query (#28390) {pull}34112[#34112] (issue: {issue}28390[#28390]) +* Track total hits up to 10,000 by default {pull}37466[#37466] (issue: {issue}33028[#33028]) +* Use mappings to format doc-value fields by default. 
{pull}30831[#30831] (issues: {issue}26948[#26948], {issue}29639[#29639]) + +Security:: +* Remove heuristics that enable security on trial licenses {pull}38075[#38075] (issue: {issue}38009[#38009]) + +Snapshot/Restore:: +* Include size of snapshot in snapshot metadata {pull}30890[#30890] (issue: {issue}18543[#18543]) +* Remove azure deprecated settings {pull}26099[#26099] (issue: {issue}23405[#23405]) + +Store:: +* Drop elasticsearch-translog for 7.0 {pull}33373[#33373] (issues: {issue}31389[#31389], {issue}32281[#32281]) +* completely drop `index.shard.check_on_startup: fix` for 7.0 {pull}33194[#33194] + +Suggesters:: +* Fix threshold frequency computation in Suggesters {pull}34312[#34312] (issue: {issue}34282[#34282]) +* Make Geo Context Mapping Parsing More Strict {pull}32821[#32821] (issues: {issue}32202[#32202], {issue}32412[#32412]) +* Remove the ability to index or query context suggestions without context {pull}31007[#31007] (issue: {issue}30712[#30712]) + +ZenDiscovery:: +* Best-effort cluster formation if unconfigured {pull}36215[#36215] +* Remove DiscoveryPlugin#getDiscoveryTypes {pull}38414[#38414] (issue: {issue}38410[#38410]) + +[[breaking-java-7.0.0]] +[float] +=== Breaking Java changes + +Aggregations:: +* Change GeoHashGrid.Bucket#getKey() to return String {pull}31748[#31748] (issue: {issue}30320[#30320]) + +Analysis:: +* Remove deprecated AnalysisPlugin#requriesAnalysisSettings method {pull}32037[#32037] (issue: {issue}32025[#32025]) + +Features/Java High Level REST Client:: +* Drop deprecated methods from Retry {pull}33925[#33925] +* Cluster health to default to cluster level {pull}31268[#31268] (issue: {issue}29331[#29331]) +* Remove deprecated API methods {pull}31200[#31200] (issue: {issue}31069[#31069]) + +Features/Java Low Level REST Client:: +* Drop deprecated methods {pull}33223[#33223] (issues: {issue}29623[#29623], {issue}30315[#30315]) +* Remove support for maxRetryTimeout from low-level REST client {pull}38085[#38085] (issues: {issue}25951[#25951], {issue}31834[#31834], {issue}33342[#33342]) + +Geo:: +* Decouple geojson parse logic from ShapeBuilders {pull}27212[#27212] + +Infra/Core:: +* Remove RequestBuilder from Action {pull}30966[#30966] +* Handle scheduler exceptions {pull}38014[#38014] (issues: {issue}28667[#28667], {issue}36137[#36137], {issue}37708[#37708]) + +Infra/Transport API:: +* Java api clean up: remove deprecated `isShardsAcked` {pull}28311[#28311] (issues: {issue}27784[#27784], {issue}27819[#27819]) + +ZenDiscovery:: +* Make node field in JoinRequest private {pull}36405[#36405] + +[[deprecation-7.0.0]] +[float] +=== Deprecations + +Aggregations:: +* Deprecate dots in aggregation names {pull}31468[#31468] (issues: {issue}17600[#17600], {issue}19040[#19040]) + +Analysis:: +* Replace parameter unicodeSetFilter with unicode_set_filter {pull}29215[#29215] (issue: {issue}22823[#22823]) +* Replace delimited_payload_filter by delimited_payload {pull}26625[#26625] (issue: {issue}21978[#21978]) +* Deprecate Standard Html Strip Analyzer in master {pull}26719[#26719] (issue: {issue}4704[#4704]) +* Remove `nGram` and `edgeNGram` token filter names (#38911) {pull}39070[#39070] (issues: {issue}30209[#30209], {issue}38911[#38911]) + +Audit:: +* Deprecate index audit output type {pull}37301[#37301] (issue: {issue}29881[#29881]) + +Core:: +* Deprecate use of scientific notation in epoch time parsing {pull}36691[#36691] +* Add backcompat for joda time formats {pull}36531[#36531] + +Cluster Coordination:: +* Deprecate size in cluster state response 
{pull}39951[#39951] (issue: {issue}39806[#39806]) + +Features/Indices APIs:: +* Default copy settings to true and deprecate on the REST layer {pull}30598[#30598] +* Reject setting index.optimize_auto_generated_id after version 7.0.0 {pull}28895[#28895] (issue: {issue}27600[#27600]) + +Features/Ingest:: +* Deprecate `_type` in simulate pipeline requests {pull}37949[#37949] (issue: {issue}37731[#37731]) + +Features/Java High Level REST Client:: +* Deprecate HLRC security methods {pull}37883[#37883] (issues: {issue}36938[#36938], {issue}37540[#37540]) +* Deprecate HLRC EmptyResponse used by security {pull}37540[#37540] (issue: {issue}36938[#36938]) + +Features/Watcher:: +* Deprecate xpack.watcher.history.cleaner_service.enabled {pull}37782[#37782] (issue: {issue}32041[#32041]) +* deprecate types for watcher {pull}37594[#37594] (issue: {issue}35190[#35190]) + +Graph:: +* Deprecate types in `_graph/explore` calls. {pull}40466[#40466] + +Infra/Core:: +* Deprecate negative epoch timestamps {pull}36793[#36793] +* Deprecate use of scientific notation in epoch time parsing {pull}36691[#36691] + +Infra/Packaging:: +* Deprecate fallback to java on PATH {pull}37990[#37990] + +Infra/Scripting:: +* Add types deprecation to script contexts {pull}37554[#37554] +* Deprecate _type from LeafDocLookup {pull}37491[#37491] +* Remove deprecated params.ctx {pull}36848[#36848] (issue: {issue}34059[#34059]) + +Infra/Transport API:: +* Deprecate the transport client in favour of the high-level REST client {pull}27085[#27085] + +Machine Learning:: +* Deprecate X-Pack centric ML endpoints {pull}36315[#36315] (issue: {issue}35958[#35958]) +* Adding ml_settings entry to HLRC and Docs for deprecation_info {pull}38118[#38118] +* Datafeed deprecation checks {pull}38026[#38026] (issue: {issue}37932[#37932]) +* Remove "8" prefixes from file structure finder timestamp formats {pull}38016[#38016] +* Adjust structure finder for Joda to Java time migration {pull}37306[#37306] +* Resolve 7.0.0 TODOs in ML code {pull}36842[#36842] (issue: {issue}29963[#29963]) + +Mapping:: +* Deprecate type exists requests. {pull}34663[#34663] +* Deprecate types in index API {pull}36575[#36575] (issues: {issue}35190[#35190], {issue}35790[#35790]) +* Deprecate uses of _type as a field name in queries {pull}36503[#36503] (issue: {issue}35190[#35190]) +* Deprecate types in update_by_query and delete_by_query {pull}36365[#36365] (issue: {issue}35190[#35190]) +* For msearch templates, make sure to use the right name for deprecation logging. {pull}36344[#36344] +* Deprecate types in termvector and mtermvector requests. {pull}36182[#36182] +* Deprecate types in update requests. {pull}36181[#36181] +* Deprecate types in document delete requests. {pull}36087[#36087] +* Deprecate types in get, exists, and multi get. {pull}35930[#35930] +* Deprecate types in search and multi search templates. {pull}35669[#35669] +* Deprecate types in explain requests. {pull}35611[#35611] +* Deprecate types in validate query requests. {pull}35575[#35575] +* Deprecate types in count and msearch. {pull}35421[#35421] (issue: {issue}34041[#34041]) +* Deprecate types in rollover index API {pull}38039[#38039] (issue: {issue}35190[#35190]) +* Deprecate types in get field mapping API {pull}37667[#37667] (issue: {issue}35190[#35190]) +* Deprecate types in the put mapping API. {pull}37280[#37280] (issues: {issue}29453[#29453], {issue}37285[#37285]) +* Support include_type_name in the field mapping and index template APIs. {pull}37210[#37210] +* Deprecate types in create index requests. 
{pull}37134[#37134] (issues: {issue}29453[#29453], {issue}37285[#37285]) +* Deprecate use of the _type field in aggregations. {pull}37131[#37131] (issue: {issue}36802[#36802]) +* Deprecate reference to _type in lookup queries {pull}37016[#37016] (issue: {issue}35190[#35190]) +* Deprecate the document create endpoint. {pull}36863[#36863] +* Deprecate types in index API {pull}36575[#36575] (issues: {issue}35190[#35190], {issue}35790[#35790]) +* Deprecate types in update APIs {pull}36225[#36225] + +Migration:: +* Deprecate X-Pack centric Migration endpoints {pull}35976[#35976] (issue: {issue}35958[#35958]) + +Monitoring:: +* Deprecate /_xpack/monitoring/* in favor of /_monitoring/* {pull}36130[#36130] (issue: {issue}35958[#35958]) + +Rollup:: +* Re-deprecate xpack rollup endpoints {pull}36451[#36451] (issue: {issue}36044[#36044]) +* Deprecate X-Pack centric rollup endpoints {pull}35962[#35962] (issue: {issue}35958[#35958]) + +Scripting:: +* Adds deprecation logging to ScriptDocValues#getValues. {pull}34279[#34279] (issue: {issue}22919[#22919]) +* Conditionally use java time api in scripting {pull}31441[#31441] + +Search:: +* Deprecate filtering on `_type`. {pull}29468[#29468] (issue: {issue}15613[#15613]) +* Remove X-Pack centric graph endpoints {pull}36010[#36010] (issue: {issue}35958[#35958]) +* Deprecate use of type in reindex request body {pull}36823[#36823] +* Add typeless endpoints for get_source and exist_source {pull}36426[#36426] + +Security:: +* Deprecate X-Pack centric license endpoints {pull}35959[#35959] (issue: {issue}35958[#35958]) +* Deprecate /_xpack/security/* in favor of /_security/* {pull}36293[#36293] (issue: {issue}35958[#35958]) + +SQL:: +* Deprecate X-Pack SQL translate endpoint {pull}36030[#36030] +* Deprecate X-Pack centric SQL endpoints {pull}35964[#35964] (issue: {issue}35958[#35958]) + +Watcher:: +* Deprecate X-Pack centric watcher endpoints {pull}36218[#36218] (issue: {issue}35958[#35958]) + + +[[feature-7.0.0]] +[float] +=== New features + +Allocation:: +* Node repurpose tool {pull}39403[#39403] (issues: {issue}37347[#37347], {issue}37748[#37748]) + +Analysis:: +* Relax TermVectors API to work with textual fields other than TextFieldType {pull}31915[#31915] (issue: {issue}31902[#31902]) +* Add support for inlined user dictionary in Nori {pull}36123[#36123] (issue: {issue}35842[#35842]) +* Add a prebuilt ICU Analyzer {pull}34958[#34958] (issue: {issue}34285[#34285]) + +Authentication:: +* Add support for API keys to access Elasticsearch {pull}38291[#38291] (issue: {issue}34383[#34383]) +* OIDC realm authentication flows {pull}37787[#37787] +* OIDC Realm JWT+JWS related functionality {pull}37272[#37272] (issues: {issue}35339[#35339], {issue}37009[#37009]) +* OpenID Connect Realm base functionality {pull}37009[#37009] (issue: {issue}35339[#35339]) + +Authorization:: +* Allow custom authorization with an authorization engine {pull}38358[#38358] (issues: {issue}32435[#32435], {issue}36245[#36245], {issue}37328[#37328], {issue}37495[#37495], {issue}37785[#37785], {issue}38137[#38137], {issue}38219[#38219]) +* Wildcard IndicesPermissions don't cover .security {pull}36765[#36765] + +CCR:: +* Generalize search.remote settings to cluster.remote {pull}33413[#33413] +* Add ccr follow info api {pull}37408[#37408] (issue: {issue}37127[#37127]) + +Distributed:: +* Log messages from allocation commands {pull}25955[#25955] (issues: {issue}22821[#22821], {issue}25325[#25325]) + +Features/ILM:: +* Add unfollow action {pull}36970[#36970] (issue: {issue}34648[#34648]) + 
+Features/Ingest:: +* Revert "Introduce a Hashing Processor (#31087)" {pull}32178[#32178] +* Add ingest-attachment support for per document `indexed_chars` limit {pull}28977[#28977] (issue: {issue}28942[#28942]) + +Features/Java High Level REST Client:: +* GraphClient for the high level REST client and associated tests {pull}32366[#32366] + +Features/Monitoring:: +* Collect only display_name (for now) {pull}35265[#35265] (issue: {issue}8445[#8445]) + +Geo:: +* Integrate Lucene's LatLonShape (BKD Backed GeoShapes) as default `geo_shape` indexing approach {pull}36751[#36751] (issue: {issue}35320[#35320]) +* Integrate Lucene's LatLonShape (BKD Backed GeoShapes) as default `geo_shape` indexing approach {pull}35320[#35320] (issue: {issue}32039[#32039]) +* geotile_grid implementation {pull}37842[#37842] (issue: {issue}30240[#30240]) +* Fork Lucene's LatLonShape Classes to local lucene package {pull}36794[#36794] +* Integrate Lucene's LatLonShape (BKD Backed GeoShapes) as default `geo_shape` indexing approach {pull}36751[#36751] (issue: {issue}35320[#35320]) +* Integrate Lucene's LatLonShape (BKD Backed GeoShapes) as default `geo_shape` indexing approach {pull}35320[#35320] (issue: {issue}32039[#32039]) + +Infra/Core:: +* Skip shard refreshes if shard is `search idle` {pull}27500[#27500] + +Infra/Logging:: +* Logging: Unify log rotation for index/search slow log {pull}27298[#27298] + +Infra/Plugins:: +* Reload secure settings for plugins {pull}31383[#31383] (issue: {issue}29135[#29135]) + +Infra/REST API:: +* Add an `include_type_name` option. {pull}29453[#29453] (issue: {issue}15613[#15613]) + +Java High Level REST Client:: +* Add rollup search {pull}36334[#36334] (issue: {issue}29827[#29827]) + +Java Low Level REST Client:: +* Make warning behavior pluggable per request {pull}36345[#36345] +* Add PreferHasAttributeNodeSelector {pull}36005[#36005] + +Machine Learning:: +* Filter undefined job groups from update job calendar actions {pull}30757[#30757] +* Add delayed datacheck to the datafeed job runner {pull}35387[#35387] (issue: {issue}35131[#35131]) +* Adds set_upgrade_mode API endpoint {pull}37837[#37837] + +Mapping:: +* Add a `feature_vector` field. {pull}31102[#31102] (issue: {issue}27552[#27552]) +* Expose Lucene's FeatureField. {pull}30618[#30618] +* Make typeless APIs usable with indices whose type name is different from `_doc` {pull}35790[#35790] (issue: {issue}35190[#35190]) +* Give precedence to index creation when mixing typed templates with typeless index creation and vice-versa. 
{pull}37871[#37871] (issue: {issue}37773[#37773]) +* Add nanosecond field mapper {pull}37755[#37755] (issues: {issue}27330[#27330], {issue}32601[#32601]) + +Ranking:: +* Add ranking evaluation API {pull}27478[#27478] (issue: {issue}19195[#19195]) + +Recovery:: +* Allow to trim all ops above a certain seq# with a term lower than X, … {pull}31211[#31211] (issue: {issue}10708[#10708]) + +SQL:: +* Add basic support for ST_AsWKT geo function {pull}34205[#34205] +* Add support for SYS GEOMETRY_COLUMNS {pull}30496[#30496] (issue: {issue}29872[#29872]) +* Introduce HISTOGRAM grouping function {pull}36510[#36510] (issue: {issue}36509[#36509]) +* DATABASE() and USER() system functions {pull}35946[#35946] (issue: {issue}35863[#35863]) +* Introduce INTERVAL support {pull}35521[#35521] (issue: {issue}29990[#29990]) +* Allow sorting of groups by aggregates {pull}38042[#38042] (issue: {issue}35118[#35118]) +* Implement FIRST/LAST aggregate functions {pull}37936[#37936] (issue: {issue}35639[#35639]) +* Introduce SQL DATE data type {pull}37693[#37693] (issue: {issue}37340[#37340]) + +Search:: +* Add “took” timing info to response for _msearch/template API {pull}30961[#30961] (issue: {issue}30957[#30957]) +* Add allow_partial_search_results flag to search requests with default setting true {pull}28440[#28440] (issue: {issue}27435[#27435]) +* Enable adaptive replica selection by default {pull}26522[#26522] (issue: {issue}24915[#24915]) +* Add intervals query {pull}36135[#36135] (issues: {issue}29636[#29636], {issue}32406[#32406]) +* Added soft limit to open scroll contexts #25244 {pull}36009[#36009] (issue: {issue}25244[#25244]) +* Introduce ability to minimize round-trips in CCS {pull}37828[#37828] (issues: {issue}32125[#32125], {issue}37566[#37566]) +* Add script filter to intervals {pull}36776[#36776] +* Add the ability to set the number of hits to track accurately {pull}36357[#36357] (issue: {issue}33028[#33028]) +* Add a maximum search request size. 
{pull}26423[#26423] +* Make IntervalQuery available via the Query DSL {pull}36135[#36135] (issue: {issue}29636[#29636]) + +Security:: +* Switch internal security index to ".security-7" {pull}39337[#39337] (issue: {issue}39284[#39284]) + +Suggesters:: +* Serialize suggestion responses as named writeables {pull}30284[#30284] (issue: {issue}26585[#26585]) + + +[[enhancement-7.0.0]] +[float] +=== Enhancements + +Aggregations:: +* Uses MergingDigest instead of AVLDigest in percentiles agg {pull}28702[#28702] (issue: {issue}19528[#19528]) +* Added keyed response to pipeline percentile aggregations 22302 {pull}36392[#36392] (issue: {issue}22302[#22302]) +* Enforce max_buckets limit only in the final reduction phase {pull}36152[#36152] (issues: {issue}32125[#32125], {issue}35921[#35921]) +* Histogram aggs: add empty buckets only in the final reduce step {pull}35921[#35921] +* Handles exists query in composite aggs {pull}35758[#35758] +* Added parent validation for auto date histogram {pull}35670[#35670] +* Add Composite to AggregationBuilders {pull}38207[#38207] (issue: {issue}38020[#38020]) +* Allow nested fields in the composite aggregation {pull}37178[#37178] (issue: {issue}28611[#28611]) +* Remove single shard optimization when suggesting shard_size {pull}37041[#37041] (issue: {issue}32125[#32125]) +* Use List instead of priority queue for stable sorting in bucket sort aggregator {pull}36748[#36748] (issue: {issue}36322[#36322]) +* Keys are compared in BucketSortPipelineAggregation so making key type… {pull}36407[#36407] + +Allocation:: +* Fail start on obsolete indices documentation {pull}37786[#37786] (issue: {issue}27073[#27073]) +* Fail start on invalid index metadata {pull}37748[#37748] (issue: {issue}27073[#27073]) +* Fail start of non-data node if node has data {pull}37347[#37347] (issue: {issue}27073[#27073]) + +Analysis:: +* Allow word_delimiter_graph_filter to not adjust internal offsets {pull}36699[#36699] (issues: {issue}33710[#33710], {issue}34741[#34741]) +* Ensure TokenFilters only produce single tokens when parsing synonyms {pull}34331[#34331] (issue: {issue}34298[#34298]) +* Allow word_delimiter_graph_filter to not adjust internal offsets {pull}36699[#36699] (issues: {issue}33710[#33710], {issue}34741[#34741]) + +Audit:: +* Add "request.id" to file audit logs {pull}35536[#35536] +* Security Audit includes HTTP method for requests {pull}37322[#37322] (issue: {issue}29765[#29765]) +* Add X-Forwarded-For to the logfile audit {pull}36427[#36427] + +Authentication:: +* Invalidate Token API enhancements - HLRC {pull}36362[#36362] (issue: {issue}35388[#35388]) +* Add DEBUG/TRACE logs for LDAP bind {pull}36028[#36028] +* Add Tests for findSamlRealm {pull}35905[#35905] +* Add realm information for Authenticate API {pull}35648[#35648] +* Formal support for "password_hash" in Put User {pull}35242[#35242] (issue: {issue}34729[#34729]) +* Propagate auth result to listeners {pull}36900[#36900] (issue: {issue}30794[#30794]) +* Reorder realms based on last success {pull}36878[#36878] +* Improve error message for 6.x style realm settings {pull}36876[#36876] (issues: {issue}30241[#30241], {issue}36026[#36026]) +* Change missing authn message to not mention tokens {pull}36750[#36750] +* Invalidate Token API enhancements - HLRC {pull}36362[#36362] (issue: {issue}35388[#35388]) +* Enhance Invalidate Token API {pull}35388[#35388] (issues: {issue}34556[#34556], {issue}35115[#35115]) + +Authorization:: +* Improve exact index matching performance {pull}36017[#36017] +* `manage_token` privilege for 
`kibana_system` {pull}35751[#35751] +* Grant .tasks access to kibana_system role {pull}35573[#35573] +* Add apm_user reserved role {pull}38206[#38206] +* Permission for restricted indices {pull}37577[#37577] (issue: {issue}34454[#34454]) +* Remove kibana_user and kibana_dashboard_only_user index privileges {pull}37441[#37441] +* Create snapshot role {pull}35820[#35820] (issue: {issue}34454[#34454]) + +Build:: +* Sounds like typo in exception message {pull}35458[#35458] +* Allow set section in setup section of REST tests {pull}34678[#34678] + +CCR:: +* Add time since last auto follow fetch to auto follow stats {pull}36542[#36542] (issues: {issue}33007[#33007], {issue}35895[#35895]) +* Clean followed leader index UUIDs in auto follow metadata {pull}36408[#36408] (issue: {issue}33007[#33007]) +* Change AutofollowCoordinator to use wait_for_metadata_version {pull}36264[#36264] (issues: {issue}33007[#33007], {issue}35895[#35895]) +* Refactor AutoFollowCoordinator to track leader indices per remote cluster {pull}36031[#36031] (issues: {issue}33007[#33007], {issue}35895[#35895]) +* Concurrent file chunk fetching for CCR restore {pull}38495[#38495] +* Tighten mapping syncing in ccr remote restore {pull}38071[#38071] (issues: {issue}36879[#36879], {issue}37887[#37887]) +* Do not allow put mapping on follower {pull}37675[#37675] (issue: {issue}30086[#30086]) +* Added ccr to xpack usage infrastructure {pull}37256[#37256] (issue: {issue}37221[#37221]) +* FollowingEngine should fail with 403 if operation has no seqno assigned {pull}37213[#37213] +* Added auto_follow_exception.timestamp field to auto follow stats {pull}36947[#36947] +* Add time since last auto follow fetch to auto follow stats {pull}36542[#36542] (issues: {issue}33007[#33007], {issue}35895[#35895]) +* Reduce retention lease sync intervals {pull}40302[#40302] +* Renew retention leases while following {pull}39335[#39335] (issues: {issue}37165[#37165], {issue}38718[#38718]) +* Reduce refresh when lookup term in FollowingEngine {pull}39184[#39184] +* Integrate retention leases to recovery from remote {pull}38829[#38829] (issue: {issue}37165[#37165]) +* Enable removal of retention leases {pull}38751[#38751] (issue: {issue}37165[#37165]) +* Introduce forget follower API {pull}39718[#39718] (issue: {issue}37165[#37165]) + +Client:: +* Fixed required fields and paths list {pull}39358[#39358] + +Cluster Coordination:: +* Remove timeout task after completing cluster state publication {pull}40411[#40411] +* Use default discovery implementation for single-node discovery {pull}40036[#40036] +* Do not log unsuccessful join attempt each time {pull}39756[#39756] + +Core:: +* Override the JVM DNS cache policy {pull}36570[#36570] +* Replace usages of AtomicBoolean based block of code by the RunOnce class {pull}35553[#35553] (issue: {issue}35489[#35489]) +* Added wait_for_metadata_version parameter to cluster state api. 
{pull}35535[#35535] +* Extract RunOnce into a dedicated class {pull}35489[#35489] +* Introduce elasticsearch-core jar {pull}28191[#28191] (issue: {issue}27933[#27933]) +* Rename core module to server {pull}28180[#28180] (issue: {issue}27933[#27933]) + +CRUD:: +* Rename seq# powered optimistic concurrency control parameters to ifSeqNo/ifPrimaryTerm {pull}36757[#36757] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Expose Sequence Number based Optimistic Concurrency Control in the rest layer {pull}36721[#36721] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Add doc's sequence number + primary term to GetResult and use it for updates {pull}36680[#36680] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Add seq no powered optimistic locking support to the index and delete transport actions {pull}36619[#36619] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Add Seq# based optimistic concurrency control to UpdateRequest {pull}37872[#37872] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Introduce ssl settings to reindex from remote {pull}37527[#37527] (issues: {issue}29755[#29755], {issue}37287[#37287]) +* Use Sequence number powered OCC for processing updates {pull}37308[#37308] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Document Seq No powered optimistic concurrency control {pull}37284[#37284] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Enable IPv6 URIs in reindex from remote {pull}36874[#36874] +* Rename seq# powered optimistic concurrency control parameters to ifSeqNo/ifPrimaryTerm {pull}36757[#36757] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Expose Sequence Number based Optimistic Concurrency Control in the rest layer {pull}36721[#36721] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Add doc's sequence number + primary term to GetResult and use it for updates {pull}36680[#36680] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Add seq no powered optimistic locking support to the index and delete transport actions {pull}36619[#36619] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Set acking timeout to 0 on dynamic mapping update {pull}31140[#31140] (issues: {issue}30672[#30672], {issue}30844[#30844]) + +Discovery-Plugins:: +* Adds connect and read timeouts to discovery-gce {pull}28193[#28193] (issue: {issue}24313[#24313]) + +Distributed:: +* [Close Index API] Mark shard copy as stale if needed during shard verification {pull}36755[#36755] +* [Close Index API] Refactor MetaDataIndexStateService {pull}36354[#36354] (issue: {issue}36249[#36249]) +* [Close Index API] Add TransportShardCloseAction for pre-closing verifications {pull}36249[#36249] +* TransportResyncReplicationAction should not honour blocks {pull}35795[#35795] (issues: {issue}35332[#35332], {issue}35597[#35597]) +* Expose all permits acquisition in IndexShard and TransportReplicationAction {pull}35540[#35540] (issue: {issue}33888[#33888]) +* [RCI] Check blocks while having index shard permit in TransportReplicationAction {pull}35332[#35332] (issue: {issue}33888[#33888]) +* Recover retention leases during peer recovery {pull}38435[#38435] (issue: {issue}37165[#37165]) +* Lift retention lease expiration to index shard {pull}38380[#38380] (issues: {issue}37165[#37165], {issue}37963[#37963], {issue}38070[#38070]) +* Introduce retention lease background sync {pull}38262[#38262] (issue: {issue}37165[#37165]) +* Allow shards of closed indices to be replicated as regular shards {pull}38024[#38024] (issue: {issue}33888[#33888]) +* Expose 
retention leases in shard stats {pull}37991[#37991] (issue: {issue}37165[#37165]) +* Introduce retention leases versioning {pull}37951[#37951] (issue: {issue}37165[#37165]) +* Soft-deletes policy should always fetch latest leases {pull}37940[#37940] (issues: {issue}37165[#37165], {issue}37375[#37375]) +* Sync retention leases on expiration {pull}37902[#37902] (issue: {issue}37165[#37165]) +* Ignore shard started requests when primary term does not match {pull}37899[#37899] (issue: {issue}33888[#33888]) +* Move update and delete by query to use seq# for optimistic concurrency control {pull}37857[#37857] (issues: {issue}10708[#10708], {issue}36148[#36148], {issue}37639[#37639]) +* Introduce retention lease serialization {pull}37447[#37447] (issues: {issue}37165[#37165], {issue}37398[#37398]) +* Add run under primary permit method {pull}37440[#37440] (issue: {issue}37398[#37398]) +* Introduce retention lease syncing {pull}37398[#37398] (issue: {issue}37165[#37165]) +* Introduce retention lease persistence {pull}37375[#37375] (issue: {issue}37165[#37165]) +* Add validation for retention lease construction {pull}37312[#37312] (issue: {issue}37165[#37165]) +* Introduce retention lease expiration {pull}37195[#37195] (issue: {issue}37165[#37165]) +* Introduce shard history retention leases {pull}37167[#37167] (issue: {issue}37165[#37165]) +* [Close Index API] Add unique UUID to ClusterBlock {pull}36775[#36775] +* [Close Index API] Mark shard copy as stale if needed during shard verification {pull}36755[#36755] +* [Close Index API] Propagate tasks ids between Freeze, Close and Verify Shard actions {pull}36630[#36630] +* Always initialize the global checkpoint {pull}34381[#34381] +* Introduce retention lease actions {pull}38756[#38756] (issue: {issue}37165[#37165]) +* Add dedicated retention lease exceptions {pull}38754[#38754] (issue: {issue}37165[#37165]) +* Copy retention leases when trim unsafe commits {pull}37995[#37995] (issue: {issue}37165[#37165]) +* Allow retention lease operations under blocks {pull}39089[#39089] (issues: {issue}34648[#34648], {issue}37165[#37165]) +* Remove retention leases when unfollowing {pull}39088[#39088] (issues: {issue}34648[#34648], {issue}37165[#37165]) +* Introduce retention lease state file {pull}39004[#39004] (issues: {issue}37165[#37165], {issue}38588[#38588], {issue}39032[#39032]) +* Enable soft-deletes by default for 7.0+ indices {pull}38929[#38929] (issue: {issue}36141[#36141]) + +Engine:: +* Remove versionType from translog {pull}31945[#31945] +* Do retry if primary fails on AsyncAfterWriteAction {pull}31857[#31857] (issues: {issue}31716[#31716], {issue}31755[#31755]) +* handle AsyncAfterWriteAction exception before listener is registered {pull}31755[#31755] (issue: {issue}31716[#31716]) +* Use IndexWriter#flushNextBuffer to free memory {pull}27753[#27753] +* Remove pre 6.0.0 support from InternalEngine {pull}27720[#27720] +* Add sequence numbers based optimistic concurrency control support to Engine {pull}36467[#36467] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Require soft-deletes when access changes snapshot {pull}36446[#36446] +* Use delCount of SegmentInfos to calculate numDocs {pull}36323[#36323] +* Always configure soft-deletes field of IndexWriterConfig {pull}36196[#36196] (issue: {issue}36141[#36141]) +* Enable soft-deletes by default on 7.0.0 or later {pull}36141[#36141] +* Always return false from `refreshNeeded` on ReadOnlyEngine {pull}35837[#35837] (issue: {issue}35785[#35785]) +* Add a `_freeze` / `_unfreeze` API 
{pull}35592[#35592] (issue: {issue}34352[#34352]) +* [RCI] Add IndexShardOperationPermits.asyncBlockOperations(ActionListener) {pull}34902[#34902] (issue: {issue}33888[#33888]) +* Specialize pre-closing checks for engine implementations {pull}38702[#38702] +* Ensure that max seq # is equal to the global checkpoint when creating ReadOnlyEngines {pull}37426[#37426] +* Enable Bulk-Merge if all source remains {pull}37269[#37269] +* Rename setting to enable mmap {pull}37070[#37070] (issue: {issue}36668[#36668]) +* Add hybridfs store type {pull}36668[#36668] +* Introduce time-based retention policy for soft-deletes {pull}34943[#34943] (issue: {issue}34908[#34908]) +* Handle AsyncAfterWriteAction failure on primary in the same way as failures on replicas {pull}31969[#31969] (issues: {issue}31716[#31716], {issue}31755[#31755]) +* Explicitly advance max_seq_no before indexing {pull}39473[#39473] (issue: {issue}38879[#38879]) +* Also mmap cfs files for hybridfs {pull}38940[#38940] (issue: {issue}36668[#36668]) + +Features/CAT APIs:: +* Expose `search.throttled` on `_cat/indices` {pull}37073[#37073] (issue: {issue}34352[#34352]) + +Features/Features:: +* Run Node deprecation checks locally (#38065) {pull}38250[#38250] (issue: {issue}38065[#38065]) + +Features/ILM:: +* Ensure ILM policies run safely on leader indices {pull}38140[#38140] (issue: {issue}34648[#34648]) +* Skip Shrink when numberOfShards not changed {pull}37953[#37953] (issue: {issue}33275[#33275]) +* Inject Unfollow before Rollover and Shrink {pull}37625[#37625] (issue: {issue}34648[#34648]) +* Add set_priority action to ILM {pull}37397[#37397] (issue: {issue}36905[#36905]) +* Add Freeze Action {pull}36910[#36910] (issue: {issue}34630[#34630]) + +Features/Indices APIs:: +* Add cluster-wide shard limit {pull}32856[#32856] (issue: {issue}20705[#20705]) +* Remove RestGetAllAliasesAction {pull}31308[#31308] (issue: {issue}31129[#31129]) +* Add rollover-creation-date setting to rolled over index {pull}31144[#31144] (issue: {issue}30887[#30887]) +* add is-write-index flag to aliases {pull}30942[#30942] +* Make index and bulk APIs work without types. {pull}29479[#29479] +* Simplify deprecation issue levels {pull}36326[#36326] +* New mapping signature and mapping string source fixed. 
{pull}37401[#37401] + +Features/Ingest:: +* Add ignore_missing property to foreach filter (#22147) {pull}31578[#31578] (issue: {issue}22147[#22147]) +* Compile mustache template only if field includes '{{'' {pull}37207[#37207] (issue: {issue}37120[#37120]) +* Move ingest-geoip default databases out of config {pull}36949[#36949] (issue: {issue}36898[#36898]) +* Make the ingest-geoip databases even lazier to load {pull}36679[#36679] +* Updates the grok patterns to be consistent with the logstash {pull}27181[#27181] + +Features/Java High Level REST Client:: +* HLRC API for _termvectors {pull}32610[#32610] (issue: {issue}27205[#27205]) +* Fix strict setting exception handling {pull}37247[#37247] (issue: {issue}37090[#37090]) +* Use nonblocking entity for requests {pull}32249[#32249] + +Features/Monitoring:: +* Make Exporters Async {pull}35765[#35765] (issue: {issue}35743[#35743]) +* Adding mapping for hostname field {pull}37288[#37288] +* Remove types from internal monitoring templates and bump to api 7 {pull}39888[#39888] (issue: {issue}38637[#38637]) + +Features/Stats:: +* Stats to record how often the ClusterState diff mechanism is used successfully {pull}26973[#26973] +* Add JVM dns cache expiration config to JvmInfo {pull}36372[#36372] + +Features/Watcher:: +* Validate email addresses when storing a watch {pull}34042[#34042] (issue: {issue}33980[#33980]) +* Move watcher to use seq# and primary term for concurrency control {pull}37977[#37977] (issues: {issue}10708[#10708], {issue}37872[#37872]) +* Use ILM for Watcher history deletion {pull}37443[#37443] (issue: {issue}32041[#32041]) +* Add whitelist to HttpClient {pull}36817[#36817] (issue: {issue}29937[#29937]) +* Remove the index type from internal watcher indexes {pull}39761[#39761] (issue: {issue}38637[#38637]) + +Geo:: +* Adds a name of the field to geopoint parsing errors {pull}36529[#36529] (issue: {issue}15965[#15965]) +* Add support to ShapeBuilders for building Lucene geometry {pull}35707[#35707] (issue: {issue}35320[#35320]) +* Add ST_WktToSQL function {pull}35416[#35416] (issue: {issue}29872[#29872]) + +Index APIs:: +* Add cluster-wide shard limit warnings {pull}34021[#34021] (issues: {issue}20705[#20705], {issue}32856[#32856]) + +Infra/Circuit Breakers:: +* Have circuit breaker succeed on unknown mem usage {pull}33125[#33125] (issue: {issue}31767[#31767]) +* Account for XContent overhead in in-flight breaker {pull}31613[#31613] +* Script Stats: Add compilation limit counter to stats {pull}26387[#26387] + +Infra/Core:: +* Add RunOnce utility class that executes a Runnable exactly once {pull}35484[#35484] +* Improved IndexNotFoundException's default error message {pull}34649[#34649] (issue: {issue}34628[#34628]) +* fix a few versionAdded values in ElasticsearchExceptions {pull}37877[#37877] +* Add simple method to write collection of writeables {pull}37448[#37448] (issue: {issue}37398[#37398]) +* Date/Time parsing: Use java time API instead of exception handling {pull}37222[#37222] +* [API] spelling: interruptible {pull}37049[#37049] (issue: {issue}37035[#37035]) +* Enhancements to IndicesQueryCache. 
{pull}39099[#39099] (issue: {issue}37117[#37117]) +* Change zone formatting for all printers {pull}39568[#39568] (issue: {issue}38471[#38471]) + +Infra/Logging:: +* Trim the JSON source in indexing slow logs {pull}38081[#38081] (issue: {issue}38080[#38080]) +* Optimize warning header de-duplication {pull}37725[#37725] (issues: {issue}35754[#35754], {issue}37530[#37530], {issue}37597[#37597], {issue}37622[#37622]) +* Remove warn-date from warning headers {pull}37622[#37622] (issues: {issue}35754[#35754], {issue}37530[#37530], {issue}37597[#37597]) +* Add some deprecation optimizations {pull}37597[#37597] (issues: {issue}35754[#35754], {issue}37530[#37530]) +* Only update response headers if we have a new one {pull}37590[#37590] (issues: {issue}35754[#35754], {issue}37530[#37530]) + +Infra/Packaging:: +* Choose JVM options ergonomically {pull}30684[#30684] +* Add OS/architecture classifier to distributions {pull}37881[#37881] +* Change file descriptor limit to 65535 {pull}37537[#37537] (issue: {issue}35839[#35839]) +* Exit batch files explicitly using ERRORLEVEL {pull}29583[#29583] (issue: {issue}29582[#29582]) +* Add no-jdk distributions {pull}39882[#39882] +* Allow AVX-512 on JDK 11+ {pull}40828[#40828] (issue: {issue}32138[#32138]) + +Infra/REST API:: +* Remove hand-coded XContent duplicate checks {pull}34588[#34588] (issues: {issue}22073[#22073], {issue}22225[#22225], {issue}22253[#22253]) +* Add the `include_type_name` option to the search and document APIs. {pull}29506[#29506] (issue: {issue}15613[#15613]) +* Validate `op_type` for `_create` {pull}27483[#27483] + +Infra/Scripting:: +* Tests: Add support for custom contexts to mock scripts {pull}34100[#34100] +* Reflect factory signatures in painless classloader {pull}34088[#34088] +* Handle missing values in painless {pull}32207[#32207] (issue: {issue}29286[#29286]) +* Add getZone to JodaCompatibleZonedDateTime {pull}37084[#37084] +* [Painless] Add boxed type to boxed type casts for method/return {pull}36571[#36571] + +Infra/Packaging:: +* Use bundled JDK in Docker images {pull}40238[#40238] +* Upgrade bundled JDK and Docker images to JDK 12 {pull}40229[#40229] +* Bundle java in distributions {pull}38013[#38013] (issue: {issue}31845[#31845]) + +Infra/Settings:: +* Settings: Add keystore creation to add commands {pull}26126[#26126] +* Separate out validation of groups of settings {pull}34184[#34184] +* Provide a clearer error message on keystore add {pull}39327[#39327] (issue: {issue}39324[#39324]) + +Infra/Transport API:: +* Change BWC version for VerifyRepositoryResponse {pull}30796[#30796] (issue: {issue}30762[#30762]) + +Ingest:: +* Grok fix duplicate patterns JAVACLASS and JAVAFILE {pull}35886[#35886] +* Implement Drop Processor {pull}32278[#32278] (issue: {issue}23726[#23726]) + +Java High Level REST Client:: +* Add get users action {pull}36332[#36332] (issue: {issue}29827[#29827]) +* Add delete template API {pull}36320[#36320] (issue: {issue}27205[#27205]) +* Implement get-user-privileges API {pull}36292[#36292] +* Get Deprecation Info API {pull}36279[#36279] (issue: {issue}29827[#29827]) +* Add support for Follow Stats API {pull}36253[#36253] (issue: {issue}33824[#33824]) +* Add support for CCR Stats API {pull}36213[#36213] (issue: {issue}33824[#33824]) +* Put Role {pull}36209[#36209] (issue: {issue}29827[#29827]) +* Add index templates exist API {pull}36132[#36132] (issue: {issue}27205[#27205]) +* Add support for CCR Get Auto Follow Pattern apis {pull}36049[#36049] (issue: {issue}33824[#33824]) +* Add support for CCR Delete Auto 
Follow Pattern API {pull}35981[#35981] (issue: {issue}33824[#33824]) +* Remove fromXContent from IndexUpgradeInfoResponse {pull}35934[#35934] +* Add delete expired data API {pull}35906[#35906] (issue: {issue}29827[#29827]) +* Execute watch API {pull}35868[#35868] (issue: {issue}29827[#29827]) +* Add ability to put user with a password hash {pull}35844[#35844] (issue: {issue}35242[#35242]) +* Add ML find file structure API {pull}35833[#35833] (issue: {issue}29827[#29827]) +* Add support for get roles API {pull}35787[#35787] (issue: {issue}29827[#29827]) +* Added support for CCR Put Auto Follow Pattern API {pull}35780[#35780] (issue: {issue}33824[#33824]) +* XPack ML info action {pull}35777[#35777] (issue: {issue}29827[#29827]) +* ML Delete event from Calendar {pull}35760[#35760] (issue: {issue}29827[#29827]) +* Add ML revert model snapshot API {pull}35750[#35750] (issue: {issue}29827[#29827]) +* ML Get Calendar Events {pull}35747[#35747] (issue: {issue}29827[#29827]) +* Add high-level REST client API for `_freeze` and `_unfreeze` {pull}35723[#35723] (issue: {issue}34352[#34352]) +* Fix issue in equals impl for GlobalOperationPrivileges {pull}35721[#35721] +* ML Delete job from calendar {pull}35713[#35713] (issue: {issue}29827[#29827]) +* ML Add Event To Calendar API {pull}35704[#35704] (issue: {issue}29827[#29827]) +* Add ML update model snapshot API (#35537) {pull}35694[#35694] (issue: {issue}29827[#29827]) +* Add support for CCR Unfollow API {pull}35693[#35693] (issue: {issue}33824[#33824]) +* Clean up PutLicenseResponse {pull}35689[#35689] (issue: {issue}35547[#35547]) +* Clean up StartBasicResponse {pull}35688[#35688] (issue: {issue}35547[#35547]) +* Add support for put privileges API {pull}35679[#35679] +* ML Add Job to Calendar API {pull}35666[#35666] (issue: {issue}29827[#29827]) +* Add support for CCR Resume Follow API {pull}35638[#35638] (issue: {issue}33824[#33824]) +* Add support for get application privileges API {pull}35556[#35556] (issue: {issue}29827[#29827]) +* Clean up XPackInfoResponse class and related tests {pull}35547[#35547] +* Add parameters to stopRollupJob API {pull}35545[#35545] (issue: {issue}34811[#34811]) +* Add ML delete model snapshot API {pull}35537[#35537] (issue: {issue}29827[#29827]) +* Add get watch API {pull}35531[#35531] (issue: {issue}29827[#29827]) +* Add ML Update Filter API {pull}35522[#35522] (issue: {issue}29827[#29827]) +* Add ml get filters api {pull}35502[#35502] (issue: {issue}29827[#29827]) +* Add ML get model snapshots API {pull}35487[#35487] (issue: {issue}29827[#29827]) +* Add "_has_privileges" API to Security Client {pull}35479[#35479] (issue: {issue}29827[#29827]) +* Add Delete Privileges API to HLRC {pull}35454[#35454] (issue: {issue}29827[#29827]) +* Add support for CCR Put Follow API {pull}35409[#35409] +* Add ML delete filter action {pull}35382[#35382] (issue: {issue}29827[#29827]) +* Add delete user action {pull}35294[#35294] (issue: {issue}29827[#29827]) +* HLRC for _mtermvectors {pull}35266[#35266] (issues: {issue}27205[#27205], {issue}33447[#33447]) +* Reindex API with wait_for_completion false {pull}35202[#35202] (issue: {issue}27205[#27205]) +* Add watcher stats API {pull}35185[#35185] (issue: {issue}29827[#29827]) +* HLRC support for getTask {pull}35166[#35166] (issue: {issue}27205[#27205]) +* Add GetRollupIndexCaps API {pull}35102[#35102] (issue: {issue}29827[#29827]) +* HLRC: migration api - upgrade {pull}34898[#34898] (issue: {issue}29827[#29827]) +* Add stop rollup job support to HL REST Client {pull}34702[#34702] (issue: 
{issue}29827[#29827]) +* Bulk Api support for global parameters {pull}34528[#34528] (issue: {issue}26026[#26026]) +* Add delete rollup job support to HL REST Client {pull}34066[#34066] (issue: {issue}29827[#29827]) +* Add support for get license basic/trial status API {pull}33176[#33176] (issue: {issue}29827[#29827]) +* Add machine learning open job {pull}32860[#32860] (issue: {issue}29827[#29827]) +* Add ML HLRC wrapper and put_job API call {pull}32726[#32726] +* Add Get Snapshots High Level REST API {pull}31537[#31537] (issue: {issue}27205[#27205]) + +Java Low Level REST Client:: +* On retry timeout add root exception {pull}25576[#25576] + +License:: +* Require acknowledgement to start_trial license {pull}30135[#30135] (issue: {issue}30134[#30134]) +* Handle malformed license signatures {pull}37137[#37137] (issue: {issue}35340[#35340]) + +Machine Learning:: +* Create the ML annotations index {pull}36731[#36731] (issues: {issue}26034[#26034], {issue}33376[#33376]) +* Split in batches and migrate all jobs and datafeeds {pull}36716[#36716] (issue: {issue}32905[#32905]) +* Add cluster setting to enable/disable config migration {pull}36700[#36700] (issue: {issue}32905[#32905]) +* Add audits when deprecation warnings occur with datafeed start {pull}36233[#36233] +* Add lazy parsing for DatafeedConfig:Aggs,Query {pull}36117[#36117] +* Add support for lazy nodes (#29991) {pull}34538[#34538] (issue: {issue}29991[#29991]) +* Move ML Optimistic Concurrency Control to Seq No {pull}38278[#38278] (issues: {issue}10708[#10708], {issue}36148[#36148]) +* Add explanation so far to file structure finder exceptions {pull}38191[#38191] (issue: {issue}29821[#29821]) +* Add reason field in JobTaskState {pull}38029[#38029] (issue: {issue}34431[#34431]) +* Add _meta information to all ML indices {pull}37964[#37964] +* Add upgrade mode docs, hlrc, and fix bug {pull}37942[#37942] +* Tighten up use of aliases rather than concrete indices {pull}37874[#37874] +* Add support for single bucket aggs in Datafeeds {pull}37544[#37544] (issue: {issue}36838[#36838]) +* Merge the Jindex master feature branch {pull}36702[#36702] (issue: {issue}32905[#32905]) +* Allow stop unassigned datafeed and relax unset upgrade mode wait {pull}39034[#39034] + +Mapping:: +* Log document id when MapperParsingException occurs {pull}37800[#37800] (issue: {issue}37658[#37658]) +* [API] spelling: unknown {pull}37056[#37056] (issue: {issue}37035[#37035]) +* Make SourceToParse immutable {pull}36971[#36971] +* Use index-prefix fields for terms of length min_chars - 1 {pull}36703[#36703] +* Introduce a parameter suppress_types_warnings. 
{pull}38923[#38923] + +Network:: +* Add cors support to NioHttpServerTransport {pull}30827[#30827] (issue: {issue}28898[#28898]) +* Reintroduce mandatory http pipelining support {pull}30820[#30820] +* Make http pipelining support mandatory {pull}30695[#30695] (issues: {issue}28898[#28898], {issue}29500[#29500]) +* Add nio http server transport {pull}29587[#29587] (issue: {issue}28898[#28898]) +* Add class for serializing message to bytes {pull}29384[#29384] (issue: {issue}28898[#28898]) +* Selectors operate on channel contexts {pull}28468[#28468] (issue: {issue}27260[#27260]) +* Unify nio read / write channel contexts {pull}28160[#28160] (issue: {issue}27260[#27260]) +* Create nio-transport plugin for NioTransport {pull}27949[#27949] (issue: {issue}27260[#27260]) +* Add elasticsearch-nio jar for base nio classes {pull}27801[#27801] (issue: {issue}27802[#27802]) +* Unify transport settings naming {pull}36623[#36623] +* Add sni name to SSLEngine in netty transport {pull}33144[#33144] (issue: {issue}32517[#32517]) +* Add NioGroup for use in different transports {pull}27737[#27737] (issue: {issue}27260[#27260]) +* Add read timeouts to http module {pull}27713[#27713] +* Implement byte array reusage in `NioTransport` {pull}27696[#27696] (issue: {issue}27563[#27563]) +* Introduce resizable inbound byte buffer {pull}27551[#27551] (issue: {issue}27563[#27563]) +* Decouple nio constructs from the tcp transport {pull}27484[#27484] (issue: {issue}27260[#27260]) +* Remove manual tracking of registered channels {pull}27445[#27445] (issue: {issue}27260[#27260]) +* Remove tcp profile from low level nio channel {pull}27441[#27441] (issue: {issue}27260[#27260]) +* Decouple `ChannelFactory` from Tcp classes {pull}27286[#27286] (issue: {issue}27260[#27260]) +* Enable TLSv1.3 by default for JDKs with support {pull}38103[#38103] (issue: {issue}32276[#32276]) + +Packaging:: +* Introduce Docker images build {pull}36246[#36246] +* Move creation of temporary directory to Java {pull}36002[#36002] (issue: {issue}31003[#31003]) + +Percolator:: +* Make the `type` parameter optional when percolating existing documents. 
{pull}39987[#39987] (issue: {issue}39963[#39963]) +* Add support for selecting percolator query candidate matches containing geo_point based queries {pull}26040[#26040] + +Plugins:: +* Plugin install: don't print download progress in batch mode {pull}36361[#36361] + +Ranking:: +* Add k parameter to PrecisionAtK metric {pull}27569[#27569] +* Vector field {pull}33022[#33022] (issue: {issue}31615[#31615]) + +Recovery:: +* SyncedFlushService.getShardRoutingTable() should use metadata to check for index existence {pull}37691[#37691] (issue: {issue}33888[#33888]) +* Make prepare engine step of recovery source non-blocking {pull}37573[#37573] (issue: {issue}37174[#37174]) +* Make recovery source send operations non-blocking {pull}37503[#37503] (issue: {issue}37458[#37458]) +* Prepare to make send translog of recovery non-blocking {pull}37458[#37458] (issue: {issue}37291[#37291]) +* Make finalize step of recovery source non-blocking {pull}37388[#37388] (issue: {issue}37291[#37291]) +* Make recovery source partially non-blocking {pull}37291[#37291] (issue: {issue}36195[#36195]) +* Do not mutate RecoveryResponse {pull}37204[#37204] (issue: {issue}37174[#37174]) +* Don't block on peer recovery on the target side {pull}37076[#37076] (issue: {issue}36195[#36195]) +* Reduce recovery time with compress or secure transport {pull}36981[#36981] (issue: {issue}33844[#33844]) +* Translog corruption marker {pull}33415[#33415] (issue: {issue}31389[#31389]) +* Do not wait for advancement of checkpoint in recovery {pull}39006[#39006] (issues: {issue}38949[#38949], {issue}39000[#39000]) + +Rollup:: +* Add non-X-Pack centric rollup endpoints {pull}36383[#36383] (issues: {issue}35958[#35958], {issue}35962[#35962]) +* Add more diagnostic stats to job {pull}35471[#35471] +* Add `wait_for_completion` option to StopRollupJob API {pull}34811[#34811] (issue: {issue}34574[#34574]) +* Replace the TreeMap in the composite aggregation {pull}36675[#36675] + +Recovery:: +* Exposed engine must include all operations below global checkpoint during rollback {pull}36159[#36159] (issue: {issue}32867[#32867]) + +Scripting:: +* Update joda compat methods to use compat class {pull}36654[#36654] +* [Painless] Add boxed type to boxed type casts for method/return {pull}36571[#36571] +* [Painless] Add def to boxed type casts {pull}36506[#36506] + +Settings:: +* Add user-defined cluster metadata {pull}33325[#33325] (issue: {issue}33220[#33220]) + +Search:: +* Make limit on number of expanded fields configurable {pull}35284[#35284] (issues: {issue}26541[#26541], {issue}34778[#34778]) +* Search: Simply SingleFieldsVisitor {pull}34052[#34052] +* Don't count hits via the collector if the hit count can be computed from index stats. {pull}33701[#33701] +* Limit the number of concurrent requests per node {pull}31206[#31206] (issue: {issue}31192[#31192]) +* Default max concurrent search req. 
numNodes * 5 {pull}31171[#31171] (issues: {issue}30783[#30783], {issue}30994[#30994]) +* Change ScriptException status to 400 (bad request) {pull}30861[#30861] (issue: {issue}12315[#12315]) +* Change default value to true for transpositions parameter of fuzzy query {pull}26901[#26901] +* Introducing "took" time (in ms) for `_msearch` {pull}23767[#23767] (issue: {issue}23131[#23131]) +* Add copy constructor to SearchRequest {pull}36641[#36641] (issue: {issue}32125[#32125]) +* Add raw sort values to SearchSortValues transport serialization {pull}36617[#36617] (issue: {issue}32125[#32125]) +* Add sort and collapse info to SearchHits transport serialization {pull}36555[#36555] (issue: {issue}32125[#32125]) +* Add default methods to DocValueFormat {pull}36480[#36480] +* Respect indices options on _msearch {pull}35887[#35887] +* Allow efficient can_match phases on frozen indices {pull}35431[#35431] (issues: {issue}34352[#34352], {issue}34357[#34357]) +* Add a new query type - ScriptScoreQuery {pull}34533[#34533] (issues: {issue}23850[#23850], {issue}27588[#27588], {issue}30303[#30303]) +* Tie break on cluster alias when merging shard search failures {pull}38715[#38715] (issue: {issue}38672[#38672]) +* Add finalReduce flag to SearchRequest {pull}38104[#38104] (issues: {issue}37000[#37000], {issue}37838[#37838]) +* Streamline skip_unavailable handling {pull}37672[#37672] (issue: {issue}32125[#32125]) +* Expose sequence number and primary terms in search responses {pull}37639[#37639] +* Add support for merging multiple search responses into one {pull}37566[#37566] (issue: {issue}32125[#32125]) +* Allow field types to optimize phrase prefix queries {pull}37436[#37436] (issue: {issue}31921[#31921]) +* Add support for providing absolute start time to SearchRequest {pull}37142[#37142] (issue: {issue}32125[#32125]) +* Ensure that local cluster alias is never treated as remote {pull}37121[#37121] (issues: {issue}32125[#32125], {issue}36997[#36997]) +* [API] spelling: cacheable {pull}37047[#37047] (issue: {issue}37035[#37035]) +* Add ability to suggest shard_size on coord node rewrite {pull}37017[#37017] (issues: {issue}32125[#32125], {issue}36997[#36997], {issue}37000[#37000]) +* Skip final reduction if SearchRequest holds a cluster alias {pull}37000[#37000] (issues: {issue}32125[#32125], {issue}36997[#36997]) +* Add support for local cluster alias to SearchRequest {pull}36997[#36997] (issue: {issue}32125[#32125]) +* Use SearchRequest copy constructor in ExpandSearchPhase {pull}36772[#36772] (issue: {issue}36641[#36641]) +* Add raw sort values to SearchSortValues transport serialization {pull}36617[#36617] (issue: {issue}32125[#32125]) +* Avoid BytesRef's copying in ScriptDocValues's Strings {pull}29581[#29581] (issue: {issue}29567[#29567]) + +Security:: +* Make credentials mandatory when launching xpack/migrate {pull}36197[#36197] (issues: {issue}29847[#29847], {issue}33972[#33972]) +* Move CAS operations in TokenService to sequence numbers {pull}38311[#38311] (issues: {issue}10708[#10708], {issue}37872[#37872]) +* Cleanup construction of interceptors {pull}38294[#38294] +* Add passphrase support to elasticsearch-keystore {pull}37472[#37472] (issue: {issue}32691[#32691]) +* Types removal security index template {pull}39705[#39705] (issue: {issue}38637[#38637]) +* Types removal security index template {pull}39542[#39542] (issue: {issue}38637[#38637]) + +Snapshot/Restore:: +* #31608 Add S3 Setting to Force Path Type Access {pull}34721[#34721] (issue: {issue}31608[#31608]) +* Allow Parallel Restore 
Operations {pull}36397[#36397] +* Repo Creation out of ClusterStateTask {pull}36157[#36157] (issue: {issue}9488[#9488]) +* Add read-only repository verification {pull}35731[#35731] (issue: {issue}35703[#35703]) +* RestoreService should update primary terms when restoring shards of existing indices {pull}38177[#38177] (issue: {issue}33888[#33888]) +* Allow open indices to be restored {pull}37733[#37733] +* Create specific exception for when snapshots are in progress {pull}37550[#37550] (issue: {issue}37541[#37541]) +* Make Atomic Blob Writes Mandatory {pull}37168[#37168] (issues: {issue}37011[#37011], {issue}37066[#37066]) +* Speed up HDFS Repository Writes {pull}37069[#37069] +* Implement Atomic Blob Writes for HDFS Repository {pull}37066[#37066] (issue: {issue}37011[#37011]) +* [API] spelling: repositories {pull}37053[#37053] (issue: {issue}37035[#37035]) +* Use CancellableThreads to Abort {pull}35901[#35901] (issue: {issue}21759[#21759]) +* S3 client encryption {pull}30513[#30513] (issues: {issue}11128[#11128], {issue}16843[#16843]) +* Mark Deleted Snapshot Directories with Tombstones {pull}40228[#40228] (issue: {issue}39852[#39852]) + +Stats:: +* Handle OS pretty name on old OS without OS release {pull}35453[#35453] (issue: {issue}35440[#35440]) + +Store:: +* Add RemoveCorruptedShardDataCommand {pull}32281[#32281] (issues: {issue}31389[#31389], {issue}32279[#32279]) +* Add option to force load term dict into memory {pull}39741[#39741] + +SQL:: +* Introduce support for NULL values {pull}34573[#34573] (issue: {issue}32079[#32079]) +* Extend the ODBC metric by differentiating between 32 and 64bit platforms {pull}36753[#36753] (issue: {issue}36740[#36740]) +* Fix wrong appliance of StackOverflow limit for IN {pull}36724[#36724] (issue: {issue}36592[#36592]) +* Introduce NOW/CURRENT_TIMESTAMP function {pull}36562[#36562] (issue: {issue}36534[#36534]) +* Move requests' parameters to requests JSON body {pull}36149[#36149] (issue: {issue}35992[#35992]) +* Make INTERVAL millis optional {pull}36043[#36043] (issue: {issue}36032[#36032]) +* Implement data type verification for conditionals {pull}35916[#35916] (issue: {issue}35907[#35907]) +* Implement GREATEST and LEAST functions {pull}35879[#35879] (issue: {issue}35878[#35878]) +* Implement null safe equality operator `<=>` {pull}35873[#35873] (issue: {issue}35871[#35871]) +* SYS COLUMNS returns ODBC specific schema {pull}35870[#35870] (issue: {issue}35376[#35376]) +* Polish grammar for intervals {pull}35853[#35853] +* Add filtering to SYS TYPES {pull}35852[#35852] (issue: {issue}35342[#35342]) +* Implement NULLIF(expr1, expr2) function {pull}35826[#35826] (issue: {issue}35818[#35818]) +* Lock down JDBC driver {pull}35798[#35798] (issue: {issue}35437[#35437]) +* Implement NVL(expr1, expr2) {pull}35794[#35794] (issue: {issue}35782[#35782]) +* Implement ISNULL(expr1, expr2) {pull}35793[#35793] (issue: {issue}35781[#35781]) +* Implement IFNULL variant of COALESCE {pull}35762[#35762] (issue: {issue}35749[#35749]) +* XPack FeatureSet functionality {pull}35725[#35725] (issue: {issue}34821[#34821]) +* Perform lazy evaluation of mismatched mappings {pull}35676[#35676] (issues: {issue}35659[#35659], {issue}35675[#35675]) +* Improve validation of unsupported fields {pull}35675[#35675] (issue: {issue}35673[#35673]) +* Move internals from Joda to java.time {pull}35649[#35649] (issue: {issue}35633[#35633]) +* Allow look-ahead resolution of aliases for WHERE clause {pull}38450[#38450] (issue: {issue}29983[#29983]) +* Implement CURRENT_DATE {pull}38175[#38175] 
(issue: {issue}38160[#38160]) +* Generate relevant error message when grouping functions are not used in GROUP BY {pull}38017[#38017] (issue: {issue}37952[#37952]) +* Skip the nested and object field types in case of an ODBC request {pull}37948[#37948] (issue: {issue}37801[#37801]) +* Add protocol tests and remove jdbc_type from drivers response {pull}37516[#37516] (issues: {issue}36635[#36635], {issue}36882[#36882]) +* Remove slightly used meta commands {pull}37506[#37506] (issue: {issue}37409[#37409]) +* Describe aliases as views {pull}37496[#37496] (issue: {issue}37422[#37422]) +* Make `FULL` non-reserved keyword in the grammar {pull}37377[#37377] (issue: {issue}37376[#37376]) +* Use declared source for error messages {pull}37161[#37161] +* Improve error message when unable to translate to ES query DSL {pull}37129[#37129] (issue: {issue}37040[#37040]) +* [API] spelling: subtract {pull}37055[#37055] (issue: {issue}37035[#37035]) +* [API] spelling: similar {pull}37054[#37054] (issue: {issue}37035[#37035]) +* [API] spelling: input {pull}37048[#37048] (issue: {issue}37035[#37035]) +* Enhance message for PERCENTILE[_RANK] with field as 2nd arg {pull}36933[#36933] (issue: {issue}36903[#36903]) +* Preserve original source for each expression {pull}36912[#36912] (issue: {issue}36894[#36894]) +* Extend the ODBC metric by differentiating between 32 and 64bit platforms {pull}36753[#36753] (issue: {issue}36740[#36740]) +* Fix wrong appliance of StackOverflow limit for IN {pull}36724[#36724] (issue: {issue}36592[#36592]) +* Enhance checks for inexact fields {pull}39427[#39427] (issue: {issue}38501[#38501]) +* Change the default precision for CURRENT_TIMESTAMP function {pull}39391[#39391] (issue: {issue}39288[#39288]) +* Add "fuzziness" option to QUERY and MATCH function predicates {pull}40529[#40529] (issue: {issue}40495[#40495]) +* Add "validate.properties" property to JDBC's allowed list of settings {pull}39050[#39050] (issue: {issue}38068[#38068]) + +Suggesters:: +* Remove unused empty constructors from suggestions classes {pull}37295[#37295] +* [API] spelling: likelihood {pull}37052[#37052] (issue: {issue}37035[#37035]) + +Task Management:: +* Periodically try to reassign unassigned persistent tasks {pull}36069[#36069] (issue: {issue}35792[#35792]) +* Only require task permissions {pull}35667[#35667] (issue: {issue}35573[#35573]) +* Retry if task can't be written {pull}35054[#35054] (issue: {issue}33764[#33764]) + +ZenDiscovery:: +* Introduce vote withdrawal {pull}35446[#35446] +* Add basic Zen1 transport-level BWC {pull}35443[#35443] +* Add diff-based publishing {pull}35290[#35290] +* Introduce auto_shrink_voting_configuration setting {pull}35217[#35217] +* Introduce transport API for cluster bootstrapping {pull}34961[#34961] +* Reconfigure cluster as its membership changes {pull}34592[#34592] (issue: {issue}33924[#33924]) +* Fail fast on disconnects {pull}34503[#34503] +* Add storage-layer disruptions to CoordinatorTests {pull}34347[#34347] +* Add low-level bootstrap implementation {pull}34345[#34345] +* Gather votes from all nodes {pull}34335[#34335] +* Add Cluster State Applier {pull}34257[#34257] +* Add safety phase to CoordinatorTests {pull}34241[#34241] +* Integrate FollowerChecker with Coordinator {pull}34075[#34075] +* Integrate LeaderChecker with Coordinator {pull}34049[#34049] +* Trigger join when active master detected {pull}34008[#34008] +* Update PeerFinder term on term bump {pull}33992[#33992] +* Calculate optimal cluster configuration {pull}33924[#33924] +* Introduce 
FollowersChecker {pull}33917[#33917] +* Integrate publication pipeline into Coordinator {pull}33771[#33771] +* Add DisruptableMockTransport {pull}33713[#33713] +* Implement basic cluster formation {pull}33668[#33668] +* Introduce LeaderChecker {pull}33024[#33024] +* Add leader-side join handling logic {pull}33013[#33013] +* Add PeerFinder#onFoundPeersUpdated {pull}32939[#32939] +* Introduce PreVoteCollector {pull}32847[#32847] +* Introduce ElectionScheduler {pull}32846[#32846] +* Introduce ElectionScheduler {pull}32709[#32709] +* Add HandshakingTransportAddressConnector {pull}32643[#32643] (issue: {issue}32246[#32246]) +* Add UnicastConfiguredHostsResolver {pull}32642[#32642] (issue: {issue}32246[#32246]) +* Cluster state publication pipeline {pull}32584[#32584] (issue: {issue}32006[#32006]) +* Introduce gossip-like discovery of master nodes {pull}32246[#32246] +* Add core coordination algorithm for cluster state publishing {pull}32171[#32171] (issue: {issue}32006[#32006]) +* Add term and config to cluster state {pull}32100[#32100] (issue: {issue}32006[#32006]) +* Add discovery types to cluster stats {pull}36442[#36442] +* Introduce `zen2` discovery type {pull}36298[#36298] +* Persist cluster states the old way on non-master-eligible nodes {pull}36247[#36247] (issue: {issue}3[#3]) +* Storage layer WriteStateException propagation {pull}36052[#36052] +* Implement Tombstone REST APIs {pull}36007[#36007] +* Update default for USE_ZEN2 to true {pull}35998[#35998] +* Add warning if cluster fails to form fast enough {pull}35993[#35993] +* Allow Setting a List of Bootstrap Nodes to Wait for {pull}35847[#35847] +* VotingTombstone class {pull}35832[#35832] +* PersistedState interface implementation {pull}35819[#35819] +* Support rolling upgrades from Zen1 {pull}35737[#35737] +* Add lag detector {pull}35685[#35685] +* Move ClusterState fields to be persisted to ClusterState.MetaData {pull}35625[#35625] +* Introduce ClusterBootstrapService {pull}35488[#35488] +* Introduce vote withdrawal {pull}35446[#35446] +* Add basic Zen1 transport-level BWC {pull}35443[#35443] +* Add elasticsearch-node detach-cluster tool {pull}37979[#37979] +* Deprecate minimum_master_nodes {pull}37868[#37868] +* Step down as master when configured out of voting configuration {pull}37802[#37802] (issue: {issue}37712[#37712]) +* Enforce cluster UUIDs {pull}37775[#37775] +* Bubble exceptions up in ClusterApplierService {pull}37729[#37729] +* Use m_m_nodes from Zen1 master for Zen2 bootstrap {pull}37701[#37701] +* Add tool elasticsearch-node unsafe-bootstrap {pull}37696[#37696] +* Report terms and version if cluster does not form {pull}37473[#37473] +* Bootstrap a Zen2 cluster once quorum is discovered {pull}37463[#37463] +* Zen2: Add join validation {pull}37203[#37203] +* Publish cluster states in chunks {pull}36973[#36973] + + + +[[bug-7.0.0]] +[float] +=== Bug fixes + +Aggregations:: +* Fix InternalAutoDateHistogram reproducible failure {pull}32723[#32723] (issue: {issue}32215[#32215]) +* fix MultiValuesSourceFieldConfig toXContent {pull}36525[#36525] (issue: {issue}36474[#36474]) +* Cache the score of the parent document in the nested agg {pull}36019[#36019] (issues: {issue}34555[#34555], {issue}35985[#35985]) +* Correct implemented interface of ParsedReverseNested {pull}35455[#35455] (issue: {issue}35449[#35449]) +* Handle IndexOrDocValuesQuery in composite aggregation {pull}35392[#35392] +* Don't load global ordinals with the `map` execution_hint {pull}37833[#37833] (issue: {issue}37705[#37705]) +* Issue #37303 - Invalid 
variance fix {pull}37384[#37384] (issue: {issue}37303[#37303]) +* Skip sibling pipeline aggregators reduction during non-final reduce {pull}40101[#40101] (issue: {issue}40059[#40059]) +* Extend nextDoc to delegate to the wrapped doc-value iterator for date_nanos {pull}39176[#39176] (issue: {issue}39107[#39107]) +* Only create MatrixStatsResults on final reduction {pull}38130[#38130] (issue: {issue}37587[#37587]) + +Allocation:: +* Fix _host based require filters {pull}38173[#38173] +* ALLOC: Fail Stale Primary Alloc. Req. without Data {pull}37226[#37226] (issue: {issue}37098[#37098]) + +Analysis:: +* Close #26771: beider_morse phonetic encoder failure when languageset unspecified {pull}26848[#26848] (issue: {issue}26771[#26771]) +* Fix PreConfiguredTokenFilters getSynonymFilter() implementations {pull}38839[#38839] (issue: {issue}38793[#38793]) + +Audit:: +* Fix origin.type for connection_* events {pull}36410[#36410] +* Fix IndexAuditTrail rolling restart on rollover edge {pull}35988[#35988] (issue: {issue}33867[#33867]) +* Fix NPE in Logfile Audit Filter {pull}38120[#38120] (issue: {issue}38097[#38097]) +* LoggingAuditTrail correctly handle ReplicatedWriteRequest {pull}39925[#39925] (issue: {issue}39555[#39555]) + +Authorization:: +* Empty GetAliases authorization fix {pull}34444[#34444] (issue: {issue}31952[#31952]) + +Authentication:: +* Fix kerberos setting registration {pull}35986[#35986] (issues: {issue}30241[#30241], {issue}35942[#35942]) +* Add support for Kerberos V5 Oid {pull}35764[#35764] (issue: {issue}34763[#34763]) +* Enhance parsing of StatusCode in SAML Responses {pull}38628[#38628] +* Limit token expiry to 1 hour maximum {pull}38244[#38244] +* Fix expired token message in Exception header {pull}37196[#37196] +* Fix NPE in CachingUsernamePasswordRealm {pull}36953[#36953] (issue: {issue}36951[#36951]) +* Allow non super users to create API keys {pull}40028[#40028] (issue: {issue}40029[#40029]) +* Use consistent view of realms for authentication {pull}38815[#38815] (issue: {issue}30301[#30301]) +* Correct authenticate response for API key {pull}39684[#39684] +* Fix security index auto-create and state recovery race {pull}39582[#39582] + +Build:: +* Use explicit deps on test tasks for check {pull}36325[#36325] +* Fix jdbc jar pom to not include deps {pull}36036[#36036] (issue: {issue}32014[#32014]) +* Fix official plugins list {pull}35661[#35661] (issue: {issue}35623[#35623]) + +CCR:: +* Fix follow stats API's follower index filtering feature {pull}36647[#36647] +* AutoFollowCoordinator should tolerate that auto follow patterns may be removed {pull}35945[#35945] (issue: {issue}35937[#35937]) +* Only auto follow indices when all primary shards have started {pull}35814[#35814] (issue: {issue}35480[#35480]) +* Avoid NPE in follower stats when no tasks metadata {pull}35802[#35802] +* Fix the names of CCR stats endpoints in usage API {pull}35438[#35438] +* Prevent CCR recovery from missing documents {pull}38237[#38237] +* Fix file reading in ccr restore service {pull}38117[#38117] +* Correct argument names in update mapping/settings from leader {pull}38063[#38063] +* Ensure changes requests return the latest mapping version {pull}37633[#37633] +* Do not set fatal exception when shard follow task is stopped. {pull}37603[#37603] +* Add fatal_exception field for ccr stats in monitoring mapping {pull}37563[#37563] +* Do not add index event listener if CCR disabled {pull}37432[#37432] +* When removing an AutoFollower also mark it as removed. 
{pull}37402[#37402] (issue: {issue}36761[#36761]) +* Make shard follow tasks more resilient for restarts {pull}37239[#37239] (issue: {issue}37231[#37231]) +* Resume follow Api should not require a request body {pull}37217[#37217] (issue: {issue}37022[#37022]) +* Report error if auto follower tries auto follow a leader index with soft deletes disabled {pull}36886[#36886] (issue: {issue}33007[#33007]) +* Remote cluster license checker and no license info. {pull}36837[#36837] (issue: {issue}36815[#36815]) +* Make CCR resilient against missing remote cluster connections {pull}36682[#36682] (issues: {issue}36255[#36255], {issue}36667[#36667]) +* AutoFollowCoordinator and follower index already created {pull}36540[#36540] (issue: {issue}33007[#33007]) +* Safe publication of AutoFollowCoordinator {pull}40153[#40153] (issue: {issue}38560[#38560]) +* Enable reading auto-follow patterns from x-content {pull}40130[#40130] (issue: {issue}40128[#40128]) +* Stop auto-followers on shutdown {pull}40124[#40124] +* Protect against the leader index being removed {pull}39351[#39351] (issue: {issue}39308[#39308]) +* Handle the fact that `ShardStats` instance may have no commit or seqno stats {pull}38782[#38782] (issue: {issue}38779[#38779]) +* Fix LocalIndexFollowingIT#testRemoveRemoteConnection() test {pull}38709[#38709] (issue: {issue}38695[#38695]) +* Fix shard follow task startup error handling {pull}39053[#39053] (issue: {issue}38779[#38779]) +* Filter out upgraded version index settings when starting index following {pull}38838[#38838] (issue: {issue}38835[#38835]) + +Circuit Breakers:: +* Modify `BigArrays` to take name of circuit breaker {pull}36461[#36461] (issue: {issue}31435[#31435]) + +Core:: +* Fix CompositeBytesReference#slice to not throw AIOOBE with legal offsets. {pull}35955[#35955] (issue: {issue}35950[#35950]) +* Suppress CachedTimeThread in hot threads output {pull}35558[#35558] (issue: {issue}23175[#23175]) +* Upgrade to Joda 2.10.1 {pull}35410[#35410] (issue: {issue}33749[#33749]) + +CRUD:: +* Fix Reindex from remote query logic {pull}36908[#36908] +* Synchronize WriteReplicaResult callbacks {pull}36770[#36770] +* Cascading primary failure lead to MSU too low {pull}40249[#40249] +* Store Pending Deletions Fix {pull}40345[#40345] (issue: {issue}40249[#40249]) +* ShardBulkAction ignore primary response on primary {pull}38901[#38901] + +Cluster Coordination:: +* Fix node tool cleanup {pull}39389[#39389] +* Avoid serialising state if it was already serialised {pull}39179[#39179] +* Do not perform cleanup if Manifest write fails with dirty exception {pull}40519[#40519] (issue: {issue}39077[#39077]) +* Cache compressed cluster state size {pull}39827[#39827] (issue: {issue}39806[#39806]) +* Drop node if asymmetrically partitioned from master {pull}39598[#39598] +* Fixing the custom object serialization bug in diffable utils. 
{pull}39544[#39544] +* Clean GatewayAllocator when stepping down as master {pull}38885[#38885] + +Distributed:: +* Combine the execution of an exclusive replica operation with primary term update {pull}36116[#36116] (issue: {issue}35850[#35850]) +* ActiveShardCount should not fail when closing the index {pull}35936[#35936] +* TransportVerifyShardBeforeCloseAction should force a flush {pull}38401[#38401] (issues: {issue}33888[#33888], {issue}37961[#37961]) +* Fix limit on retaining sequence number {pull}37992[#37992] (issue: {issue}37165[#37165]) +* Close Index API should force a flush if a sync is needed {pull}37961[#37961] (issues: {issue}33888[#33888], {issue}37426[#37426]) +* Force Refresh Listeners when Acquiring all Operation Permits {pull}36835[#36835] +* Replaced the word 'shards' with 'replicas' in an error message. (#36234) {pull}36275[#36275] (issue: {issue}36234[#36234]) +* Ignore waitForActiveShards when syncing leases {pull}39224[#39224] (issue: {issue}39089[#39089]) +* Fix synchronization in LocalCheckpointTracker#contains {pull}38755[#38755] (issues: {issue}33871[#33871], {issue}38633[#38633]) +* Enforce retention leases require soft deletes {pull}39922[#39922] (issue: {issue}39914[#39914]) +* Treat TransportService stopped error as node is closing {pull}39800[#39800] (issue: {issue}39584[#39584]) +* Use cause to determine if node with primary is closing {pull}39723[#39723] (issue: {issue}39584[#39584]) +* Don’t ack if unable to remove failing replica {pull}39584[#39584] (issue: {issue}39467[#39467]) +* Fix NPE on Stale Index in IndicesService {pull}38891[#38891] (issue: {issue}38845[#38845]) + +Engine:: +* Set Lucene version upon index creation. {pull}36038[#36038] (issue: {issue}33826[#33826]) +* Wrap can_match reader with ElasticsearchDirectoryReader {pull}35857[#35857] +* Copy checkpoint atomically when rolling generation {pull}35407[#35407] +* Subclass NIOFSDirectory instead of using FileSwitchDirectory {pull}37140[#37140] (issues: {issue}36668[#36668], {issue}37111[#37111]) +* Bubble up exception when processing NoOp {pull}39338[#39338] (issue: {issue}38898[#38898]) +* ReadOnlyEngine should update translog recovery state information {pull}39238[#39238] +* Advance max_seq_no before add operation to Lucene {pull}38879[#38879] (issue: {issue}31629[#31629]) + +Features/Features:: +* Only count some fields types for deprecation check {pull}40166[#40166] +* Deprecation check for indices with very large numbers of fields {pull}39869[#39869] (issue: {issue}39851[#39851]) + +Features/ILM:: +* Preserve ILM operation mode when creating new lifecycles {pull}38134[#38134] (issues: {issue}38229[#38229], {issue}38230[#38230]) +* Retry ILM steps that fail due to SnapshotInProgressException {pull}37624[#37624] (issues: {issue}37541[#37541], {issue}37552[#37552]) +* Remove `indexing_complete` when removing policy {pull}36620[#36620] +* Handle failure to release retention leases in ILM {pull}39281[#39281] (issue: {issue}39181[#39181]) +* Correct ILM metadata minimum compatibility version {pull}40569[#40569] (issue: {issue}40565[#40565]) +* Handle null retention leases in WaitForNoFollowersStep {pull}40477[#40477] +* Allow ILM to stop if indices have nonexistent policies {pull}40820[#40820] (issue: {issue}40824[#40824]) + +Features/Indices APIs:: +* Validate top-level keys for create index request (#23755) {pull}23869[#23869] (issue: {issue}23755[#23755]) +* Reject delete index requests with a body {pull}37501[#37501] (issue: {issue}8217[#8217]) +* Fix duplicate phrase in shrink/split 
error message {pull}36734[#36734] (issue: {issue}36729[#36729]) +* Get Aliases with wildcard exclusion expression {pull}34230[#34230] (issues: {issue}33518[#33518], {issue}33805[#33805], {issue}34144[#34144]) + +Features/Ingest:: +* Fix Deprecation Warning in Script Proc. {pull}32407[#32407] +* Support unknown fields in ingest pipeline map configuration {pull}38352[#38352] (issue: {issue}36938[#36938]) +* Ingest node - user_agent, move device parsing to an object {pull}38115[#38115] (issues: {issue}37329[#37329], {issue}38094[#38094]) +* Fix on_failure with Drop processor {pull}36686[#36686] (issue: {issue}36151[#36151]) +* Support default pipelines + bulk upserts {pull}36618[#36618] (issue: {issue}36219[#36219]) +* Ingest ingest then create index {pull}39607[#39607] (issues: {issue}32758[#32758], {issue}32786[#32786], {issue}36545[#36545]) + +Features/Java High Level REST Client:: +* Drop extra level from user parser {pull}34932[#34932] +* Update IndexTemplateMetaData to allow unknown fields {pull}38448[#38448] (issue: {issue}36938[#36938]) +* `if_seq_no` and `if_primary_term` parameters aren't wired correctly in REST Client's CRUD API {pull}38411[#38411] +* Update Rollup Caps to allow unknown fields {pull}38339[#38339] (issue: {issue}36938[#36938]) +* Fix ILM explain response to allow unknown fields {pull}38054[#38054] (issue: {issue}36938[#36938]) +* Fix ILM status to allow unknown fields {pull}38043[#38043] (issue: {issue}36938[#36938]) +* Fix ILM Lifecycle Policy to allow unknown fields {pull}38041[#38041] (issue: {issue}36938[#36938]) +* Update authenticate to allow unknown fields {pull}37713[#37713] (issue: {issue}36938[#36938]) +* Update verify repository to allow unknown fields {pull}37619[#37619] (issue: {issue}36938[#36938]) +* Update get users to allow unknown fields {pull}37593[#37593] (issue: {issue}36938[#36938]) +* Update Execute Watch to allow unknown fields {pull}37498[#37498] (issue: {issue}36938[#36938]) +* Update Put Watch to allow unknown fields {pull}37494[#37494] (issue: {issue}36938[#36938]) +* Update Delete Watch to allow unknown fields {pull}37435[#37435] (issue: {issue}36938[#36938]) +* Fix rest reindex test for IPv4 addresses {pull}37310[#37310] +* Fix weighted_avg parser not found for RestHighLevelClient {pull}37027[#37027] (issue: {issue}36861[#36861]) + +Features/Java Low Level REST Client:: +* Remove I/O pool blocking sniffing call from onFailure callback, add some logic around host exclusion {pull}27985[#27985] (issue: {issue}27984[#27984]) +* Fix potential IllegalCapacityException in LLRC when selecting nodes {pull}37821[#37821] + +Features/Monitoring:: +* Allow built-in monitoring_user role to call GET _xpack API {pull}38060[#38060] (issue: {issue}37970[#37970]) +* Don't emit deprecation warnings on calls to the monitoring bulk API. 
{pull}39805[#39805] (issue: {issue}39336[#39336]) + +Features/Watcher:: +* Ignore system locale/timezone in croneval CLI tool {pull}33215[#33215] +* Support merge nested Map in list for JIRA configurations {pull}37634[#37634] (issue: {issue}30068[#30068]) +* Watcher accounts constructed lazily {pull}36656[#36656] +* Ensures watch definitions are valid json {pull}30692[#30692] (issue: {issue}29746[#29746]) +* Use non-ILM template setting up watch history template & ILM disabled {pull}39325[#39325] (issue: {issue}38805[#38805]) +* Only flush Watcher's bulk processor if Watcher is enabled {pull}38803[#38803] (issue: {issue}38798[#38798]) +* Fix Watcher stats class cast exception {pull}39821[#39821] (issue: {issue}39780[#39780]) +* Use any index specified by .watches for Watcher {pull}39541[#39541] (issue: {issue}39478[#39478]) +* Resolve concurrency with watcher trigger service {pull}39092[#39092] (issue: {issue}39087[#39087]) +* Metric on watcher stats is a list not an enum {pull}39114[#39114] + +Geo:: +* Test `GeoShapeQueryTests#testPointsOnly` fails {pull}27454[#27454] +* More robust handling of ignore_malformed in geoshape parsing {pull}35603[#35603] (issues: {issue}34047[#34047], {issue}34498[#34498]) +* Better handling of malformed geo_points {pull}35554[#35554] (issue: {issue}35419[#35419]) +* Enables coerce support in WKT polygon parser {pull}35414[#35414] (issue: {issue}35059[#35059]) +* Fix GeoHash PrefixTree BWC {pull}38584[#38584] (issue: {issue}38494[#38494]) +* Do not normalize the longitude with value -180 for Lucene shapes {pull}37299[#37299] (issue: {issue}37297[#37297]) +* Geo Point parse error fix {pull}40447[#40447] (issue: {issue}17617[#17617]) + +Highlighting:: +* Bug fix for AnnotatedTextHighlighter - port of 39525 {pull}39750[#39750] (issue: {issue}39525[#39525]) + +Infra/Core:: +* Ensure shard is refreshed once it's inactive {pull}27559[#27559] (issue: {issue}27500[#27500]) +* Bubble-up exceptions from scheduler {pull}38317[#38317] (issue: {issue}38014[#38014]) +* Revert back to joda's multi date formatters {pull}36814[#36814] (issues: {issue}36447[#36447], {issue}36602[#36602]) +* Propagate Errors in executors to uncaught exception handler {pull}36137[#36137] (issue: {issue}28667[#28667]) +* Correct name of basic_date_time_no_millis {pull}39367[#39367] +* Allow single digit milliseconds in strict date parsing {pull}40676[#40676] (issue: {issue}40403[#40403]) +* Parse composite patterns using ClassicFormat.parseObject {pull}40100[#40100] (issue: {issue}39916[#39916]) +* Bat scripts to work with JAVA_HOME with parantheses {pull}39712[#39712] (issues: {issue}30606[#30606], {issue}33405[#33405], {issue}38578[#38578], {issue}38624[#38624]) +* Change licence expiration date pattern {pull}39681[#39681] (issue: {issue}39136[#39136]) +* Fix DateFormatters.parseMillis when no timezone is given {pull}39100[#39100] (issue: {issue}39067[#39067]) +* Don't close caches while there might still be in-flight requests. 
{pull}38958[#38958] (issue: {issue}37117[#37117]) +* Allow single digit milliseconds in strict date parsing {pull}40676[#40676] (issue: {issue}40403[#40403]) + +Infra/Packaging:: +* Remove NOREPLACE for /etc/elasticsearch in rpm and deb {pull}37839[#37839] +* Packaging: Update marker used to allow ELASTIC_PASSWORD {pull}37243[#37243] (issue: {issue}37240[#37240]) +* Remove permission editing in postinst {pull}37242[#37242] (issue: {issue}37143[#37143]) +* Some elasticsearch-cli tools could not be run not from ES_HOME {pull}39937[#39937] +* Obsolete pre 7.0 noarch package in rpm {pull}39472[#39472] (issue: {issue}39414[#39414]) +* Suppress error message when `/proc/sys/vm/max_map_count` is not exists. {pull}35933[#35933] +* Use TAR instead of DOCKER build type before 6.7.0 {pull}40723[#40723] (issues: {issue}39378[#39378], {issue}40511[#40511]) +* Source additional files correctly in elasticsearch-cli {pull}40890[#40890] (issue: {issue}40889[#40889]) + +Infra/REST API:: +* Reject all requests that have an unconsumed body {pull}37504[#37504] (issues: {issue}30792[#30792], {issue}37501[#37501], {issue}8217[#8217]) +* Fix #38623 remove xpack namespace REST API {pull}38625[#38625] +* Remove the "xpack" namespace from the REST API {pull}38623[#38623] +* Update spec files that erroneously documented parts as optional {pull}39122[#39122] +* ilm.explain_lifecycle documents human again {pull}39113[#39113] +* Index on rollup.rollup_search.json is a list {pull}39097[#39097] + +Infra/Scripting:: +* Fix Painless void return bug {pull}38046[#38046] +* Correct bug in ScriptDocValues {pull}40488[#40488] + +Infra/Settings:: +* Change format how settings represent lists / array {pull}26723[#26723] +* Fix setting by time unit {pull}37192[#37192] +* Fix handling of fractional byte size value settings {pull}37172[#37172] +* Fix handling of fractional time value settings {pull}37171[#37171] + +Infra/Transport API:: +* Remove version read/write logic in Verify Response {pull}30879[#30879] (issue: {issue}30807[#30807]) +* Enable muted Repository test {pull}30875[#30875] (issue: {issue}30807[#30807]) +* Bad regex in CORS settings should throw a nicer error {pull}29108[#29108] + +Index APIs:: +* Fix duplicate phrase in shrink/split error message {pull}36734[#36734] (issue: {issue}36729[#36729]) +* Raise a 404 exception when document source is not found (#33384) {pull}34083[#34083] (issue: {issue}33384[#33384]) + +Ingest:: +* Fix on_failure with Drop processor {pull}36686[#36686] (issue: {issue}36151[#36151]) +* Support default pipelines + bulk upserts {pull}36618[#36618] (issue: {issue}36219[#36219]) +* Support default pipeline through an alias {pull}36231[#36231] (issue: {issue}35817[#35817]) + +License:: +* Update versions for start_trial after backport {pull}30218[#30218] (issue: {issue}30135[#30135]) +* Do not serialize basic license exp in x-pack info {pull}30848[#30848] +* Update versions for start_trial after backport {pull}30218[#30218] (issue: {issue}30135[#30135]) + +Machine Learning:: +* Interrupt Grok in file structure finder timeout {pull}36588[#36588] +* Prevent stack overflow while copying ML jobs and datafeeds {pull}36370[#36370] (issue: {issue}36360[#36360]) +* Adjust file structure finder parser config {pull}35935[#35935] +* Fix find_file_structure NPE with should_trim_fields {pull}35465[#35465] (issue: {issue}35462[#35462]) +* Prevent notifications being created on deletion of a non existent job {pull}35337[#35337] (issues: {issue}34058[#34058], {issue}35336[#35336]) +* Clear Job#finished_time 
when it is opened (#32605) {pull}32755[#32755] +* Fix thread leak when waiting for job flush (#32196) {pull}32541[#32541] (issue: {issue}32196[#32196]) +* Fix CPoissonMeanConjugate sampling error. {ml-pull}335[#335] +* Report index unavailable instead of waiting for lazy node {pull}38423[#38423] +* Fix error race condition on stop _all datafeeds and close _all jobs {pull}38113[#38113] (issue: {issue}37959[#37959]) +* Update ML results mappings on process start {pull}37706[#37706] (issue: {issue}37607[#37607]) +* Prevent submit after autodetect worker is stopped {pull}37700[#37700] (issue: {issue}37108[#37108]) +* Fix ML datafeed CCS with wildcarded cluster name {pull}37470[#37470] (issue: {issue}36228[#36228]) +* Update error message for process update {pull}37363[#37363] +* Wait for autodetect to be ready in the datafeed {pull}37349[#37349] (issues: {issue}36810[#36810], {issue}37227[#37227]) +* Stop datafeeds running when their jobs are stale {pull}37227[#37227] (issue: {issue}36810[#36810]) +* Order GET job stats response by job id {pull}36841[#36841] (issue: {issue}36683[#36683]) +* Make GetJobStats work with arbitrary wildcards and groups {pull}36683[#36683] (issue: {issue}34745[#34745]) +* Fix datafeed skipping first bucket after lookback when aggs are … {pull}39859[#39859] (issue: {issue}39842[#39842]) +* Refactoring lazy query and agg parsing {pull}39776[#39776] (issue: {issue}39528[#39528]) +* Stop the ML memory tracker before closing node {pull}39111[#39111] (issue: {issue}37117[#37117]) +* Scrolling datafeed should clear scroll contexts on error {pull}40773[#40773] (issue: {issue}40772[#40772]) + +Mapping:: +* Ensure that field aliases cannot be used in multi-fields. {pull}32219[#32219] +* Treat put-mapping calls with `_doc` as a top-level key as typed calls. {pull}38032[#38032] +* Correct deprec log in RestGetFieldMappingAction {pull}37843[#37843] (issue: {issue}37667[#37667]) +* Restore a noop _all metadata field for 6x indices {pull}37808[#37808] (issue: {issue}37429[#37429]) +* Make sure PutMappingRequest accepts content types other than JSON. {pull}37720[#37720] +* Make sure to use the resolved type in DocumentMapperService#extractMappings. {pull}37451[#37451] (issue: {issue}36811[#36811]) +* Improve Precision for scaled_float {pull}37169[#37169] (issue: {issue}32570[#32570]) +* Make sure to accept empty unnested mappings in create index requests. {pull}37089[#37089] +* Stop automatically nesting mappings in index creation requests. {pull}36924[#36924] +* Rewrite SourceToParse with resolved docType {pull}36921[#36921] (issues: {issue}35790[#35790], {issue}36769[#36769]) +* Optimise rejection of out-of-range `long` values {pull}40325[#40325] (issues: {issue}26137[#26137], {issue}40323[#40323]) +* Make sure to reject mappings with type _doc when include_type_name is false. 
{pull}38270[#38270] (issue: {issue}38266[#38266]) + +Network:: +* Adjust SSLDriver behavior for JDK11 changes {pull}32145[#32145] (issues: {issue}32122[#32122], {issue}32144[#32144]) +* Netty4SizeHeaderFrameDecoder error {pull}31057[#31057] +* Fix memory leak in http pipelining {pull}30815[#30815] (issue: {issue}30801[#30801]) +* Fix issue with finishing handshake in ssl driver {pull}30580[#30580] +* Do not resolve addresses in remote connection info {pull}36671[#36671] (issue: {issue}35658[#35658]) +* Always compress based on the settings {pull}36522[#36522] (issue: {issue}36399[#36399]) +* http.publish_host Should Contain CNAME {pull}32806[#32806] (issue: {issue}22029[#22029]) +* Add TRACE, CONNECT, and PATCH http methods {pull}31035[#31035] (issue: {issue}31017[#31017]) +* Transport client: Don't validate node in handshake {pull}30737[#30737] (issue: {issue}30141[#30141]) +* Remove potential nio selector leak {pull}27825[#27825] +* Fix issue where the incorrect buffers are written {pull}27695[#27695] (issue: {issue}27551[#27551]) +* Do not set SO_LINGER on server channels {pull}26997[#26997] +* Do not set SO_LINGER to 0 when not shutting down {pull}26871[#26871] (issue: {issue}26764[#26764]) +* Release pipelined http responses on close {pull}26226[#26226] +* Reload SSL context on file change for LDAP {pull}36937[#36937] (issues: {issue}30509[#30509], {issue}36923[#36923]) + +Packaging:: +* Fix error message when package install fails due to missing Java {pull}36077[#36077] (issue: {issue}31845[#31845]) +* Add missing entries to conffiles {pull}35810[#35810] (issue: {issue}35691[#35691]) + +Plugins:: +* Ensure that azure stream has socket privileges {pull}28751[#28751] (issue: {issue}28662[#28662]) + +Ranking:: +* QueryRescorer should keep the window size when rewriting {pull}36836[#36836] + +Recovery:: +* Register ResyncTask.Status as a NamedWriteable {pull}36610[#36610] +* RecoveryMonitor#lastSeenAccessTime should be volatile {pull}36781[#36781] +* Create retention leases file during recovery {pull}39359[#39359] (issue: {issue}37165[#37165]) +* Recover peers from translog, ignoring soft deletes {pull}38904[#38904] (issue: {issue}37165[#37165]) +* Retain history for peer recovery using leases {pull}38855[#38855] +* Resync should not send operations without sequence number {pull}40433[#40433] + +Rollup:: +* Fix rollup search statistics {pull}36674[#36674] +* Fix Rollup's metadata parser {pull}36791[#36791] (issue: {issue}36726[#36726]) +* Remove timezone validation on rollup range queries {pull}40647[#40647] +* Rollup ignores time_zone on date histogram {pull}40844[#40844] + +Scripting:: +* Properly support no-offset date formatting {pull}36316[#36316] (issue: {issue}36306[#36306]) +* [Painless] Generate Bridge Methods {pull}36097[#36097] +* Fix serialization bug in painless execute api request {pull}36075[#36075] (issue: {issue}36050[#36050]) +* Actually add joda time back to whitelist {pull}35965[#35965] (issue: {issue}35915[#35915]) +* Add back joda to whitelist {pull}35915[#35915] (issue: {issue}35913[#35913]) + +Settings:: +* Correctly Identify Noop Updates {pull}36560[#36560] (issue: {issue}36496[#36496]) + +Search:: +* Ensure realtime 
`_get` and `_termvectors` don't run on the network thread {pull}33814[#33814] (issue: {issue}27500[#27500]) +* [bug] fuzziness custom auto {pull}33462[#33462] (issue: {issue}33454[#33454]) +* Fix inner hits retrieval when stored fields are disabled (_none_) {pull}33018[#33018] (issue: {issue}32941[#32941]) +* Set maxScore for empty TopDocs to Nan rather than 0 {pull}32938[#32938] +* Handle leniency for cross_fields type in multi_match query {pull}27045[#27045] (issue: {issue}23210[#23210]) +* Raise IllegalArgumentException instead if query validation failed {pull}26811[#26811] (issue: {issue}26799[#26799]) +* Inner hits fail to propagate doc-value format. {pull}36310[#36310] +* Fix custom AUTO issue with Fuzziness#toXContent {pull}35807[#35807] (issue: {issue}33462[#33462]) +* Fix analyzed prefix query in query_string {pull}35756[#35756] (issue: {issue}31702[#31702]) +* Fix problem with MatchNoDocsQuery in disjunction queries {pull}35726[#35726] (issue: {issue}34708[#34708]) +* Fix phrase_slop in query_string query {pull}35533[#35533] (issue: {issue}35125[#35125]) +* Add a More Like This query routing requirement check (#29678) {pull}33974[#33974] +* Look up connection using the right cluster alias when releasing contexts {pull}38570[#38570] +* Fix fetch source option in expand search phase {pull}37908[#37908] (issue: {issue}23829[#23829]) +* Change `rational` to `saturation` in script_score {pull}37766[#37766] (issue: {issue}37714[#37714]) +* Throw if two inner_hits have the same name {pull}37645[#37645] (issue: {issue}37584[#37584]) +* Ensure either success or failure path for SearchOperationListener is called {pull}37467[#37467] (issue: {issue}37185[#37185]) +* `query_string` should use indexed prefixes {pull}36895[#36895] +* Avoid duplicate types deprecation messages in search-related APIs. {pull}36802[#36802] +* Serialize top-level pipeline aggs as part of InternalAggregations {pull}40177[#40177] (issues: {issue}40059[#40059], {issue}40101[#40101]) +* CCS: Skip empty search hits when minimizing round-trips {pull}40098[#40098] (issues: {issue}32125[#32125], {issue}40067[#40067]) +* CCS: Disable minimizing round-trips when dfs is requested {pull}40044[#40044] (issue: {issue}32125[#32125]) +* Fix Fuzziness#asDistance(String) {pull}39643[#39643] (issue: {issue}39614[#39614]) +* Fix alias resolution runtime complexity. 
{pull}40263[#40263] (issue: {issue}40248[#40248]) + +Security:: +* Handle 6.4.0+ BWC for Application Privileges {pull}32929[#32929] +* Remove license state listeners on closeables {pull}36308[#36308] (issues: {issue}33328[#33328], {issue}35627[#35627], {issue}35628[#35628]) +* Fix exit code for Security CLI tools {pull}37956[#37956] (issue: {issue}37841[#37841]) +* Fix potential NPE in UsersTool {pull}37660[#37660] +* Remove dynamic objects from security index {pull}40499[#40499] (issue: {issue}35460[#35460]) +* Fix libs:ssl-config project setup {pull}39074[#39074] +* Do not create the missing index when invoking getRole {pull}39039[#39039] + +Snapshot/Restore:: +* Upgrade GCS Dependencies to 1.55.0 {pull}36634[#36634] (issues: {issue}35229[#35229], {issue}35459[#35459]) +* Improve Resilience SnapshotShardService {pull}36113[#36113] (issue: {issue}32265[#32265]) +* Keep SnapshotsInProgress State in Sync with Routing Table {pull}35710[#35710] +* Ensure that gcs client creation is privileged {pull}25938[#25938] (issue: {issue}25932[#25932]) +* Make calls to CloudBlobContainer#exists privileged {pull}25937[#25937] (issue: {issue}25931[#25931]) +* Fix Concurrent Snapshot Ending And Stabilize Snapshot Finalization {pull}38368[#38368] (issue: {issue}38226[#38226]) +* Fix Two Races that Lead to Stuck Snapshots {pull}37686[#37686] (issues: {issue}32265[#32265], {issue}32348[#32348]) +* Fix Race in Concurrent Snapshot Delete and Create {pull}37612[#37612] (issue: {issue}37581[#37581]) +* Streamline S3 Repository- and Client-Settings {pull}37393[#37393] +* Blob store compression fix {pull}39073[#39073] + +SQL:: +* Fix translation of LIKE/RLIKE keywords {pull}36672[#36672] (issues: {issue}36039[#36039], {issue}36584[#36584]) +* Scripting support for casting functions CAST and CONVERT {pull}36640[#36640] (issue: {issue}36061[#36061]) +* Fix translation to painless for conditionals {pull}36636[#36636] (issue: {issue}36631[#36631]) +* Concat should be always not nullable {pull}36601[#36601] (issue: {issue}36169[#36169]) +* Fix MOD() for long and integer arguments {pull}36599[#36599] (issue: {issue}36364[#36364]) +* Fix issue with complex HAVING and GROUP BY ordinal {pull}36594[#36594] (issue: {issue}36059[#36059]) +* Be lenient for tests involving comparison to H2 but strict for csv spec tests {pull}36498[#36498] (issue: {issue}36483[#36483]) +* Non ISO 8601 versions of DAY_OF_WEEK and WEEK_OF_YEAR functions {pull}36358[#36358] (issue: {issue}36263[#36263]) +* Do not ignore all fields whose names start with underscore {pull}36214[#36214] (issue: {issue}36206[#36206]) +* Fix issue with wrong data type for scripted Grouping keys {pull}35969[#35969] (issue: {issue}35662[#35662]) +* Fix translation of math functions to painless {pull}35910[#35910] (issue: {issue}35654[#35654]) +* Fix jdbc jar to include deps {pull}35602[#35602] +* Fix query translation for scripted queries {pull}35408[#35408] (issue: {issue}35232[#35232]) +* Clear the cursor if nested inner hits are enough to fulfill the query required limits {pull}35398[#35398] (issue: {issue}35176[#35176]) +* Introduce IsNull node to simplify expressions {pull}35206[#35206] (issues: {issue}34876[#34876], {issue}35171[#35171]) +* The SSL default configuration shouldn't override the https protocol if used {pull}34635[#34635] (issue: {issue}33817[#33817]) +* Minor fix for javadoc {pull}32573[#32573] (issue: {issue}32553[#32553]) +* Prevent grouping over grouping functions {pull}38649[#38649] (issue: {issue}38308[#38308]) +* Relax StackOverflow circuit 
breaker for constants {pull}38572[#38572] (issue: {issue}38571[#38571]) +* Fix issue with IN not resolving to underlying keyword field {pull}38440[#38440] (issue: {issue}38424[#38424]) +* Change the Intervals milliseconds precision to 3 digits {pull}38297[#38297] (issue: {issue}37423[#37423]) +* Fix esType for DATETIME/DATE and INTERVALS {pull}38179[#38179] (issue: {issue}38051[#38051]) +* Added SSL configuration options tests {pull}37875[#37875] (issue: {issue}37711[#37711]) +* Fix casting from date to numeric type to use millis {pull}37869[#37869] (issue: {issue}37655[#37655]) +* Fix BasicFormatter NPE {pull}37804[#37804] +* Return Intervals in SQL format for CLI {pull}37602[#37602] (issues: {issue}29970[#29970], {issue}36186[#36186], {issue}36432[#36432]) +* Fix object extraction from sources {pull}37502[#37502] (issue: {issue}37364[#37364]) +* Fix issue with field names containing "." {pull}37364[#37364] (issue: {issue}37128[#37128]) +* Fix bug regarding alias fields with dots {pull}37279[#37279] (issue: {issue}37224[#37224]) +* Proper handling of COUNT(field_name) and COUNT(DISTINCT field_name) {pull}37254[#37254] (issue: {issue}30285[#30285]) +* Fix COUNT DISTINCT filtering {pull}37176[#37176] (issue: {issue}37086[#37086]) +* Fix issue with wrong NULL optimization {pull}37124[#37124] (issue: {issue}35872[#35872]) +* Fix issue with complex expression as args of PERCENTILE/_RANK {pull}37102[#37102] (issue: {issue}37099[#37099]) +* Handle the bwc Joda ZonedDateTime scripting class in Painless {pull}37024[#37024] (issue: {issue}37023[#37023]) +* Fix bug regarding histograms usage in scripting {pull}36866[#36866] +* Fix issue with always false filter involving functions {pull}36830[#36830] (issue: {issue}35980[#35980]) +* Protocol returns ISO 8601 String formatted dates instead of Long for JDBC/ODBC requests {pull}36800[#36800] (issue: {issue}36756[#36756]) +* Enhance Verifier to prevent aggregate or grouping functions from {pull}36799[#36799] (issue: {issue}36798[#36798]) +* Fix translation of LIKE/RLIKE keywords {pull}36672[#36672] (issues: {issue}36039[#36039], {issue}36584[#36584]) +* Scripting support for casting functions CAST and CONVERT {pull}36640[#36640] (issue: {issue}36061[#36061]) +* Concat should be always not nullable {pull}36601[#36601] (issue: {issue}36169[#36169]) +* Fix issue with complex HAVING and GROUP BY ordinal {pull}36594[#36594] (issue: {issue}36059[#36059]) +* Add missing handling of IP field in JDBC {pull}40384[#40384] (issue: {issue}40358[#40358]) +* Fix metric aggs on date/time to not return double {pull}40377[#40377] (issues: {issue}39492[#39492], {issue}40376[#40376]) +* CAST supports both SQL and ES types {pull}40365[#40365] (issue: {issue}40282[#40282]) +* Fix RLIKE bug and improve testing for RLIKE statement {pull}40354[#40354] (issues: {issue}34609[#34609], {issue}39931[#39931]) +* Unwrap the first value in an array in case of array leniency {pull}40318[#40318] (issue: {issue}40296[#40296]) +* Preserve original source for cast/convert function {pull}40271[#40271] (issue: {issue}40239[#40239]) +* Fix LIKE function equality by considering its pattern as well {pull}40260[#40260] (issue: {issue}39931[#39931]) +* Fix issue with optimization on queries with ORDER BY/LIMIT {pull}40256[#40256] (issue: {issue}40211[#40211]) +* Rewrite ROUND and TRUNCATE functions with a different optional parameter handling method {pull}40242[#40242] (issue: {issue}40001[#40001]) +* Fix issue with getting DATE type in JDBC {pull}40207[#40207] +* Fix issue with date columns 
returned always in UTC {pull}40163[#40163] (issue: {issue}40152[#40152]) +* Add multi_value_field_leniency inside FieldHitExtractor {pull}40113[#40113] (issue: {issue}39700[#39700]) +* Fix incorrect ordering of groupings (GROUP BY) based on orderings (ORDER BY) {pull}40087[#40087] (issue: {issue}39956[#39956]) +* Fix bug with JDBC timezone setting and DATE type {pull}39978[#39978] (issue: {issue}39915[#39915]) +* Use underlying exact field for LIKE/RLIKE {pull}39443[#39443] (issue: {issue}39442[#39442]) +* Fix display size for DATE/DATETIME {pull}40669[#40669] +* Have LIKE/RLIKE use wildcard and regexp queries {pull}40628[#40628] (issue: {issue}40557[#40557]) +* Fix getTime() methods in JDBC {pull}40484[#40484] +* SYS TABLES: enumerate tables of requested types {pull}40535[#40535] (issue: {issue}40348[#40348]) +* Passing an input to the CLI "freezes" the CLI after displaying an error message {pull}40164[#40164] (issue: {issue}40557[#40557]) +* Wrap ZonedDateTime parameters inside scripts {pull}39911[#39911] (issue: {issue}39877[#39877]) +* ConstantProcessor can now handle NamedWriteable {pull}39876[#39876] (issue: {issue}39875[#39875]) +* Extend the multi dot field notation extraction to lists of values {pull}39823[#39823] (issue: {issue}39738[#39738]) +* Values in datetime script aggs should be treated as long {pull}39773[#39773] (issue: {issue}37042[#37042]) +* Don't allow inexact fields for MIN/MAX {pull}39563[#39563] (issue: {issue}39427[#39427]) +* Fix merging of incompatible multi-fields {pull}39560[#39560] (issue: {issue}39547[#39547]) +* Fix COUNT DISTINCT column name {pull}39537[#39537] (issue: {issue}39511[#39511]) +* Enable accurate hit tracking on demand {pull}39527[#39527] (issue: {issue}37971[#37971]) +* Ignore UNSUPPORTED fields for JDBC and ODBC modes in 'SYS COLUMNS' {pull}39518[#39518] (issue: {issue}39471[#39471]) +* Enforce JDBC driver - ES server version parity {pull}38972[#38972] (issue: {issue}38775[#38775]) +* Fall back to using the field name for column label {pull}38842[#38842] (issue: {issue}38831[#38831]) + +Suggesters:: +* Fix duplicate removal when merging completion suggestions {pull}36996[#36996] (issue: {issue}35836[#35836]) + +Task Management:: +* Un-assign persistent tasks as nodes exit the cluster {pull}37656[#37656] + +Watcher:: +* Watcher accounts constructed lazily {pull}36656[#36656] +* Only trigger a watch if new or schedule/changed {pull}35908[#35908] +* Fix Watcher NotificationService's secure settings {pull}35610[#35610] (issue: {issue}35378[#35378]) +* Fix integration tests to ensure correct start/stop of Watcher {pull}35271[#35271] (issues: {issue}29877[#29877], {issue}30705[#30705], {issue}33291[#33291], {issue}34448[#34448], {issue}34462[#34462]) + +ZenDiscovery:: +* Remove duplicate discovered peers {pull}35505[#35505] +* Respect the no_master_block setting {pull}36478[#36478] +* Cancel GetDiscoveredNodesAction when bootstrapped {pull}36423[#36423] (issues: {issue}36380[#36380], {issue}36381[#36381]) +* Only elect master-eligible nodes {pull}35996[#35996] +* Remove duplicate discovered peers {pull}35505[#35505] +* Fix size of rolling-upgrade bootstrap config {pull}38031[#38031] +* Always return metadata version if metadata is requested {pull}37674[#37674] +* Elect freshest master in upgrade {pull}37122[#37122] (issue: {issue}40[#40]) +* Fix cluster state persistence for single-node discovery {pull}36825[#36825] + +[[regression-7.0.0]] +[float] +=== Regressions + +Infra/Core:: +* Restore date aggregation performance in UTC case 
{pull}38221[#38221] (issue: {issue}37826[#37826]) +* Speed up converting of temporal accessor to zoned date time {pull}37915[#37915] (issue: {issue}37826[#37826]) + +Mapping:: +* Performance fix. Reduce deprecation calls for the same bulk request {pull}37415[#37415] (issue: {issue}37411[#37411]) + +Scripting:: +* Use Number as a return value for BucketAggregationScript {pull}35653[#35653] (issue: {issue}35351[#35351]) + +[[upgrade-7.0.0]] +[float] +=== Upgrades + +Discovery-Plugins:: +* Bump jackson-databind version for AWS SDK {pull}39183[#39183] + +Engine:: +* Upgrade to lucene-8.0.0-snapshot-83f9835. {pull}37668[#37668] +* Upgrade to Lucene 8.0.0-snapshot-ff9509a8df {pull}39350[#39350] +* Upgrade to Lucene 8.0.0 {pull}39992[#39992] (issue: {issue}39640[#39640]) + +Geo:: +* Upgrade JTS to 1.14.0 {pull}29141[#29141] (issue: {issue}29122[#29122]) + +Ingest:: +* Update geolite2 database in ingest geoip plugin {pull}33840[#33840] +* Bump jackson-databind version for ingest-geoip {pull}39182[#39182] + +Infra/Core:: +* Upgrade to a Lucene 8 snapshot {pull}33310[#33310] (issues: {issue}32899[#32899], {issue}33028[#33028], {issue}33309[#33309]) + +Security:: +* Upgrade the bouncycastle dependency to 1.61 {pull}40017[#40017] (issue: {issue}40011[#40011]) + +Search:: +* Upgrade to Lucene 8.0.0 GA {pull}39992[#39992] (issue: {issue}39640[#39640]) + +Snapshot/Restore:: +* plugins/repository-gcs: Update google-cloud-storage/core to 1.59.0 {pull}39748[#39748] (issue: {issue}39366[#39366]) + +Network:: +* Fix Netty Leaks by upgrading to 4.1.28 {pull}32511[#32511] (issue: {issue}32487[#32487]) +* Upgrade Netty 4.3.32.Final {pull}36102[#36102] (issue: {issue}35360[#35360]) + +Machine Learning:: +* No need to add state doc mapping on job open in 7.x {pull}37759[#37759] diff --git a/docs/reference/release-notes/7.1.asciidoc b/docs/reference/release-notes/7.1.asciidoc new file mode 100644 index 0000000000000..ec93927513b85 --- /dev/null +++ b/docs/reference/release-notes/7.1.asciidoc @@ -0,0 +1,61 @@ +[[release-notes-7.1.1]] +== {es} version 7.1.1 + +Also see <>. + +[[bug-7.1.1]] +[float] +=== Bug fixes + +Distributed:: +* Avoid unnecessary persistence of retention leases {pull}42299[#42299] +* Execute actions under permit in primary mode only {pull}42241[#42241] (issues: {issue}40386[#40386], {issue}41686[#41686]) + +Infra/REST API:: +* Remove deprecated _source_exclude and _source_include from get API spec {pull}42188[#42188] + +[[release-notes-7.1.0]] +== {es} version 7.1.0 + +Also see <>. + +[[enhancement-7.1.0]] +[float] +=== Enhancements + +Security:: +* Moved some security features to basic. 
See <> + +Authentication:: +* Log warning when unlicensed realms are skipped {pull}41778[#41778] + +Infra/Settings:: +* Drop distinction in entries for keystore {pull}41701[#41701] + + +[[bug-7.1.0]] +[float] +=== Bug fixes + +Cluster Coordination:: +* Handle serialization exceptions during publication {pull}41781[#41781] (issue: {issue}41090[#41090]) + +Infra/Core:: +* Fix fractional seconds for strict_date_optional_time {pull}41871[#41871] (issue: {issue}41633[#41633]) + +Network:: +* Enforce transport TLS on Basic with Security {pull}42150[#42150] + +Reindex:: +* Allow reindexing into write alias {pull}41677[#41677] (issue: {issue}41667[#41667]) + +SQL:: +* SQL: Fix issue regarding INTERVAL * number {pull}42014[#42014] (issue: {issue}41239[#41239]) +* SQL: Remove CircuitBreaker from parser {pull}41835[#41835] (issue: {issue}41471[#41471]) + +Search:: +* Fix IAE on cross_fields query introduced in 7.0.1 {pull}41938[#41938] (issues: {issue}41125[#41125], {issue}41934[#41934]) + + + + diff --git a/docs/reference/release-notes/highlights-7.0.0.asciidoc b/docs/reference/release-notes/highlights-7.0.0.asciidoc index d01d543c8257e..469e049134072 100644 --- a/docs/reference/release-notes/highlights-7.0.0.asciidoc +++ b/docs/reference/release-notes/highlights-7.0.0.asciidoc @@ -4,6 +4,371 @@ 7.0.0 ++++ -coming[7.0.0] +See also <> and +<>. -See also <> and <>. +//NOTE: The notable-highlights tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-highlights[] +[float] +==== Adaptive replica selection enabled by default + +In Elasticsearch 6.x and prior, a series of search requests to the same shard +would be forwarded to the primary and each replica in round-robin fashion. This +could prove problematic if one node starts a long garbage collection --- search +requests could still be forwarded to the slow node regardless and would have an +impact on search latency. + +In 6.1, we added an experimental +{ref}/search.html#search-adaptive-replica[adaptive replica selection] feature. +Each node tracks and compares how long search requests to +other nodes take, and uses this information to adjust how frequently to send +requests to shards on particular nodes. In our benchmarks, this results in an +overall improvement in search throughput and reduced 99th percentile latencies. + +This option was disabled by default throughout 6.x, but we’ve heard feedback +from our users that have found the setting to be very beneficial, so we’ve +turned it on by default starting in Elasticsearch 7.0.0. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== Skip shard refreshes if a shard is "search idle" + +Elasticsearch 6.x and prior {ref}/indices-refresh.html[refreshed] indices +automatically in the background, by default every second. This provides the +“near real-time” search capabilities Elasticsearch is known for: results are +available for search requests within one second after they'd been added, by +default. However, this behavior has a significant impact on indexing performance +if the refreshes are not needed, (e.g., if Elasticsearch isn’t servicing any +active searches). + +Elasticsearch 7.0 is much smarter about this behavior by introducing the +notion of a shard being "search idle". A shard now transitions to being search +idle after it hasn't had any searches for +{ref}/index-modules.html#dynamic-index-settings[thirty seconds], by default. 
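The idle threshold itself is exposed as a dynamic index setting, so it can be tuned per index. A minimal sketch, assuming the `index.search.idle.after` setting and an illustrative index name:

[source,js]
--------------------------------------------------
PUT /my-logs/_settings
{
  "index.search.idle.after": "60s"
}
--------------------------------------------------
// NOTCONSOLE

Here the hypothetical `my-logs` index is only allowed to go search idle after a full minute without searches.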
+Once a shard is search idle, all scheduled refreshes will +be skipped until a search comes through, which will trigger the next scheduled +refresh. We know that this is going to significantly increase the indexing +throughput for many users. The new behavior is only applied if there is no +explicit {ref}/index-modules.html#dynamic-index-settings[refresh interval set], +so do set the refresh +interval explicitly for any indices on which you prefer the old behavior. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== Default to one shard + +One of the biggest sources of troubles we’ve seen over the years from our users +has been over-sharding and defaults play a big role in that. In Elasticsearch +6.x and prior, we defaulted to five shards by default per index. If you had one +daily index for ten different applications and each had the default of five +shards, you were creating fifty shards per day and it wasn't long before you had +thousands of shards even if you were only indexing a few gigabytes of data per +day. Index Lifecycle Management was a first step to help with this: providing +native rollover functions to create indexes by size instead of (just) by day and +built-in shrink functionality to shrink the number of shards per +index. Defaulting indices to one shard is the next step in helping to reduce +over-sharding. Of course, if you have another preferred primary shard count, you +can set it via the index settings. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== Lucene 8 + +As with every major release, we look to support the latest major version of +Lucene, along with all the goodness that comes with it. That includes all the +developments that we contributed to the new Lucene version. Elasticsearch 7.0 +bundles Lucene 8, which is the latest version of Lucene. Lucene version 8 serves +as the foundation for many functional improvements in the rest of Elasticsearch, +including improved search performance for top-k queries and better ways to +combine relevance signals for your searches while still maintaining speed. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== Introduce the ability to minimize round-trips in {ccs} + +In Elasticsearch 5.3, we released a feature called +{ref}/modules-cross-cluster-search.html[{ccs}] for users to query across multiple +clusters. We’ve since improved on the {ccs} framework, adding features to +ultimately use it to deprecate and replace tribe nodes as a way to federate +queries. In Elasticsearch 7.0, we’re adding a new execution mode for {ccs}: one +which has fewer round-trips when they aren't necessary. This mode +(`ccs_minimize_roundtrips`) can result in faster searches when the {ccs} query +spans high-latencies (e.g., across a WAN). +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== New cluster coordination implementation + +Since the beginning, we focused on making Elasticsearch easy to scale and +resilient to catastrophic failures. To support these requirements, we created a +pluggable cluster coordination system, with the default implementation known as +Zen Discovery. Zen Discovery was meant to be effortless, and give our users +peace of mind (as the name implies). The meteoric rise in Elasticsearch usage +has taught us a great deal. For instance, Zen's `minimum_master_nodes` setting +was often misconfigured, which put clusters at a greater risk of split brains +and losing data. 
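To make that burden concrete, here is a sketch of the kind of adjustment operators had to remember on every topology change in 6.x; the value shown assumes a cluster that has grown to three master-eligible nodes:

[source,js]
--------------------------------------------------
PUT /_cluster/settings
{
  "persistent": {
    "discovery.zen.minimum_master_nodes": 2
  }
}
--------------------------------------------------
// NOTCONSOLE

Forgetting to raise or lower this value after adding or removing master-eligible nodes was an easy way to end up with an unsafe quorum.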
Maintaining this setting across large and dynamically resizing +clusters was also difficult. + +In Elasticsearch 7.0, we have completely rethought and rebuilt the cluster +coordination layer. The new implementation gives safe sub-second master election +times, where Zen may have taken several seconds to elect a new master, valuable +time for a mission-critical deployment. With the `minimum_master_nodes` setting +removed, growing and shrinking clusters becomes safer and easier, and leaves +much less room to misconfigure the system. Most importantly, the new cluster +coordination layer gives us strong building blocks for the future of +Elasticsearch, ensuring we can build functionality for even more advanced +use-cases to come. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== Better support for small heaps (the real-memory circuit breaker) + +Elasticsearch 7.0 adds an all-new {ref}/circuit-breaker.html[circuit breaker] +that keeps track of the total memory used by the JVM and will reject requests if +they would cause the reserved plus actual heap usage to exceed 95%. We'll also +be changing the default maximum buckets to return as part of an aggregation +(`search.max_buckets`) to 10,000, which is unbounded by default in 6.x and +prior. These two changes show great promise in seriously improving the out-of-memory +protection of Elasticsearch in 7.x, helping you keep your cluster alive even in +the face of adversarial or novice users running large queries and aggregations. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== {ccr-cap} is production-ready + +We introduced {ccr-cap} as a beta feature in Elasticsearch +6.5. {ccr-cap} was the most heavily requested feature for Elasticsearch. We're +excited to announce {ccr-cap} is now generally available and ready for production use +in Elasticsearch 6.7 and 7.0! {ccr-cap} has a variety of use cases, including +cross-datacenter and cross-region replication, replicating data to get closer to +the application server and user, and maintaining a centralized reporting cluster +replicated from a large number of smaller clusters. + +In addition to maturing to a GA feature, there were a number of important +technical advancements in CCR for 6.7 and 7.0. Previous versions of {ccr-cap} required +replication to start on new indices only: existing indices could not be +replicated. {ccr-cap} can now start replicating existing indices that have soft +deletes enabled in 6.7 and 7.0, and new indices default to having soft deletes +enabled. We also introduced new technology to prevent a follower index from +falling fatally far behind its leader index. We’ve added a management UI in +Kibana for configuring remote clusters, indices to replicate, and index naming +patterns for automatic replication (e.g. for replicating `metricbeat-*` +indices). We've also added a monitoring UI for insight into {ccr} progress and +alerting on errors. Check out the Getting started with {ccr} +guide, or visit the reference documentation to learn more. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== {ilm-cap} is production-ready + +Index Lifecycle Management (ILM) was +https://www.elastic.co/blog/elastic-stack-6-6-0-released[released] as a beta +feature in Elasticsearch 6.6. We’ve officially moved ILM out of beta and into +GA, ready for production usage!
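A lifecycle policy is defined with a small JSON document through the ILM API. A minimal sketch, in which the policy name, size, and ages are purely illustrative:

[source,js]
--------------------------------------------------
PUT /_ilm/policy/my_policy
{
  "policy": {
    "phases": {
      "hot": {
        "actions": {
          "rollover": { "max_size": "50gb", "max_age": "30d" }
        }
      },
      "delete": {
        "min_age": "90d",
        "actions": { "delete": {} }
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE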
ILM makes it easy to manage the lifecycle of +data in Elasticsearch, including how data progresses between +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/ilm-policy-definition.html[hot, warm, cold, and deletion phases]. +Specific rules regarding how data moves through these phases can be created via +APIs in Elasticsearch, or a beautiful management UI in Kibana. + +In Elasticsearch 6.7 and 7.0, ILM can now manage frozen indices. Frozen indices +are valuable for long term data storage in Elasticsearch, and require a smaller +amount of memory (heap) in relation to the amount of data managed by a node. In +6.7 and 7.0, +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/_actions.html[frozen indices] +can now be frozen as part of the cold phase in ILM. In addition, ILM now works +directly with Cross-Cluster Replication (CCR), which also GA’d in the +Elasticsearch 6.7 and 7.0 releases. The potential actions available in each ILM +phase can be found in the +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/_actions.html[documentation]. +ILM is free to use and part of the default distribution of Elasticsearch. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== SQL is production-ready + +The SQL interface to Elasticsearch is now GA. +https://www.elastic.co/blog/elasticsearch-6-3-0-released[Introduced in 6.3] as +an alpha release, the SQL interface allows developers and data scientists +familiar with SQL to use the speed, scalability, and full-text power of +Elasticsearch that others know and love. It also allows BI tools using SQL to +easily access data in Elasticsearch. In addition to approving SQL access as a GA +feature in Elasticsearch, we’ve designated our +https://www.elastic.co/downloads/jdbc-client[JDBC] and +https://www.elastic.co/downloads/odbc-client[ODBC] drivers as GA. There are four +methods to access Elasticsearch SQL: through the +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/sql-rest.html[Elasticsearch +REST endpoints], the +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/sql-cli.html[Elasticsearch +SQL command line interface], the +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/sql-jdbc.html[JDBC +driver], and the +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/sql-odbc.html[ODBC +driver]. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== High-level REST client is feature-complete + +If you’ve been following our +https://www.elastic.co/blog/the-elasticsearch-java-high-level-rest-client-is-out[blog] +or our https://github.com/elastic/elasticsearch/issues/27205[GitHub repository], +you may be aware of a task we’ve been working on for quite a while now: creating +a next-generation Java client for accessing an Elasticsearch cluster. We +started off by working on the most commonly-used features like search and +aggregations, and have been working our way through administrative and +monitoring APIs. Many of you that use Java are already using this new client, +but for those that are still using the TransportClient, now is a great time to +upgrade to our High Level REST Client, or HLRC. + +As of 7.0.0, the HLRC now has all the API checkboxes checked to call it +“complete” so those of you still using the TransportClient should be able to +migrate. We’ll of course continue to develop our REST APIs and will add them to +this client as we go. 
For a list of all of the APIs that are available, have a +look at our +https://www.elastic.co/guide/en/elasticsearch/client/java-rest/7.0/java-rest-high.html[HLRC +documentation]. To get started, have a look at the +https://www.elastic.co/guide/en/elasticsearch/client/java-rest/7.0/java-rest-high-getting-started.html[getting +started with the HLRC] section of our docs and if you need help migrating from +the TransportClient, have a look at our +https://www.elastic.co/guide/en/elasticsearch/client/java-rest/7.0/java-rest-high-level-migration.html[migration +guide]. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== Support nanosecond timestamps + +Up until 7.0 Elasticsearch could only store timestamps with millisecond +precision. If you wanted to process events that occur at a higher rate -- for +example if you want to store and analyze tracing or network packet data in +Elasticsearch -- you may want higher precision. Historically, we have used the +https://www.joda.org/joda-time/[Joda time library] to handle dates and times, +and Joda lacked support for such high precision timestamps. + +With JDK 8, an official Java time API has been introduced which can also handle +nanosecond precision timestamps and over the past year, we’ve been working to +migrate our Joda time usage to the native Java time while trying to maintain +backwards compatibility. As of 7.0.0, you can now make use of these nanosecond +timestamps via a dedicated +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/date_nanos.html[date_nanos +field mapper]. Note that aggregations are still on a millisecond resolution +with this field to avoid having an explosion of buckets. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== Faster retrieval of top hits + +When it comes to search, query performance is a key feature. We have achieved a +significant improvement to search performance in Elasticsearch 7.0 for +situations in which the exact hit count is not needed and it is sufficient to +set a lower boundary to the number of results. For example, if your users +typically just look at the first page of results on your site and don’t care +about exactly how many documents matched, you may be able to show them “more +than 10,000 hits” and then provide them with paginated results. It’s quite +common to have users enter frequently-occurring terms like “the” and “a” in +their queries, which has historically forced Elasticsearch to score a lot of +documents even when those frequent terms couldn’t possibly add much to the +score. + +In these conditions Elasticsearch can now skip calculating scores for records +that are identified at an early stage as records that will not be ranked at the +top of the result set. This can significantly improve the query speed. The +actual number of top results that are scored is +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-track-total-hits.html[configurable], +but the default is 10,000. The behavior of queries that have a result set that +is smaller than this threshold will not change - i.e. the results count is +accurate but there is no performance improvement for queries that match a small +number of documents. Because the improvement is based on skipping low ranking +records, it does not apply to aggregations. 
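As a rough sketch, a request can opt back into fully accurate counts through the `track_total_hits` option; the index and query below are illustrative:

[source,js]
--------------------------------------------------
GET /my-index/_search
{
  "track_total_hits": true,
  "query": {
    "match": { "message": "the quick brown fox" }
  }
}
--------------------------------------------------
// NOTCONSOLE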
You can read more about this +powerful algorithmic development in our blog post +https://www.elastic.co/blog/faster-retrieval-of-top-hits-in-elasticsearch-with-block-max-wand[Magic +WAND: Faster Retrieval of Top Hits in Elasticsearch]. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== Support for TLS 1.3 + +Elasticsearch has supported encrypted communications for a long time; however, +we recently started https://www.elastic.co/support/matrix#matrix_jvm[supporting +JDK 11], which gives us new capabilities. JDK 11 now has TLSv1.3 support, so +starting with 7.0, we’re now supporting TLSv1.3 within Elasticsearch for those +of you running JDK 11. In order to keep new users from inadvertently running +with low security, we’ve also dropped TLSv1.0 from our defaults. For those +running older versions of Java, we have default options of TLSv1.2 and +TLSv1.1. Have a look at our +https://www.elastic.co/guide/en/elastic-stack-overview/7.0/ssl-tls.html[TLS +setup instructions] if you need help getting started. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== Bundle JDK in Elasticsearch distribution + +One of the more prominent "getting started hurdles" we’ve seen users run into +has been not knowing that Elasticsearch is a Java application and that they need +to install one of the supported JDKs first. With 7.0, we’re now bundling a +distribution of OpenJDK to help users get started with Elasticsearch even +faster. We understand that some users have preferred JDK distributions, so we +also support bringing your own JDK. If you want to bring your own JDK, you can +still do so by +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/setup.html#jvm-version[setting +JAVA_HOME] before starting Elasticsearch. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== Rank features + +Elasticsearch 7.0 has several new field types to get the most out of your data. +Two to help with core search use cases are +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/rank-feature.html[`rank_feature`] +and +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/rank-features.html[`rank_features`]. +These can be used to boost documents based on numeric or categorical values +while still maintaining the performance of the new fast top hits query +capabilities. For more information on these fields and how to use them, read our +https://www.elastic.co/blog/easier-relevance-tuning-elasticsearch-7-0[blog +post]. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== JSON logging + +JSON logging is now enabled in Elasticsearch in addition to plaintext +logs. Starting in 7.0, you will find new files with `.json` extensions in your +log directory. This means you can now use filtering tools like +https://stedolan.github.io/jq/[`jq`] to pretty-print and process your logs in a +much more structured manner. You can also expect to find additional information +like `node.id`, `cluster.uuid`, `type` (and more) in each log line. The `type` +field in each JSON log line lets you distinguish log streams when +running on Docker. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +=== Script score query (aka function score 2.0) + +With 7.0, we are introducing the +https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-script-score-query.html[next +generation of our function score capability].
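A minimal sketch of what such a query can look like; the index, field, and scoring formula are illustrative:

[source,js]
--------------------------------------------------
GET /my-index/_search
{
  "query": {
    "script_score": {
      "query": { "match": { "message": "elasticsearch" } },
      "script": {
        "source": "_score * Math.log(2 + doc['likes'].value)"
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE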
This new script_score query +provides a new, simpler, and more flexible way to generate a ranking score per +record. The script_score query is constructed of a set of functions, including +arithmetic and distance functions, which the user can mix and match to construct +arbitrary function score calculations. The modular structure is simpler to use +and will open this important functionality to additional users. +//end::notable-highlights[] diff --git a/docs/reference/release-notes/highlights-7.1.0.asciidoc b/docs/reference/release-notes/highlights-7.1.0.asciidoc new file mode 100644 index 0000000000000..26132c1daf410 --- /dev/null +++ b/docs/reference/release-notes/highlights-7.1.0.asciidoc @@ -0,0 +1,40 @@ +[[release-highlights-7.1.0]] +== 7.1.0 release highlights +++++ +7.1.0 +++++ + +See also <>. + +//tag::notable-highlights[] +[float] +==== TLS is now licensed under the Elastic Basic license + +Transport Layer Security (TLS), commonly referred to as SSL, is now +licensed under the free-of-charge Elastic Basic license. Previously, this security feature +required a paid Gold-tier subscription. With the default distribution, +you can now encrypt all Elasticsearch communication, within a cluster and across remote +clusters. Download https://www.elastic.co/downloads/elasticsearch[Elasticsearch], +https://www.elastic.co/guide/en/elasticsearch/reference/7.1/configuring-tls.html[configure TLS], +and run your cluster in production, knowing all Elasticsearch communication is safely encrypted. +For details, see https://www.elastic.co/subscriptions. +//end::notable-highlights[] + +//tag::notable-highlights[] +[float] +==== RBAC is now licensed under the Elastic Basic license + +RBAC (Role-Based Access Control) is now licensed under the free-of-charge Elastic Basic license. +Previously, this security feature required a paid Gold-tier subscription. +With the default distribution you can take advantage of RBAC by configuring users, groups, roles, +and permissions for any user from the +https://www.elastic.co/guide/en/elasticsearch/reference/7.1/configuring-file-realm.html[file realm] +or the https://www.elastic.co/guide/en/elasticsearch/reference/7.1/configuring-native-realm.html[native realm]. +Download https://www.elastic.co/downloads/elasticsearch[Elasticsearch], +https://www.elastic.co/guide/en/elastic-stack-overview/7.1/authorization.html[configure RBAC], +and run your cluster in production, knowing your private data stays private. +Note that our advanced security features, ranging from single sign-on and Active Directory/LDAP +authentication to field-level and document-level security, remain paid features. +For details, see https://www.elastic.co/subscriptions. + +//end::notable-highlights[] diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 5b20b67061d03..38501b4a79557 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -1,16 +1,15 @@ [[release-highlights]] -= {es} Release highlights -++++ -Release highlights -++++ += Release highlights [partintro] -- This section summarizes the most important changes in each release. For the full list, see <> and <>.
+* <> * <> -- +include::highlights-7.1.0.asciidoc[] include::highlights-7.0.0.asciidoc[] \ No newline at end of file diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index e834249724aa3..c33397f7c14ba 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -8,7 +8,7 @@ directly to configure and access {xpack} features. * <> -* <> +* <> * <> * <>, <> * <> diff --git a/docs/reference/rollup/api-quickref.asciidoc b/docs/reference/rollup/api-quickref.asciidoc index 21eefefb4b12b..d1ea03b6284d7 100644 --- a/docs/reference/rollup/api-quickref.asciidoc +++ b/docs/reference/rollup/api-quickref.asciidoc @@ -5,7 +5,7 @@ experimental[] -Most {rollup} endpoints have the following base: +Most rollup endpoints have the following base: [source,js] ---- diff --git a/docs/reference/rollup/apis/rollup-job-config.asciidoc b/docs/reference/rollup/apis/rollup-job-config.asciidoc index 885d4e82cf6b0..852f7b879fb38 100644 --- a/docs/reference/rollup/apis/rollup-job-config.asciidoc +++ b/docs/reference/rollup/apis/rollup-job-config.asciidoc @@ -69,7 +69,7 @@ In the above example, there are several pieces of logistical configuration for t `rollup_index` (required):: (string) The index that you wish to store rollup results into. All the rollup data that is generated by the job will be stored in this index. When searching the rollup data, this index will be used in the <> endpoint's URL. - The rollup index be shared with other rollup jobs. The data is stored so that it doesn't interfere with unrelated jobs. + The rollup index can be shared with other rollup jobs. The data is stored so that it doesn't interfere with unrelated jobs. `cron` (required):: (string) A cron string which defines when the rollup job should be executed. The cron string defines an interval of when to run diff --git a/docs/reference/rollup/apis/stop-job.asciidoc b/docs/reference/rollup/apis/stop-job.asciidoc index 50935826f5f53..35162246a5fbb 100644 --- a/docs/reference/rollup/apis/stop-job.asciidoc +++ b/docs/reference/rollup/apis/stop-job.asciidoc @@ -56,6 +56,7 @@ POST _rollup/job/sensor/_stop -------------------------------------------------- // CONSOLE // TEST[setup:sensor_started_rollup_job] +// TEST[s/_stop/_stop?wait_for_completion=true&timeout=10s/] Which will return the response: diff --git a/docs/reference/rollup/rollup-api.asciidoc b/docs/reference/rollup/rollup-api.asciidoc index 099686fb4329d..5981336d0a054 100644 --- a/docs/reference/rollup/rollup-api.asciidoc +++ b/docs/reference/rollup/rollup-api.asciidoc @@ -9,7 +9,7 @@ * <>, <>, * <>, <>, -* <> +* <> * <> [float] diff --git a/docs/reference/rollup/rollup-search-limitations.asciidoc b/docs/reference/rollup/rollup-search-limitations.asciidoc index c8a736450bde0..81cb162bd3c11 100644 --- a/docs/reference/rollup/rollup-search-limitations.asciidoc +++ b/docs/reference/rollup/rollup-search-limitations.asciidoc @@ -32,6 +32,7 @@ To help simplify the problem, we have limited search to just one rollup index at may be able to open this up to multiple rollup jobs. [float] +[[aggregate-stored-only]] === Can only aggregate what's been stored A perhaps obvious limitation, but rollups can only aggregate on data that has been stored in the rollups. 
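As an illustration, consider a job configured along these lines; this is only a sketch, and the job name, fields, and intervals are illustrative:

[source,js]
--------------------------------------------------
PUT /_rollup/job/my_rollup_job
{
  "index_pattern": "sensor-*",
  "rollup_index": "sensor_rollup",
  "cron": "*/30 * * * * ?",
  "page_size": 1000,
  "groups": {
    "date_histogram": { "field": "timestamp", "interval": "1h" }
  },
  "metrics": [
    { "field": "temperature", "metrics": ["min", "max"] }
  ]
}
--------------------------------------------------
// NOTCONSOLE

Because only `min` and `max` are stored for `temperature`, an `avg` aggregation over the rolled-up data cannot be answered.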
If you don't configure the diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/scripting.asciidoc similarity index 86% rename from docs/reference/modules/scripting.asciidoc rename to docs/reference/scripting.asciidoc index 44696ea94bb89..33b8795a58114 100644 --- a/docs/reference/modules/scripting.asciidoc +++ b/docs/reference/scripting.asciidoc @@ -1,9 +1,11 @@ [[modules-scripting]] -== Scripting += Scripting -The scripting module enables you to use scripts to evaluate custom -expressions. For example, you could use a script to return "script fields" -as part of a search request or evaluate a custom score for a query. +[partintro] +-- +With scripting, you can evaluate custom expressions in {es}. For example, you +could use a script to return "script fields" as part of a search request or +evaluate a custom score for a query. The default scripting language is <>. Additional `lang` plugins enable you to run scripts written in other languages. @@ -11,7 +13,7 @@ Everywhere a script can be used, you can include a `lang` parameter to specify the language of the script. [float] -=== General-purpose languages: +== General-purpose languages These languages can be used for any purpose in the scripting APIs, and give the most flexibility. @@ -29,7 +31,7 @@ and give the most flexibility. |======================================================================= [float] -=== Special-purpose languages: +== Special-purpose languages These languages are less flexible, but typically have higher performance for certain tasks. @@ -67,7 +69,7 @@ sandboxed languages can be a security issue, please read <> for more details. ================================================= - +-- include::scripting/using.asciidoc[] diff --git a/docs/reference/modules/scripting/engine.asciidoc b/docs/reference/scripting/engine.asciidoc similarity index 97% rename from docs/reference/modules/scripting/engine.asciidoc rename to docs/reference/scripting/engine.asciidoc index da3b4529daacc..b4a2cd29fdd72 100644 --- a/docs/reference/modules/scripting/engine.asciidoc +++ b/docs/reference/scripting/engine.asciidoc @@ -1,5 +1,5 @@ [[modules-scripting-engine]] -=== Advanced scripts using script engines +== Advanced scripts using script engines A `ScriptEngine` is a backend for implementing a scripting language. It may also be used to write scripts that need to use advanced internals of scripting. For example, diff --git a/docs/reference/modules/scripting/expression.asciidoc b/docs/reference/scripting/expression.asciidoc similarity index 99% rename from docs/reference/modules/scripting/expression.asciidoc rename to docs/reference/scripting/expression.asciidoc index fb7739261ef4a..fe58cbbdf131b 100644 --- a/docs/reference/modules/scripting/expression.asciidoc +++ b/docs/reference/scripting/expression.asciidoc @@ -1,5 +1,5 @@ [[modules-scripting-expression]] -=== Lucene Expressions Language +== Lucene expressions language Lucene's expressions compile a `javascript` expression to bytecode. 
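For instance, an expression might drive a script-based sort; a sketch, with an illustrative field name:

[source,js]
--------------------------------------------------
GET /my-index/_search
{
  "query": { "match_all": {} },
  "sort": {
    "_script": {
      "type": "number",
      "order": "desc",
      "script": {
        "lang": "expression",
        "source": "doc['popularity'].value * 2"
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE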
They are designed for high-performance custom ranking and sorting functions and are @@ -112,6 +112,7 @@ The following example shows the difference in years between the `date` fields da `doc['date1'].date.year - doc['date0'].date.year` [float] +[[geo-point-field-api]] === `geo_point` field API [cols="<,<",options="header",] |======================================================================= diff --git a/docs/reference/modules/scripting/fields.asciidoc b/docs/reference/scripting/fields.asciidoc similarity index 98% rename from docs/reference/modules/scripting/fields.asciidoc rename to docs/reference/scripting/fields.asciidoc index 681e8d6e54067..cf8905189f799 100644 --- a/docs/reference/modules/scripting/fields.asciidoc +++ b/docs/reference/scripting/fields.asciidoc @@ -1,5 +1,5 @@ [[modules-scripting-fields]] -=== Accessing document fields and special variables +== Accessing document fields and special variables Depending on where a script is used, it will have access to certain special variables and document fields. @@ -17,7 +17,7 @@ API will have access to the `ctx` variable which exposes: `ctx._index` etc:: Access to <>, some of which may be read-only. [float] -== Search and Aggregation scripts +== Search and aggregation scripts With the exception of <> which are executed once per search hit, scripts used in search and aggregations will be @@ -80,7 +80,7 @@ GET my_index/_search [float] [[modules-scripting-doc-vals]] -=== Doc Values +=== Doc values By far the fastest most efficient way to access a field value from a script is to use the `doc['field_name']` syntax, which retrieves the field @@ -140,7 +140,7 @@ access `text` fields from scripts. [float] [[modules-scripting-stored]] -=== Stored Fields and `_source` +=== Stored fields and `_source` _Stored fields_ -- fields explicitly marked as <> -- can be accessed using the diff --git a/docs/reference/modules/scripting/painless.asciidoc b/docs/reference/scripting/painless.asciidoc similarity index 70% rename from docs/reference/modules/scripting/painless.asciidoc rename to docs/reference/scripting/painless.asciidoc index ac48aad73d28f..085399de46f2f 100644 --- a/docs/reference/modules/scripting/painless.asciidoc +++ b/docs/reference/scripting/painless.asciidoc @@ -1,7 +1,7 @@ [[modules-scripting-painless]] -=== Painless Scripting Language +== Painless scripting language -include::../../../painless/painless-description.asciidoc[] +include::../../painless/painless-description.asciidoc[] Ready to start scripting with Painless? See {painless}/painless-getting-started.html[Getting Started with Painless] in the guide to the {painless}/index.html[Painless Scripting Language]. 
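For a first taste of the syntax, a minimal sketch of a Painless script field; the index and field names are illustrative:

[source,js]
--------------------------------------------------
GET /my-index/_search
{
  "query": { "match_all": {} },
  "script_fields": {
    "price_with_tax": {
      "script": {
        "lang": "painless",
        "source": "doc['price'].value * 1.2"
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE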
\ No newline at end of file diff --git a/docs/reference/modules/scripting/security.asciidoc b/docs/reference/scripting/security.asciidoc similarity index 99% rename from docs/reference/modules/scripting/security.asciidoc rename to docs/reference/scripting/security.asciidoc index f1946bd0f2876..421cec2ccf760 100644 --- a/docs/reference/modules/scripting/security.asciidoc +++ b/docs/reference/scripting/security.asciidoc @@ -1,5 +1,5 @@ [[modules-scripting-security]] -=== Scripting and security +== Scripting and security While Elasticsearch contributors make every effort to prevent scripts from running amok, security is something best done in diff --git a/docs/reference/modules/scripting/using.asciidoc b/docs/reference/scripting/using.asciidoc similarity index 86% rename from docs/reference/modules/scripting/using.asciidoc rename to docs/reference/scripting/using.asciidoc index 86202a98dd537..2859f94088c63 100644 --- a/docs/reference/modules/scripting/using.asciidoc +++ b/docs/reference/scripting/using.asciidoc @@ -1,5 +1,5 @@ [[modules-scripting-using]] -=== How to use scripts +== How to use scripts Wherever scripting is supported in the Elasticsearch API, the syntax follows the same pattern: @@ -45,7 +45,7 @@ GET my_index/_search // CONSOLE [float] -=== Script Parameters +=== Script parameters `lang`:: @@ -107,7 +107,7 @@ minute will be compiled. You can change this setting dynamically by setting [float] [[modules-scripting-short-script-form]] -=== Short Script Form +=== Short script form A short script form can be used for brevity. In the short form, `script` is represented by a string instead of an object. This string contains the source of the script. @@ -131,12 +131,13 @@ The same script in the normal form: [float] [[modules-scripting-stored-scripts]] -=== Stored Scripts +=== Stored scripts Scripts may be stored in and retrieved from the cluster state using the `_scripts` end-point. -==== Request Examples +[float] +==== Request examples The following are examples of using a stored script that lives at `/_scripts/{id}`. @@ -194,9 +195,24 @@ DELETE _scripts/calculate-score // CONSOLE // TEST[continued] +[float] +[[modules-scripting-search-templates]] +=== Search templates +You can also use the `_scripts` API to store **search templates**. Search +templates save specific <> with placeholder +values, called template parameters. + +You can use stored search templates to run searches without writing out the +entire query. Just provide the stored template's ID and the template parameters. +This is useful when you want to run a commonly used query quickly and without +mistakes. + +Search templates use the http://mustache.github.io/mustache.5.html[mustache +templating language]. See <> for more information and examples. + [float] [[modules-scripting-using-caching]] -=== Script Caching +=== Script caching All scripts are cached by default so that they only need to be recompiled when updates occur. 
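To make the search template workflow described above concrete, a rough sketch of storing a template and then invoking it; the template id, index, and parameter names are illustrative:

[source,js]
--------------------------------------------------
POST /_scripts/my_search_template
{
  "script": {
    "lang": "mustache",
    "source": {
      "query": {
        "match": { "message": "{{query_string}}" }
      }
    }
  }
}

GET /my-index/_search/template
{
  "id": "my_search_template",
  "params": {
    "query_string": "trying out Elasticsearch"
  }
}
--------------------------------------------------
// NOTCONSOLE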
By default, scripts do not have a time-based expiration, but diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index dd7faca60aa92..0696e6ef8b335 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -22,7 +22,7 @@ the user name: POST /twitter/_doc?routing=kimchy { "user" : "kimchy", - "postDate" : "2009-11-15T14:12:12", + "post_date" : "2009-11-15T14:12:12", "message" : "trying out Elasticsearch" } -------------------------------------------------- @@ -154,12 +154,11 @@ configure a soft limit, you can update the `action.search.shard_count.limit` cluster setting in order to reject search requests that hit too many shards. The request parameter `max_concurrent_shard_requests` can be used to control the -maximum number of concurrent shard requests the search API will execute for the -request. This parameter should be used to protect a single request from +maximum number of concurrent shard requests the search API will execute per node +for the request. This parameter should be used to protect a single request from overloading a cluster (e.g., a default request will hit all indices in a cluster which could cause shard request rejections if the number of shards per node is -high). This default is based on the number of data nodes in the cluster but at -most `256`. +high). This default value is `5`. -- diff --git a/docs/reference/search/field-caps.asciidoc b/docs/reference/search/field-caps.asciidoc index 1df2279c26caf..034bc92930b10 100644 --- a/docs/reference/search/field-caps.asciidoc +++ b/docs/reference/search/field-caps.asciidoc @@ -3,7 +3,7 @@ The field capabilities API allows to retrieve the capabilities of fields among multiple indices. -The field capabilities api by default executes on all indices: +The field capabilities API by default executes on all indices: [source,js] -------------------------------------------------- @@ -11,7 +11,7 @@ GET _field_caps?fields=rating -------------------------------------------------- // CONSOLE -... but the request can also be restricted to specific indices: +The request can also be restricted to specific indices: [source,js] -------------------------------------------------- @@ -29,7 +29,7 @@ Supported request options: [float] === Field Capabilities -The field capabilities api returns the following information per field: +The field capabilities API returns the following information per field: [horizontal] `searchable`:: diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc index 9e3bff3c0c063..87a87c922b37c 100644 --- a/docs/reference/search/multi-search.asciidoc +++ b/docs/reference/search/multi-search.asciidoc @@ -85,15 +85,16 @@ The msearch's `max_concurrent_searches` request parameter can be used to control the maximum number of concurrent searches the multi search api will execute. This default is based on the number of data nodes and the default search thread pool size. -The request parameter `max_concurrent_shard_requests` can be used to control the -maximum number of concurrent shard requests the each sub search request will execute. -This parameter should be used to protect a single request from overloading a cluster -(e.g., a default request will hit all indices in a cluster which could cause shard request rejections -if the number of shards per node is high). 
This default is based on the number of -data nodes in the cluster but at most `256`.In certain scenarios parallelism isn't achieved through -concurrent request such that this protection will result in poor performance. For -instance in an environment where only a very low number of concurrent search requests are expected -it might help to increase this value to a higher number. +The request parameter `max_concurrent_shard_requests` can be used to control +the maximum number of concurrent shard requests that each sub search request +will execute per node. This parameter should be used to protect a single +request from overloading a cluster (e.g., a default request will hit all +indices in a cluster which could cause shard request rejections if the number +of shards per node is high). This default value is `5`.In certain scenarios +parallelism isn't achieved through concurrent request such that this protection +will result in poor performance. For instance in an environment where only a +very low number of concurrent search requests are expected it might help to +increase this value to a higher number. [float] [[msearch-security]] @@ -173,3 +174,8 @@ GET _msearch/template ----------------------------------------------- // CONSOLE // TEST[continued] + +[float] +[[multi-search-partial-responses]] +=== Partial responses +To ensure fast responses, the multi search API will respond with partial results if one or more shards fail. See <> for more information. \ No newline at end of file diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 824a5771ae8d3..09138c78f6442 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -82,7 +82,9 @@ This will yield the following result: "compute_max_score": 0, "compute_max_score_count": 0, "shallow_advance": 0, - "shallow_advance_count": 0 + "shallow_advance_count": 0, + "set_min_competitive_score": 0, + "set_min_competitive_score_count": 0 }, "children": [ { @@ -105,7 +107,9 @@ This will yield the following result: "compute_max_score": 0, "compute_max_score_count": 0, "shallow_advance": 0, - "shallow_advance_count": 0 + "shallow_advance_count": 0, + "set_min_competitive_score": 0, + "set_min_competitive_score_count": 0 } }, { @@ -128,7 +132,9 @@ This will yield the following result: "compute_max_score": 0, "compute_max_score_count": 0, "shallow_advance": 0, - "shallow_advance_count": 0 + "shallow_advance_count": 0, + "set_min_competitive_score": 0, + "set_min_competitive_score_count": 0 } } ] @@ -204,16 +210,16 @@ by a unique ID Because a search request may be executed against one or more shards in an index, and a search may cover one or more indices, the top level element in the profile response is an array of `shard` objects. -Each shard object lists it's `id` which uniquely identifies the shard. The ID's format is +Each shard object lists its `id` which uniquely identifies the shard. The ID's format is `[nodeID][indexName][shardID]`. The profile itself may consist of one or more "searches", where a search is a query executed against the underlying -Lucene index. Most Search Requests submitted by the user will only execute a single `search` against the Lucene index. +Lucene index. Most search requests submitted by the user will only execute a single `search` against the Lucene index. But occasionally multiple searches will be executed, such as including a global aggregation (which needs to execute a secondary "match_all" query for the global context). 
Inside each `search` object there will be two arrays of profiled information: -a `query` array and a `collector` array. Alongside the `search` object is an `aggregations` object that contains the profile information for the aggregations. In the future, more sections may be added, such as `suggest`, `highlight`, etc +a `query` array and a `collector` array. Alongside the `search` object is an `aggregations` object that contains the profile information for the aggregations. In the future, more sections may be added, such as `suggest`, `highlight`, etc. There will also be a `rewrite` metric showing the total time spent rewriting the query (in nanoseconds). @@ -237,6 +243,7 @@ sufficient to see that a particular component of a query is slow, and not necess the `advance` phase of that query is the cause, for example. ======================================= +[[query-section]] ==== `query` Section The `query` section contains detailed timing of the query tree executed by Lucene on a particular shard. @@ -311,7 +318,9 @@ The `breakdown` component lists detailed timing statistics about low-level Lucen "compute_max_score": 0, "compute_max_score_count": 0, "shallow_advance": 0, - "shallow_advance_count": 0 + "shallow_advance_count": 0, + "set_min_competitive_score": 0, + "set_min_competitive_score_count": 0 } -------------------------------------------------- // TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.$_path",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:some message:number",\n"time_in_nanos": $body.$_path,/] @@ -344,12 +353,12 @@ The meaning of the stats are as follows: `build_scorer`:: This parameter shows how long it takes to build a Scorer for the query. A Scorer is the mechanism that - iterates over matching documents generates a score per-document (e.g. how well does "foo" match the document?). + iterates over matching documents and generates a score per-document (e.g. how well does "foo" match the document?). Note, this records the time required to generate the Scorer object, not actually score the documents. Some queries have faster or slower initialization of the Scorer, depending on optimizations, complexity, etc. {empty} + {empty} + - This may also showing timing associated with caching, if enabled and/or applicable for the query + This may also show timing associated with caching, if enabled and/or applicable for the query `next_doc`:: @@ -367,11 +376,11 @@ The meaning of the stats are as follows: {empty} + Conjunctions (e.g. `must` clauses in a boolean) are typical consumers of `advance` -`matches`:: +`match`:: - Some queries, such as phrase queries, match documents using a "Two Phase" process. First, the document is + Some queries, such as phrase queries, match documents using a "two-phase" process. First, the document is "approximately" matched, and if it matches approximately, it is checked a second time with a more rigorous - (and expensive) process. The second phase verification is what the `matches` statistic measures. + (and expensive) process. The second phase verification is what the `match` statistic measures. {empty} + {empty} + For example, a phrase query first checks a document approximately by ensuring all terms in the phrase are @@ -380,21 +389,22 @@ The meaning of the stats are as follows: of the terms. 
{empty} + {empty} + - Because this two-phase process is only used by a handful of queries, the `metric` statistic will often be zero + Because this two-phase process is only used by a handful of queries, the `match` statistic is often zero `score`:: - This records the time taken to score a particular document via it's Scorer + This records the time taken to score a particular document via its Scorer `*_count`:: Records the number of invocations of the particular method. For example, `"next_doc_count": 2,` means the `nextDoc()` method was called on two different documents. This can be used to help judge how selective queries are, by comparing counts between different query components. +[[collectors-section]] ==== `collectors` Section The Collectors portion of the response shows high-level execution details. Lucene works by defining a "Collector" -which is responsible for coordinating the traversal, scoring and collection of matching documents. Collectors +which is responsible for coordinating the traversal, scoring, and collection of matching documents. Collectors are also how a single query can record aggregation results, execute unscoped "global" queries, execute post-query filters, etc. @@ -422,16 +432,16 @@ Looking at the previous example: // TESTRESPONSE[s/(?<=[" ])\d+(\.\d+)?/$body.$_path/] We see a single collector named `SimpleTopScoreDocCollector` wrapped into `CancellableCollector`. `SimpleTopScoreDocCollector` is the default "scoring and sorting" -`Collector` used by Elasticsearch. The `reason` field attempts to give a plain english description of the class name. The +`Collector` used by Elasticsearch. The `reason` field attempts to give a plain English description of the class name. The `time_in_nanos` is similar to the time in the Query tree: a wall-clock time inclusive of all children. Similarly, `children` lists all sub-collectors. The `CancellableCollector` that wraps `SimpleTopScoreDocCollector` is used by Elasticsearch to detect if the current search was cancelled and stop collecting documents as soon as it occurs. -It should be noted that Collector times are **independent** from the Query times. They are calculated, combined +It should be noted that Collector times are **independent** from the Query times. They are calculated, combined, and normalized independently! Due to the nature of Lucene's execution, it is impossible to "merge" the times from the Collectors into the Query section, so they are displayed in separate portions. -For reference, the various collector reason's are: +For reference, the various collector reasons are: [horizontal] `search_sorted`:: @@ -457,7 +467,7 @@ For reference, the various collector reason's are: `search_multi`:: A collector that wraps several other collectors. This is seen when combinations of search, aggregations, - global aggs and post_filters are combined in a single search. + global aggs, and post_filters are combined in a single search. `search_timeout`:: @@ -473,11 +483,11 @@ For reference, the various collector reason's are: `global_aggregation`:: A collector that executes an aggregation against the global query scope, rather than the specified query. 
- Because the global scope is necessarily different from the executed query, it must execute it's own + Because the global scope is necessarily different from the executed query, it must execute its own match_all query (which you will see added to the Query section) to collect your entire dataset - +[[rewrite-section]] ==== `rewrite` Section All queries in Lucene undergo a "rewriting" process. A query (and its sub-queries) may be rewritten one or @@ -575,7 +585,9 @@ And the response: "compute_max_score": 0, "compute_max_score_count": 0, "shallow_advance": 0, - "shallow_advance_count": 0 + "shallow_advance_count": 0, + "set_min_competitive_score": 0, + "set_min_competitive_score_count": 0 } }, { @@ -598,7 +610,9 @@ And the response: "compute_max_score": 0, "compute_max_score_count": 0, "shallow_advance": 0, - "shallow_advance_count": 0 + "shallow_advance_count": 0, + "set_min_competitive_score": 0, + "set_min_competitive_score_count": 0 } } ], @@ -648,9 +662,9 @@ And the response: // TESTRESPONSE[s/\.\.\.//] // TESTRESPONSE[s/(?<=[" ])\d+(\.\d+)?/$body.$_path/] // TESTRESPONSE[s/"id": "\[P6-vulHtQRWuD4YnubWb7A\]\[test\]\[0\]"/"id": $body.profile.shards.0.id/] -<1> The ``"aggregations"` portion has been omitted because it will be covered in the next section +<1> The `"aggregations"` portion has been omitted because it will be covered in the next section -As you can see, the output is significantly verbose from before. All the major portions of the query are +As you can see, the output is significantly more verbose than before. All the major portions of the query are represented: 1. The first `TermQuery` (user:test) represents the main `term` query @@ -662,14 +676,14 @@ The Collector tree is fairly straightforward, showing how a single CancellableCo ==== Understanding MultiTermQuery output -A special note needs to be made about the `MultiTermQuery` class of queries. This includes wildcards, regex and fuzzy +A special note needs to be made about the `MultiTermQuery` class of queries. This includes wildcards, regex, and fuzzy queries. These queries emit very verbose responses, and are not overly structured. Essentially, these queries rewrite themselves on a per-segment basis. If you imagine the wildcard query `b*`, it technically can match any token that begins with the letter "b". It would be impossible to enumerate all possible combinations, -so Lucene rewrites the query in context of the segment being evaluated. E.g. one segment may contain the tokens +so Lucene rewrites the query in context of the segment being evaluated, e.g., one segment may contain the tokens `[bar, baz]`, so the query rewrites to a BooleanQuery combination of "bar" and "baz". Another segment may only have the -token `[bakery]`, so query rewrites to a single TermQuery for "bakery". +token `[bakery]`, so the query rewrites to a single TermQuery for "bakery". Due to this dynamic, per-segment rewriting, the clean tree structure becomes distorted and no longer follows a clean "lineage" showing how one query rewrites into the next. 
At present time, all we can do is apologize, and suggest you @@ -682,6 +696,7 @@ Hopefully this will be fixed in future iterations, but it is a tricky problem to [[search-profile-aggregations]] === Profiling Aggregations +[[agg-section]] ==== `aggregations` Section @@ -729,7 +744,7 @@ GET /twitter/_search // TEST[s/_search/_search\?filter_path=profile.shards.aggregations/] // TEST[continued] -Which yields the following aggregation profile output +This yields the following aggregation profile output: [source,js] -------------------------------------------------- @@ -797,7 +812,7 @@ Which yields the following aggregation profile output From the profile structure we can see that the `my_scoped_agg` is internally being run as a `LongTermsAggregator` (because the field it is aggregating, `likes`, is a numeric field). At the same level, we see a `GlobalAggregator` which comes from `my_global_agg`. That -aggregation then has a child `LongTermsAggregator` which from the second terms aggregation on `likes`. +aggregation then has a child `LongTermsAggregator` which comes from the second term's aggregation on `likes`. The `time_in_nanos` field shows the time executed by each aggregation, and is inclusive of all children. While the overall time is useful, the `breakdown` field will give detailed stats about how the time was spent. @@ -859,7 +874,7 @@ The meaning of the stats are as follows: ==== Performance Notes Like any profiler, the Profile API introduces a non-negligible overhead to search execution. The act of instrumenting -low-level method calls such as `collect`, `advance` and `next_doc` can be fairly expensive, since these methods are called +low-level method calls such as `collect`, `advance`, and `next_doc` can be fairly expensive, since these methods are called in tight loops. Therefore, profiling should not be enabled in production settings by default, and should not be compared against non-profiled query times. Profiling is just a diagnostic tool. @@ -871,11 +886,11 @@ not have a drastic effect compared to other components in the profiled query. ==== Limitations - Profiling currently does not measure the search fetch phase nor the network overhead -- Profiling also does not account for time spent in the queue, merging shard responses on the coordinating node or -additional work like e.g. building global ordinals (an internal data structure used to speed up search) +- Profiling also does not account for time spent in the queue, merging shard responses on the coordinating node, or +additional work such as building global ordinals (an internal data structure used to speed up search) - Profiling statistics are currently not available for suggestions, highlighting, `dfs_query_then_fetch` - Profiling of the reduce phase of aggregation is currently not available - The Profiler is still highly experimental. The Profiler is instrumenting parts of Lucene that were never designed to be exposed in this manner, and so all results should be viewed as a best effort to provide detailed -diagnostics. We hope to improve this over time. If you find obviously wrong numbers, strange query structures or +diagnostics. We hope to improve this over time. If you find obviously wrong numbers, strange query structures, or other bugs, please report them! 
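If you want to experiment with the aggregation statistics described above outside of a production cluster, a request along the following lines is enough to produce a profile. This is only a sketch: the `twitter` index and numeric `likes` field are assumed for consistency with the other examples on this page, and the aggregation itself is arbitrary.

[source,js]
--------------------------------------------------
GET /twitter/_search
{
  "profile": true,
  "size": 0,
  "aggs": {
    "likes_histogram": {
      "histogram": {
        "field": "likes",
        "interval": 10
      }
    }
  }
}
--------------------------------------------------
// CONSOLE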
diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index c549b5e7a689b..0ec2e070b1c74 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -1,7 +1,7 @@ [[search-rank-eval]] == Ranking Evaluation API -experimental[The ranking evaluation API is experimental and may be changed or removed completely in a future release, as well as change in non-backwards compatible ways on minor versions updates. Elastic will take a best effort approach to fix any issues, but experimental features are not subject to the support SLA of official GA features.] +experimental["The ranking evaluation API is experimental and may be changed or removed completely in a future release, as well as change in non-backwards compatible ways on minor versions updates. Elastic will take a best effort approach to fix any issues, but experimental features are not subject to the support SLA of official GA features."] The ranking evaluation API allows to evaluate the quality of ranked search results over a set of typical search queries. Given this set of queries and a @@ -44,7 +44,7 @@ GET /my_index/_rank_eval "mean_reciprocal_rank": { ... } <3> } } ------------------------------- +----------------------------- // NOTCONSOLE <1> a set of typical search requests, together with their provided ratings @@ -77,7 +77,7 @@ The request section contains several search requests typical to your application ] } ] ------------------------------- +----------------------------- // NOTCONSOLE <1> the search requests id, used to group result details later @@ -111,7 +111,7 @@ GET /my_index/_rank_eval ], "requests": [ { - "id": "amsterdam_query" + "id": "amsterdam_query", "ratings": [ ... ], "template_id": "match_one_field_query", <3> "params": { <4> @@ -136,6 +136,7 @@ The `metric` section determines which of the available evaluation metrics is goi Currently, the following metrics are supported: [float] +[[k-precision]] ==== Precision at K (P@k) This metric measures the number of relevant results in the top k search results. Its a form of the well known https://en.wikipedia.org/wiki/Information_retrieval#Precision[Precision] metric that only looks at the top k documents. It is the fraction of relevant documents in those first k diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc index 120c4c6757599..a4a9846283151 100644 --- a/docs/reference/search/request-body.asciidoc +++ b/docs/reference/search/request-body.asciidoc @@ -113,11 +113,12 @@ And here is a sample response: reduce the memory overhead per search request if the potential number of shards in the request can be large. +[[ccs-minimize-roundtrips]] `ccs_minimize_roundtrips`:: Defaults to `true`. Set to `false` to disable minimizing network round-trips between the coordinating node and the remote clusters when executing - cross-cluster search requests. See <> for more. + {ccs} requests. See <> for more. Out of the above, the `search_type`, `request_cache` and the `allow_partial_search_results` @@ -188,46 +189,48 @@ to the client. This means it includes the time spent waiting in thread pools, executing a distributed search across the whole cluster and gathering all the results. 
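As a rough illustration of the two fragmenters, the request below asks the `plain` highlighter to use the `span` fragmenter. It is only a sketch: the `twitter` index and `message` field are assumed here to match the other highlighting examples, and `fragment_size` is set low simply to make the fragmenting behaviour visible.

[source,js]
--------------------------------------------------
GET /twitter/_search
{
  "query": {
    "match_phrase": { "message": "number 1" }
  },
  "highlight": {
    "fields": {
      "message": {
        "type": "plain",
        "fragmenter": "span",
        "fragment_size": 15
      }
    }
  }
}
--------------------------------------------------
// CONSOLE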
-include::request/query.asciidoc[] +include::request/docvalue-fields.asciidoc[] -include::request/from-size.asciidoc[] +include::request/explain.asciidoc[] -include::request/sort.asciidoc[] +include::request/collapse.asciidoc[] -include::request/track-total-hits.asciidoc[] +include::request/from-size.asciidoc[] -include::request/source-filtering.asciidoc[] +include::request/highlighting.asciidoc[] -include::request/stored-fields.asciidoc[] +include::request/index-boost.asciidoc[] -include::request/script-fields.asciidoc[] +include::request/inner-hits.asciidoc[] -include::request/docvalue-fields.asciidoc[] +include::request/min-score.asciidoc[] + +include::request/named-queries-and-filters.asciidoc[] include::request/post-filter.asciidoc[] -include::request/highlighting.asciidoc[] +include::request/preference.asciidoc[] + +include::request/query.asciidoc[] include::request/rescore.asciidoc[] -include::request/search-type.asciidoc[] +include::request/script-fields.asciidoc[] include::request/scroll.asciidoc[] -include::request/preference.asciidoc[] - -include::request/explain.asciidoc[] +include::request/search-after.asciidoc[] -include::request/version-and-seq-no.asciidoc[] +include::request/search-type.asciidoc[] -include::request/index-boost.asciidoc[] +include::request/seq-no.asciidoc[] -include::request/min-score.asciidoc[] +include::request/sort.asciidoc[] -include::request/named-queries-and-filters.asciidoc[] +include::request/source-filtering.asciidoc[] -include::request/inner-hits.asciidoc[] +include::request/stored-fields.asciidoc[] -include::request/collapse.asciidoc[] +include::request/track-total-hits.asciidoc[] -include::request/search-after.asciidoc[] +include::request/version.asciidoc[] diff --git a/docs/reference/search/request/docvalue-fields.asciidoc b/docs/reference/search/request/docvalue-fields.asciidoc index 6697b5bb3e383..784cc94015366 100644 --- a/docs/reference/search/request/docvalue-fields.asciidoc +++ b/docs/reference/search/request/docvalue-fields.asciidoc @@ -67,3 +67,7 @@ on their mappings: `long`, `double` and other numeric fields are formatted as numbers, `keyword` fields are formatted as strings, `date` fields are formatted with the configured `date` format, etc. +NOTE: On its own, `docvalue_fields` cannot be used to load fields in nested +objects -- if a field contains a nested object in its path, then no data will +be returned for that docvalue field. To access nested fields, `docvalue_fields` +must be used within an <> block. \ No newline at end of file diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index ad836c7c535e7..347c2c2dbf255 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -175,7 +175,7 @@ snippets: `simple` or `span`. Only valid for the `plain` highlighter. Defaults to `span`. `simple`::: Breaks up text into same-sized fragments. -`span`::: Breaks up text into same-sized fragments, but tried to avoid +`span`::: Breaks up text into same-sized fragments, but tries to avoid breaking up text between highlighted terms. This is helpful when you're querying for phrases. Default. 
diff --git a/docs/reference/search/request/script-fields.asciidoc b/docs/reference/search/request/script-fields.asciidoc index da5868ea7d65e..1bd61e0048182 100644 --- a/docs/reference/search/request/script-fields.asciidoc +++ b/docs/reference/search/request/script-fields.asciidoc @@ -33,7 +33,7 @@ GET /_search // CONSOLE // TEST[setup:sales] -Script fields can work on fields that are not stored (`my_field_name` in +Script fields can work on fields that are not stored (`price` in the above case), and allow to return custom values to be returned (the evaluated value of the script). diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index ebc2f0aca6cb0..3503ad2c9c826 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -103,6 +103,12 @@ GET /_search?scroll=1m [[scroll-search-context]] ==== Keeping the search context alive +A scroll returns all the documents which matched the search at the time of the +initial search request. It ignores any subsequent changes to these documents. +The `scroll_id` identifies a _search context_ which keeps track of everything +that {es} needs to return the correct documents. The search context is created +by the initial request and kept alive by subsequent requests. + The `scroll` parameter (passed to the `search` request and to every `scroll` request) tells Elasticsearch how long it should keep the search context alive. Its value (e.g. `1m`, see <>) does not need to be long enough to @@ -112,17 +118,21 @@ new expiry time. If a `scroll` request doesn't pass in the `scroll` parameter, then the search context will be freed as part of _that_ `scroll` request. -Normally, the background merge process optimizes the -index by merging together smaller segments to create new bigger segments, at -which time the smaller segments are deleted. This process continues during -scrolling, but an open search context prevents the old segments from being -deleted while they are still in use. This is how Elasticsearch is able to -return the results of the initial search request, regardless of subsequent -changes to documents. - -TIP: Keeping older segments alive means that more file handles are needed. -Ensure that you have configured your nodes to have ample free file handles. -See <>. +Normally, the background merge process optimizes the index by merging together +smaller segments to create new, bigger segments. Once the smaller segments are +no longer needed they are deleted. This process continues during scrolling, but +an open search context prevents the old segments from being deleted since they +are still in use. + +TIP: Keeping older segments alive means that more disk space and file handles +are needed. Ensure that you have configured your nodes to have ample free file +handles. See <>. + +Additionally, if a segment contains deleted or updated documents then the +search context must keep track of whether each document in the segment was live +at the time of the initial search request. Ensure that your nodes have +sufficient heap space if you have many open scrolls on an index that is subject +to ongoing deletes or updates. NOTE: To prevent against issues caused by having too many scrolls open, the user is not allowed to open scrolls past a certain limit. 
By default, the diff --git a/docs/reference/search/request/version-and-seq-no.asciidoc b/docs/reference/search/request/seq-no.asciidoc similarity index 60% rename from docs/reference/search/request/version-and-seq-no.asciidoc rename to docs/reference/search/request/seq-no.asciidoc index 2bca4c985b290..0ab7bec4487d9 100644 --- a/docs/reference/search/request/version-and-seq-no.asciidoc +++ b/docs/reference/search/request/seq-no.asciidoc @@ -15,20 +15,3 @@ GET /_search } -------------------------------------------------- // CONSOLE - -[[search-request-version]] -=== Version - -Returns a version for each search hit. - -[source,js] --------------------------------------------------- -GET /_search -{ - "version": true, - "query" : { - "term" : { "user" : "kimchy" } - } -} --------------------------------------------------- -// CONSOLE diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index bd8c0d1ad5c27..e449fb581688c 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -82,6 +82,10 @@ to. The `mode` option can have the following values: `median`:: Use the median of all values as sort value. Only applicable for number based array fields. +The default sort mode in the ascending sort order is `min` -- the lowest value +is picked. The default sort mode in the descending order is `max` -- +the highest value is picked. + ===== Sort mode example usage In the example below the field price has multiple prices per document. diff --git a/docs/reference/search/request/stored-fields.asciidoc b/docs/reference/search/request/stored-fields.asciidoc index 2feb9313d8a10..b55e0fce45757 100644 --- a/docs/reference/search/request/stored-fields.asciidoc +++ b/docs/reference/search/request/stored-fields.asciidoc @@ -1,5 +1,5 @@ [[search-request-stored-fields]] -=== Fields +=== Stored Fields WARNING: The `stored_fields` parameter is about fields that are explicitly marked as stored in the mapping, which is off by default and generally not recommended. @@ -49,6 +49,11 @@ Script fields can also be automatically detected and used as fields, so things like `_source.obj1.field1` can be used, though not recommended, as `obj1.field1` will work as well. +NOTE: On its own, `stored_fields` cannot be used to load fields in nested +objects -- if a field contains a nested object in its path, then no data will +be returned for that stored field. To access nested fields, `stored_fields` +must be used within an <> block. + ==== Disable stored fields entirely To disable the stored fields (and metadata fields) entirely use: `_none_`: diff --git a/docs/reference/search/request/track-total-hits.asciidoc b/docs/reference/search/request/track-total-hits.asciidoc index c416c777366e4..210f6321816e0 100644 --- a/docs/reference/search/request/track-total-hits.asciidoc +++ b/docs/reference/search/request/track-total-hits.asciidoc @@ -13,7 +13,7 @@ of hits after a certain threshold. When set to `true` the search response will always track the number of hits that match the query accurately (e.g. `total.relation` will always be equal to `"eq"` -when `track_total_hits is set to true). Otherwise the `"total.relation"` returned +when `track_total_hits` is set to true). Otherwise the `"total.relation"` returned in the `"total"` object in the search response determines how the `"total.value"` should be interpreted. 
A value of `"gte"` means that the `"total.value"` is a lower bound of the total hits that match the query and a value of `"eq"` indicates @@ -178,4 +178,4 @@ GET twitter/_search <1> The total number of hits is unknown. Finally you can force an accurate count by setting `"track_total_hits"` -to `true` in the request. \ No newline at end of file +to `true` in the request. diff --git a/docs/reference/search/request/version.asciidoc b/docs/reference/search/request/version.asciidoc new file mode 100644 index 0000000000000..57c6ce27feb91 --- /dev/null +++ b/docs/reference/search/request/version.asciidoc @@ -0,0 +1,16 @@ +[[search-request-version]] +=== Version + +Returns a version for each search hit. + +[source,js] +-------------------------------------------------- +GET /_search +{ + "version": true, + "query" : { + "term" : { "user" : "kimchy" } + } +} +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/search/search-template.asciidoc b/docs/reference/search/search-template.asciidoc index 6b96564e9c0fe..121c4955d8e2b 100644 --- a/docs/reference/search/search-template.asciidoc +++ b/docs/reference/search/search-template.asciidoc @@ -32,7 +32,209 @@ disable scripts per type and context as described in the <> [float] -==== More template examples +==== Examples + +[float] +[[pre-registered-templates]] +===== Store a search template + +You can store a search template using the stored scripts API. + +[source,js] +------------------------------------------ +POST _scripts/ +{ + "script": { + "lang": "mustache", + "source": { + "query": { + "match": { + "title": "{{query_string}}" + } + } + } + } +} +------------------------------------------ +// CONSOLE +// TEST[continued] + +////////////////////////// + +We want to be sure that the template has been created, +because we'll use it later. + +[source,js] +-------------------------------------------------- +{ + "acknowledged" : true +} +-------------------------------------------------- +// TESTRESPONSE + +////////////////////////// + +This template can be retrieved by + +[source,js] +------------------------------------------ +GET _scripts/ +------------------------------------------ +// CONSOLE +// TEST[continued] + +which is rendered as: + +[source,js] +------------------------------------------ +{ + "script" : { + "lang" : "mustache", + "source" : "{\"query\":{\"match\":{\"title\":\"{{query_string}}\"}}}", + "options": { + "content_type" : "application/json; charset=UTF-8" + } + }, + "_id": "", + "found": true +} +------------------------------------------ +// TESTRESPONSE + +This template can be deleted by + +[source,js] +------------------------------------------ +DELETE _scripts/ +------------------------------------------ +// CONSOLE +// TEST[continued] + +////////////////////////// + +We want to be sure that the template has been created, +because we'll use it later. + +[source,js] +-------------------------------------------------- +{ + "acknowledged" : true +} +-------------------------------------------------- +// TESTRESPONSE + +////////////////////////// + +[float] +[[use-registered-templates]] +===== Use a stored search template + +To use a stored template at search time use: + +[source,js] +------------------------------------------ +GET _search/template +{ + "id": "", <1> + "params": { + "query_string": "search for these words" + } +} +------------------------------------------ +// CONSOLE +// TEST[catch:missing] +<1> Name of the stored template script. 
+ +[float] +[[_validating_templates]] +==== Validate a search template + +A template can be rendered in a response with given parameters using + +[source,js] +------------------------------------------ +GET _render/template +{ + "source": "{ \"query\": { \"terms\": {{#toJson}}statuses{{/toJson}} }}", + "params": { + "statuses" : { + "status": [ "pending", "published" ] + } + } +} +------------------------------------------ +// CONSOLE + +This call will return the rendered template: + +[source,js] +------------------------------------------ +{ + "template_output": { + "query": { + "terms": { + "status": [ <1> + "pending", + "published" + ] + } + } + } +} +------------------------------------------ +// TESTRESPONSE +<1> `status` array has been populated with values from the `params` object. + +Stored templates can also be rendered using + +[source,js] +------------------------------------------ +GET _render/template/ +{ + "params": { + "..." + } +} +------------------------------------------ +// NOTCONSOLE + +[float] +===== Explain + +You can use `explain` parameter when running a template: + +[source,js] +------------------------------------------ +GET _search/template +{ + "id": "my_template", + "params": { + "status": [ "pending", "published" ] + }, + "explain": true +} +------------------------------------------ +// CONSOLE +// TEST[catch:missing] + +[float] +===== Profiling + +You can use `profile` parameter when running a template: + +[source,js] +------------------------------------------ +GET _search/template +{ + "id": "my_template", + "params": { + "status": [ "pending", "published" ] + }, + "profile": true +} +------------------------------------------ +// CONSOLE +// TEST[catch:missing] [float] ===== Filling in a query string with a single value @@ -289,14 +491,14 @@ The `params` would look like: "params": { "text": "words to search for", "line_no": { <1> - "start": 10, <1> - "end": 20 <1> + "start": 10, + "end": 20 } } } ------------------------------------------ // NOTCONSOLE -<1> All three of these elements are optional. +<1> The `line_no`, `start`, and `end` parameters are optional. We could write the query as: @@ -317,13 +519,13 @@ We could write the query as: {{#start}} <3> "gte": "{{start}}" <4> {{#end}},{{/end}} <5> - {{/start}} <3> + {{/start}} {{#end}} <6> "lte": "{{end}}" <7> - {{/end}} <6> + {{/end}} } } - {{/line_no}} <2> + {{/line_no}} } } } @@ -397,204 +599,6 @@ The previous query will be rendered as: ------------------------------------------ // TESTRESPONSE - -[float] -[[pre-registered-templates]] -===== Pre-registered template - -You can register search templates by using the stored scripts api. - -[source,js] ------------------------------------------- -POST _scripts/ -{ - "script": { - "lang": "mustache", - "source": { - "query": { - "match": { - "title": "{{query_string}}" - } - } - } - } -} ------------------------------------------- -// CONSOLE -// TEST[continued] - -////////////////////////// - -We want to be sure that the template has been created, -because we'll use it later. 
- -[source,js] --------------------------------------------------- -{ - "acknowledged" : true -} --------------------------------------------------- -// TESTRESPONSE - -////////////////////////// - -This template can be retrieved by - -[source,js] ------------------------------------------- -GET _scripts/ ------------------------------------------- -// CONSOLE -// TEST[continued] - -which is rendered as: - -[source,js] ------------------------------------------- -{ - "script" : { - "lang" : "mustache", - "source" : "{\"query\":{\"match\":{\"title\":\"{{query_string}}\"}}}", - "options": { - "content_type" : "application/json; charset=UTF-8" - } - }, - "_id": "", - "found": true -} ------------------------------------------- -// TESTRESPONSE - -This template can be deleted by - -[source,js] ------------------------------------------- -DELETE _scripts/ ------------------------------------------- -// CONSOLE -// TEST[continued] - -////////////////////////// - -We want to be sure that the template has been created, -because we'll use it later. - -[source,js] --------------------------------------------------- -{ - "acknowledged" : true -} --------------------------------------------------- -// TESTRESPONSE - -////////////////////////// - -To use a stored template at search time use: - -[source,js] ------------------------------------------- -GET _search/template -{ - "id": "", <1> - "params": { - "query_string": "search for these words" - } -} ------------------------------------------- -// CONSOLE -// TEST[catch:missing] -<1> Name of the stored template script. - -[float] -==== Validating templates - -A template can be rendered in a response with given parameters using - -[source,js] ------------------------------------------- -GET _render/template -{ - "source": "{ \"query\": { \"terms\": {{#toJson}}statuses{{/toJson}} }}", - "params": { - "statuses" : { - "status": [ "pending", "published" ] - } - } -} ------------------------------------------- -// CONSOLE - -This call will return the rendered template: - -[source,js] ------------------------------------------- -{ - "template_output": { - "query": { - "terms": { - "status": [ <1> - "pending", - "published" - ] - } - } - } -} ------------------------------------------- -// TESTRESPONSE -<1> `status` array has been populated with values from the `params` object. - -Pre-registered templates can also be rendered using - -[source,js] ------------------------------------------- -GET _render/template/ -{ - "params": { - "..." 
- } -} ------------------------------------------- -// NOTCONSOLE - -[float] -===== Explain - -You can use `explain` parameter when running a template: - -[source,js] ------------------------------------------- -GET _search/template -{ - "id": "my_template", - "params": { - "status": [ "pending", "published" ] - }, - "explain": true -} ------------------------------------------- -// CONSOLE -// TEST[catch:missing] - -[float] -===== Profiling - -You can use `profile` parameter when running a template: - -[source,js] ------------------------------------------- -GET _search/template -{ - "id": "my_template", - "params": { - "status": [ "pending", "published" ] - }, - "profile": true -} ------------------------------------------- -// CONSOLE -// TEST[catch:missing] - [[multi-search-template]] == Multi Search Template diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index 8154f2e701be2..f72eb7d68227e 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -37,4 +37,9 @@ Or we can search across all available indices using `_all`: GET /_all/_search?q=tag:wow --------------------------------------------------- // CONSOLE -// TEST[setup:twitter] \ No newline at end of file +// TEST[setup:twitter] + +[float] +[[search-partial-responses]] +=== Partial responses +To ensure fast responses, the search API will respond with partial results if one or more shards fail. See <> for more information. \ No newline at end of file diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index c0b527c06e550..72ca2c33ebab0 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -43,12 +43,9 @@ PUT music Mapping supports the following parameters: +[horizontal] `analyzer`:: The index analyzer to use, defaults to `simple`. - In case you are wondering why we did not opt for the `standard` - analyzer: We try to have easy to understand behaviour here, and if you - index the field content `At the Drive-in`, you will not get any - suggestions for `a`, nor for `d` (the first non stopword). `search_analyzer`:: The search analyzer to use, defaults to value of `analyzer`. @@ -70,7 +67,7 @@ Mapping supports the following parameters: Limits the length of a single input, defaults to `50` UTF-16 code points. This limit is only used at index time to reduce the total number of characters per input string in order to prevent massive inputs from - bloating the underlying datastructure. Most usecases won't be influenced + bloating the underlying datastructure. Most use cases won't be influenced by the default value since prefix completions seldom grow beyond prefixes longer than a handful of characters. @@ -97,6 +94,7 @@ PUT music/_doc/1?refresh The following parameters are supported: +[horizontal] `input`:: The input to store, this can be an array of strings or just a string. This field is mandatory. @@ -285,6 +283,7 @@ Which should look like: The basic completion suggester query supports the following parameters: +[horizontal] `field`:: The name of the field on which to run the query (required). `size`:: The number of suggestions to return (defaults to `5`). `skip_duplicates`:: Whether duplicate suggestions should be filtered out (defaults to `false`). 
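Putting the query parameters above together, a minimal suggest request might look like the sketch below. It assumes the `music` index and the `suggest` completion field created earlier on this page; the prefix and suggestion name are arbitrary.

[source,js]
--------------------------------------------------
POST music/_search?pretty
{
  "suggest": {
    "song-suggest": {
      "prefix": "nir",
      "completion": {
        "field": "suggest",
        "size": 5,
        "skip_duplicates": true
      }
    }
  }
}
--------------------------------------------------
// CONSOLE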
@@ -326,13 +325,13 @@ POST music/_search?pretty -------------------------------------------------- // CONSOLE -WARNING: when set to true this option can slow down search because more suggestions +WARNING: When set to true, this option can slow down search because more suggestions need to be visited to find the top N. [[fuzzy]] ==== Fuzzy queries -The completion suggester also supports fuzzy queries - this means, +The completion suggester also supports fuzzy queries -- this means you can have a typo in your search and still get results back. [source,js] diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc index 9c2c56cc40fec..9bf28eff9d933 100644 --- a/docs/reference/search/suggesters/phrase-suggest.asciidoc +++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc @@ -33,12 +33,12 @@ PUT test "trigram": { "type": "custom", "tokenizer": "standard", - "filter": ["shingle"] + "filter": ["lowercase","shingle"] }, "reverse": { "type": "custom", "tokenizer": "standard", - "filter": ["reverse"] + "filter": ["lowercase","reverse"] } }, "filter": { @@ -139,21 +139,21 @@ The response contains suggestions scored by the most likely spell correction fir [horizontal] `field`:: - the name of the field used to do n-gram lookups for the + The name of the field used to do n-gram lookups for the language model, the suggester will use this field to gain statistics to score corrections. This field is mandatory. `gram_size`:: - sets max size of the n-grams (shingles) in the `field`. - If the field doesn't contain n-grams (shingles) this should be omitted + Sets max size of the n-grams (shingles) in the `field`. + If the field doesn't contain n-grams (shingles), this should be omitted or set to `1`. Note that Elasticsearch tries to detect the gram size - based on the specified `field`. If the field uses a `shingle` filter the + based on the specified `field`. If the field uses a `shingle` filter, the `gram_size` is set to the `max_shingle_size` if not explicitly set. `real_word_error_likelihood`:: - the likelihood of a term being a + The likelihood of a term being a misspelled even if the term exists in the dictionary. The default is - `0.95` corresponding to 5% of the real words are misspelled. + `0.95`, meaning 5% of the real words are misspelled. `confidence`:: @@ -165,33 +165,33 @@ The response contains suggestions scored by the most likely spell correction fir to `0.0` the top N candidates are returned. The default is `1.0`. `max_errors`:: - the maximum percentage of the terms that at most + The maximum percentage of the terms considered to be misspellings in order to form a correction. This method accepts a float value in the range `[0..1)` as a fraction of the actual query terms or a number `>=1` as an absolute number of query terms. The - default is set to `1.0` which corresponds to that only corrections with - at most 1 misspelled term are returned. Note that setting this too high - can negatively impact performance. Low values like `1` or `2` are recommended + default is set to `1.0`, meaning only corrections with + at most one misspelled term are returned. Note that setting this too high + can negatively impact performance. Low values like `1` or `2` are recommended; otherwise the time spend in suggest calls might exceed the time spend in query execution. `separator`:: - the separator that is used to separate terms in the + The separator that is used to separate terms in the bigram field. 
If not set the whitespace character is used as a separator. `size`:: - the number of candidates that are generated for each - individual query term Low numbers like `3` or `5` typically produce good + The number of candidates that are generated for each + individual query term. Low numbers like `3` or `5` typically produce good results. Raising this can bring up terms with higher edit distances. The default is `5`. `analyzer`:: - Sets the analyzer to analyse to suggest text with. + Sets the analyzer to analyze to suggest text with. Defaults to the search analyzer of the suggest field passed via `field`. `shard_size`:: - Sets the maximum number of suggested term to be + Sets the maximum number of suggested terms to be retrieved from each individual shard. During the reduce phase, only the top N suggestions are returned based on the `size` option. Defaults to `5`. @@ -202,7 +202,7 @@ The response contains suggestions scored by the most likely spell correction fir `highlight`:: Sets up suggestion highlighting. If not provided then no `highlighted` field is returned. If provided must - contain exactly `pre_tag` and `post_tag` which are + contain exactly `pre_tag` and `post_tag`, which are wrapped around the changed tokens. If multiple tokens in a row are changed the entire phrase of changed tokens is wrapped rather than each token. @@ -217,7 +217,7 @@ The response contains suggestions scored by the most likely spell correction fir variable, which should be used in your query. You can still specify your own template `params` -- the `suggestion` value will be added to the variables you specify. Additionally, you can specify a `prune` to control - if all phrase suggestions will be returned, when set to `true` the suggestions + if all phrase suggestions will be returned; when set to `true` the suggestions will have an additional option `collate_match`, which will be `true` if matching documents for the phrase was found, `false` otherwise. The default value for `prune` is `false`. @@ -267,38 +267,63 @@ POST _search The `phrase` suggester supports multiple smoothing models to balance weight between infrequent grams (grams (shingles) are not existing in -the index) and frequent grams (appear at least once in the index). +the index) and frequent grams (appear at least once in the index). The +smoothing model can be selected by setting the `smoothing` parameter +to one of the following options. Each smoothing model supports specific +properties that can be configured. [horizontal] `stupid_backoff`:: - a simple backoff model that backs off to lower + A simple backoff model that backs off to lower order n-gram models if the higher order count is `0` and discounts the lower order n-gram model by a constant factor. The default `discount` is `0.4`. Stupid Backoff is the default model. `laplace`:: - a smoothing model that uses an additive smoothing where a + A smoothing model that uses an additive smoothing where a constant (typically `1.0` or smaller) is added to all counts to balance - weights, The default `alpha` is `0.5`. + weights. The default `alpha` is `0.5`. `linear_interpolation`:: - a smoothing model that takes the weighted - mean of the unigrams, bigrams and trigrams based on user supplied + A smoothing model that takes the weighted + mean of the unigrams, bigrams, and trigrams based on user supplied weights (lambdas). Linear Interpolation doesn't have any default values. All parameters (`trigram_lambda`, `bigram_lambda`, `unigram_lambda`) must be supplied. 
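For comparison with the `laplace` example that follows, a `linear_interpolation` request could be sketched as below. The index, field, and lambda values are assumptions; the three lambdas here are simply chosen to sum to `1.0`.

[source,js]
--------------------------------------------------
POST _search
{
  "suggest": {
    "text": "obel prize",
    "simple_phrase": {
      "phrase": {
        "field": "title.trigram",
        "size": 1,
        "smoothing": {
          "linear_interpolation": {
            "trigram_lambda": 0.7,
            "bigram_lambda": 0.2,
            "unigram_lambda": 0.1
          }
        }
      }
    }
  }
}
--------------------------------------------------
// CONSOLE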
+[source,js] +-------------------------------------------------- +POST _search +{ + "suggest": { + "text" : "obel prize", + "simple_phrase" : { + "phrase" : { + "field" : "title.trigram", + "size" : 1, + "smoothing" : { + "laplace" : { + "alpha" : 0.7 + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + ==== Candidate Generators The `phrase` suggester uses candidate generators to produce a list of possible terms per term in the given text. A single candidate generator is similar to a `term` suggester called for each individual term in the text. The output of the generators is subsequently scored in combination -with the candidates from the other terms to for suggestion candidates. +with the candidates from the other terms for suggestion candidates. Currently only one type of candidate generator is supported, the `direct_generator`. The Phrase suggest API accepts a list of generators -under the key `direct_generator` each of the generators in the list are +under the key `direct_generator`; each of the generators in the list is called per term in the original text. ==== Direct Generators @@ -320,7 +345,7 @@ The direct generators support the following parameters: as an optimization to generate fewer suggestions to test on each shard and are not rechecked when combining the suggestions generated on each shard. Thus `missing` will generate suggestions for terms on shards that do - not contain them even other shards do contain them. Those should be + not contain them even if other shards do contain them. Those should be filtered out using `confidence`. Three possible values can be specified: ** `missing`: Only generate suggestions for terms that are not in the shard. This is the default. @@ -332,7 +357,7 @@ The direct generators support the following parameters: `max_edits`:: The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. Can only be a value between 1 - and 2. Any other value result in an bad request error being thrown. + and 2. Any other value results in a bad request error being thrown. Defaults to 2. `prefix_length`:: @@ -347,7 +372,7 @@ The direct generators support the following parameters: `max_inspections`:: A factor that is used to multiply with the - `shards_size` in order to inspect more candidate spell corrections on + `shards_size` in order to inspect more candidate spelling corrections on the shard level. Can improve accuracy at the cost of performance. Defaults to 5. @@ -356,32 +381,31 @@ The direct generators support the following parameters: suggestion should appear in. This can be specified as an absolute number or as a relative percentage of number of documents. This can improve quality by only suggesting high frequency terms. Defaults to 0f and is - not enabled. If a value higher than 1 is specified then the number + not enabled. If a value higher than 1 is specified, then the number cannot be fractional. The shard level document frequencies are used for this option. `max_term_freq`:: - The maximum threshold in number of documents a + The maximum threshold in number of documents in which a suggest text token can exist in order to be included. Can be a relative - percentage number (e.g 0.4) or an absolute number to represent document - frequencies. If an value higher than 1 is specified then fractional can + percentage number (e.g., 0.4) or an absolute number to represent document + frequencies. If a value higher than 1 is specified, then fractional can not be specified. Defaults to 0.01f. 
This can be used to exclude high - frequency terms from being spellchecked. High frequency terms are - usually spelled correctly on top of this also improves the spellcheck + frequency terms -- which are usually spelled correctly -- from being spellchecked. This also improves the spellcheck performance. The shard level document frequencies are used for this option. `pre_filter`:: - a filter (analyzer) that is applied to each of the + A filter (analyzer) that is applied to each of the tokens passed to this candidate generator. This filter is applied to the original token before candidates are generated. `post_filter`:: - a filter (analyzer) that is applied to each of the + A filter (analyzer) that is applied to each of the generated tokens before they are passed to the actual phrase scorer. -The following example shows a `phrase` suggest call with two generators, -the first one is using a field containing ordinary indexed terms and the +The following example shows a `phrase` suggest call with two generators: +the first one is using a field containing ordinary indexed terms, and the second one uses a field that uses terms indexed with a `reverse` filter (tokens are index in reverse order). This is used to overcome the limitation of the direct generators to require a constant prefix to provide @@ -416,6 +440,6 @@ POST _search `pre_filter` and `post_filter` can also be used to inject synonyms after candidates are generated. For instance for the query `captain usq` we -might generate a candidate `usa` for term `usq` which is a synonym for -`america` which allows to present `captain america` to the user if this +might generate a candidate `usa` for the term `usq`, which is a synonym for +`america`. This allows us to present `captain america` to the user if this phrase scores high enough. diff --git a/docs/reference/search/suggesters/term-suggest.asciidoc b/docs/reference/search/suggesters/term-suggest.asciidoc index 65b5c3dd9ac8b..bcfd6a9f12946 100644 --- a/docs/reference/search/suggesters/term-suggest.asciidoc +++ b/docs/reference/search/suggesters/term-suggest.asciidoc @@ -53,9 +53,6 @@ doesn't take the query into account that is part of request. ==== Other term suggest options: [horizontal] -`lowercase_terms`:: - Lower cases the suggest text terms after text analysis. - `max_edits`:: The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. Can only be a value diff --git a/docs/reference/settings/audit-settings.asciidoc b/docs/reference/settings/audit-settings.asciidoc index d4762d9f42fb1..78f76c302c268 100644 --- a/docs/reference/settings/audit-settings.asciidoc +++ b/docs/reference/settings/audit-settings.asciidoc @@ -1,13 +1,13 @@ [role="xpack"] [[auditing-settings]] -=== Auditing Security Settings +=== Auditing security settings ++++ -Auditing Settings +Auditing settings ++++ All of these settings can be added to the `elasticsearch.yml` configuration file. For more information, see -{xpack-ref}/auditing.html[Auditing Security Events]. +{stack-ov}/auditing.html[Auditing Security Events]. [[general-audit-settings]] ==== General Auditing Settings diff --git a/docs/reference/settings/ccr-settings.asciidoc b/docs/reference/settings/ccr-settings.asciidoc new file mode 100644 index 0000000000000..286bb421662ff --- /dev/null +++ b/docs/reference/settings/ccr-settings.asciidoc @@ -0,0 +1,52 @@ +[role="xpack"] +[[ccr-settings]] +=== {ccr-cap} settings + +These {ccr} settings can be dynamically updated on a live cluster with the +<>. 
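As a hedged sketch of what such a dynamic update looks like, using `ccr.indices.recovery.max_bytes_per_sec` (described in the next section) with an arbitrary value:

[source,js]
--------------------------------------------------
PUT /_cluster/settings
{
  "persistent": {
    "ccr.indices.recovery.max_bytes_per_sec": "20mb"
  }
}
--------------------------------------------------
// CONSOLE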
+ +[float] +[[ccr-recovery-settings]] +==== Remote recovery settings + +The following setting can be used to rate-limit the data transmitted during +{stack-ov}/remote-recovery.html[remote recoveries]: + +`ccr.indices.recovery.max_bytes_per_sec` (<>):: +Limits the total inbound and outbound remote recovery traffic on each node. +Since this limit applies on each node, but there may be many nodes performing +remote recoveries concurrently, the total amount of remote recovery bytes may be +much higher than this limit. If you set this limit too high then there is a risk +that ongoing remote recoveries will consume an excess of bandwidth (or other +resources) which could destabilize the cluster. This setting is used by both the +leader and follower clusters. For example if it is set to `20mb` on a leader, +the leader will only send `20mb/s` to the follower even if the follower is +requesting and can accept `60mb/s`. Defaults to `40mb`. + +[float] +[[ccr-advanced-recovery-settings]] +==== Advanced remote recovery settings + +The following _expert_ settings can be set to manage the resources consumed by +remote recoveries: + +`ccr.indices.recovery.max_concurrent_file_chunks` (<>):: +Controls the number of file chunk requests that can be sent in parallel per +recovery. As multiple remote recoveries might already running in parallel, +increasing this expert-level setting might only help in situations where remote +recovery of a single shard is not reaching the total inbound and outbound remote recovery traffic as configured by `ccr.indices.recovery.max_bytes_per_sec`. +Defaults to `5`. The maximum allowed value is `10`. + +`ccr.indices.recovery.chunk_size`(<>):: +Controls the chunk size requested by the follower during file transfer. Defaults to +`1mb`. + +`ccr.indices.recovery.recovery_activity_timeout`(<>):: +Controls the timeout for recovery activity. This timeout primarily applies on +the leader cluster. The leader cluster must open resources in-memory to supply +data to the follower during the recovery process. If the leader does not receive recovery requests from the follower for this period of time, it will close the resources. Defaults to 60 seconds. + +`ccr.indices.recovery.internal_action_timeout` (<>):: +Controls the timeout for individual network requests during the remote recovery +process. An individual action timing out can fail the recovery. Defaults to +60 seconds. diff --git a/docs/reference/settings/configuring-xes.asciidoc b/docs/reference/settings/configuring-xes.asciidoc deleted file mode 100644 index 29c6b95dddf0f..0000000000000 --- a/docs/reference/settings/configuring-xes.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -[role="xpack"] -[[settings-xpack]] -== {xpack} Settings in {es} -++++ -{xpack} Settings -++++ - -include::{asciidoc-dir}/../../shared/settings.asciidoc[] -include::license-settings.asciidoc[] -include::ml-settings.asciidoc[] -include::notification-settings.asciidoc[] -include::sql-settings.asciidoc[] diff --git a/docs/reference/settings/ilm-settings.asciidoc b/docs/reference/settings/ilm-settings.asciidoc new file mode 100644 index 0000000000000..0f0d94cedc2b5 --- /dev/null +++ b/docs/reference/settings/ilm-settings.asciidoc @@ -0,0 +1,19 @@ +[role="xpack"] +[[ilm-settings]] +=== {ilm-cap} settings + +These index-level {ilm-init} settings are typically configured through index +templates. For more information, see <>. + +`index.lifecycle.name`:: +The name of the policy to use to manage the index. 
+ +`index.lifecycle.rollover_alias`:: +The index alias to update when the index rolls over. Specify when using a +policy that contains a rollover action. When the index rolls over, the alias is +updated to reflect that the index is no longer the write index. For more +information about rollover, see <>. + +`indices.lifecycle.poll_interval`:: +(<>) How often {ilm} checks for indices that meet policy +criteria. Defaults to `10m`. diff --git a/docs/reference/settings/license-settings.asciidoc b/docs/reference/settings/license-settings.asciidoc index 791d3f61d4598..d344d2e6532e7 100644 --- a/docs/reference/settings/license-settings.asciidoc +++ b/docs/reference/settings/license-settings.asciidoc @@ -1,13 +1,10 @@ [role="xpack"] [[license-settings]] -=== {xpack} License Settings -++++ -License Settings -++++ +=== License settings You can configure this licensing setting in the `elasticsearch.yml` file. For more information, see -{xpack-ref}/license-management.html[{xpack} License Management]. +{stack-ov}/license-management.html[License management]. `xpack.license.self_generated.type`:: Set to `basic` (default) to enable basic {xpack} features. + diff --git a/docs/reference/settings/ml-settings.asciidoc b/docs/reference/settings/ml-settings.asciidoc index 113f264a31331..3b957eb68654c 100644 --- a/docs/reference/settings/ml-settings.asciidoc +++ b/docs/reference/settings/ml-settings.asciidoc @@ -7,6 +7,11 @@ You do not need to configure any settings to use {ml}. It is enabled by default. +IMPORTANT: {ml-cap} uses SSE4.2 instructions, so will only work on machines whose +CPUs https://en.wikipedia.org/wiki/SSE4#Supporting_CPUs[support] SSE4.2. If you +run {es} on older hardware you must disable {ml} (by setting `xpack.ml.enabled` +to `false`). + All of these settings can be added to the `elasticsearch.yml` configuration file. The dynamic settings can also be updated across a cluster with the <>. diff --git a/docs/reference/settings/monitoring-settings.asciidoc b/docs/reference/settings/monitoring-settings.asciidoc index c633088bc5ed4..6cd5604ae1f04 100644 --- a/docs/reference/settings/monitoring-settings.asciidoc +++ b/docs/reference/settings/monitoring-settings.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [[monitoring-settings]] -=== Monitoring settings in Elasticsearch +=== Monitoring settings in Elasticsearch ++++ Monitoring settings ++++ @@ -8,8 +8,8 @@ By default, monitoring is enabled but data collection is disabled. To enable data collection, use the `xpack.monitoring.collection.enabled` setting. -You can configure these monitoring settings in the `elasticsearch.yml` file. -Some of them can also be set across the cluster by using the +You can configure these monitoring settings in the `elasticsearch.yml` file. You +can also dynamically set some of these settings using the <>. TIP: Cluster settings take precedence over settings in the `elasticsearch.yml` @@ -22,12 +22,10 @@ Logstash, configure {logstash-ref}/configuring-logstash.html#monitoring-settings[`xpack.monitoring` settings] in `logstash.yml`. -For more information, see -{xpack-ref}/xpack-monitoring.html[Monitoring the Elastic Stack]. - [float] [[general-monitoring-settings]] ==== General Monitoring Settings + `xpack.monitoring.enabled`:: Set to `true` (default) to enable {es} {monitoring} for {es} on the node. + @@ -41,7 +39,8 @@ to `true`. Its default value is `false`. ==== Monitoring Collection Settings The `xpack.monitoring.collection` settings control how data is collected from -your Elasticsearch nodes. +your Elasticsearch nodes. 
You can dynamically change all monitoring collection +settings using the <>. `xpack.monitoring.collection.enabled` (<>):: @@ -53,8 +52,7 @@ ignored. `xpack.monitoring.collection.interval` (<>):: Setting to `-1` to disable data collection is no longer supported beginning with -7.0.0. deprecated[6.3.0, Use `xpack.monitoring.collection.enabled` set to -`false` instead.] +7.0.0. deprecated[6.3.0, Use `xpack.monitoring.collection.enabled` set to `false` instead.] + Controls how often data samples are collected. Defaults to `10s`. If you modify the collection interval, set the `xpack.monitoring.min_interval_seconds` @@ -68,9 +66,13 @@ all monitoring collection. However, this setting simply disables the collection data while still allowing other data (e.g., Kibana, Logstash, Beats, or APM Server monitoring data) to pass through this cluster. -`xpack.monitoring.collection.cluster.stats.timeout`:: +`xpack.monitoring.collection.cluster.stats.timeout` (<>):: + +(<>) Timeout for collecting the cluster statistics. Defaults to `10s`. + +`xpack.monitoring.collection.node.stats.timeout` (<>):: -Sets the timeout for collecting the cluster statistics. Defaults to `10s`. +(<>) Timeout for collecting the node statistics. Defaults to `10s`. `xpack.monitoring.collection.indices` (<>):: @@ -81,22 +83,22 @@ monitor all indexes that start with `test` except for `test3`. System indices li always start with a `.`, and generally should be monitored. Consider adding `.*` to the list of indices ensure monitoring of system indices. For example `.*,test*,-test3` -`xpack.monitoring.collection.index.stats.timeout`:: +`xpack.monitoring.collection.index.stats.timeout` (<>):: -Sets the timeout for collecting index statistics. Defaults to `10s`. +(<>) Timeout for collecting index statistics. Defaults to `10s`. -`xpack.monitoring.collection.index.recovery.active_only`:: +`xpack.monitoring.collection.index.recovery.active_only` (<>):: Controls whether or not all recoveries are collected. Set to `true` to collect only active recoveries. Defaults to `false`. -`xpack.monitoring.collection.index.recovery.timeout`:: +`xpack.monitoring.collection.index.recovery.timeout` (<>):: -Sets the timeout for collecting the recovery information. Defaults to `10s`. +(<>) Timeout for collecting the recovery information. Defaults to `10s`. -`xpack.monitoring.history.duration`:: +`xpack.monitoring.history.duration` (<>):: -Sets the retention duration beyond which the indices created by a Monitoring +(<>) Retention duration beyond which the indices created by a Monitoring exporter are automatically deleted. Defaults to `7d` (7 days). + -- @@ -112,9 +114,9 @@ the `http` exporter will not be deleted automatically. Configures where the agent stores monitoring data. By default, the agent uses a local exporter that indexes monitoring data on the cluster where it is installed. Use an HTTP exporter to send data to a separate monitoring cluster. For more -information, see <>, -<>, and -{xpack-ref}/how-monitoring-works.html[How Monitoring Works]. +information, see <>, +<>, and +<>. [float] [[local-exporter-settings]] @@ -202,12 +204,12 @@ The password for the `auth.username`. `connection.timeout`:: -The amount of time that the HTTP connection is supposed to wait for a socket to open for the +(<>) Amount of time that the HTTP connection is supposed to wait for a socket to open for the request. The default value is `6s`. 
`connection.read_timeout`:: -The amount of time that the HTTP connection is supposed to wait for a socket to +(<>) Amount of time that the HTTP connection is supposed to wait for a socket to send back a response. The default value is `10 * connection.timeout` (`60s` if neither are set). `ssl`:: @@ -278,5 +280,6 @@ For example: `["elasticsearch_version_mismatch","xpack_license_expiration"]`. :component: {monitoring} :verifies: :server!: +:ssl-context: monitoring include::ssl-settings.asciidoc[] diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index c3f0ca3b8ce36..a50024837157b 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -1,13 +1,14 @@ [role="xpack"] [[notification-settings]] -=== {watcher} Settings in Elasticsearch +=== {watcher} settings in Elasticsearch + +[subs="attributes"] ++++ -{watcher} Settings +{watcher} settings ++++ You configure {watcher} settings to set up {watcher} and send notifications via <>, -<>, <>, and <>. @@ -26,18 +27,16 @@ Set to `false` to disable {watcher} on the node. `xpack.watcher.encrypt_sensitive_data`:: Set to `true` to encrypt sensitive data. If this setting is enabled, you must also specify the `xpack.watcher.encryption_key` setting. For more -information, see -{xpack-ref}/encrypting-data.html[Encrypting sensitive data in {watcher}]. +information, see <>. `xpack.watcher.encryption_key` (<>):: Specifies the path to a file that contains a key for encrypting sensitive data. If `xpack.watcher.encrypt_sensitive_data` is set to `true`, this setting is -required. For more information, see -{xpack-ref}/encrypting-data.html[Encrypting sensitive data in {watcher}]. +required. For more information, see <>. `xpack.watcher.history.cleaner_service.enabled`:: -added[6.3.0,Default changed to `true`.] -deprecated[7.0.0,Watcher history indices are now managed by the `watch-history-ilm-policy` ILM policy] +added:[6.3.0,Default changed to `true`.] +deprecated:[7.0.0,Watcher history indices are now managed by the `watch-history-ilm-policy` ILM policy] + Set to `true` (default) to enable the cleaner service. If this setting is `true`, the `xpack.monitoring.enabled` setting must also be set to `true` with @@ -67,10 +66,10 @@ Specifies the maximum size an HTTP response is allowed to have, defaults to `xpack.http.whitelist`:: A list of URLs, that the internal HTTP client is allowed to connect to. This -client is used in the HTTP input, the webhook, the slack, pagerduty, hipchat +client is used in the HTTP input, the webhook, the slack, pagerduty, and jira actions. This setting can be updated dynamically. It defaults to `*` allowing everything. Note: If you configure this setting and you are using one -of the slack/pagerduty/hipchat actions, you have to ensure that the +of the slack/pagerduty actions, you have to ensure that the corresponding endpoints are whitelisted as well. [[ssl-notification-settings]] @@ -78,6 +77,7 @@ corresponding endpoints are whitelisted as well. :component: {watcher} :verifies: :server!: +:ssl-context: watcher include::ssl-settings.asciidoc[] @@ -86,22 +86,25 @@ include::ssl-settings.asciidoc[] ==== Email Notification Settings You can configure the following email notification settings in `elasticsearch.yml`. For more information about sending notifications -via email, see {xpack-ref}/actions-email.html#configuring-email-actions[Configuring Email]. +via email, see <>. 
`xpack.notification.email.account`:: Specifies account information for sending notifications via email. You can specify the following email account attributes: - ++ +-- [[email-account-attributes]] + `profile` (<>);; - The {xpack-ref}/actions-email.html#configuring-email[email profile] to use to build the MIME + The <> to use to build the MIME messages that are sent from the account. Valid values: `standard`, `gmail` and `outlook`. Defaults to `standard`. `email_defaults.*` (<>);; An optional set of email attributes to use as defaults - for the emails sent from the account. See {xpack-ref}/actions-email.html#email-action-attributes[ - Email Action Attributes] for the supported attributes. + for the emails sent from the account. See + <> for the supported + attributes. `smtp.auth` (<>);; Set to `true` to attempt to authenticate the user using the @@ -158,14 +161,17 @@ can specify the following email account attributes: `smtp.wait_on_quit` (<>);; If set to false the QUIT command is sent and the connection closed. If set to true, the QUIT command is sent and a reply is waited for. True by default. +-- `xpack.notification.email.html.sanitization.allow`:: Specifies the HTML elements that are allowed in email notifications. For -more information, see {xpack-ref}/actions-email.html#email-html-sanitization[Configuring HTML -Sanitization Options]. You can specify individual HTML elements -and the following HTML feature groups: - +more information, see +<>. You can +specify individual HTML elements and the following HTML feature groups: ++ +-- [[html-feature-groups]] + `_tables`;; All table related elements: ``, `` and `
<td>`, `<tr>
`. @@ -197,6 +203,7 @@ and the following HTML feature groups: `img:embedded`;; Only embedded images. Embedded images can only use the `cid:` URL protocol in their `src` attribute. +-- `xpack.notification.email.html.sanitization.disallow`:: Specifies the HTML elements that are NOT allowed in email notifications. @@ -207,65 +214,18 @@ HTML feature groups>>. Set to `false` to completely disable HTML sanitation. Not recommended. Defaults to `true`. -[float] -[[hipchat-notification-settings]] -==== HipChat Notification Settings -You can configure the following HipChat notification settings in -`elasticsearch.yml`. For more information about sending notifications -via HipChat, see {xpack-ref}/actions-hipchat.html#configuring-hipchat-actions[Configuring HipChat]. - -`xpack.notification.hipchat` :: -Specifies account information for sending notifications -via HipChat. You can specify the following HipChat account attributes: - -[[hipchat-account-attributes]] - `profile`;; - The HipChat account profile to use: `integration`, - `user`, or `v1`. Required. - - `secure_auth_token` (<>);; - The authentication token to use to access the HipChat API. Required. - - `host`;; - The HipChat server hostname. Defaults to `api.hipchat.com`. - - `port`;; - The HipChat server port number. Defaults to 443. - - `room`;; - The room you want to send messages to. Must be specified - if the `profile` is set to `integration`. Not valid for - the `user` or `vi` profiles. - - `user`;; - The HipChat user account to use to send messages. - Specified as an email address. Must be specified if the - `profile` is set to `user`. Not valid for the `integration` - or `v1` profiles. - - `message.format`;; - The format of the message: `text` or `html`. - Defaults to `html`. - - `message.color`;; - The background color of the notification in the room. - Defaults to `yellow`. - `message.notify`;; - Indicates whether people in the room should be - actively notified. Defaults to `false`. - - [float] [[slack-notification-settings]] ==== Slack Notification Settings You can configure the following Slack notification settings in `elasticsearch.yml`. For more information about sending notifications -via Slack, see {xpack-ref}/actions-slack.html#configuring-slack-actions[Configuring Slack]. +via Slack, see <>. `xpack.notification.slack` :: Specifies account information for sending notifications via Slack. You can specify the following Slack account attributes: - ++ +-- [[slack-account-attributes]] `secure_url` (<>);; @@ -293,19 +253,20 @@ via Slack. You can specify the following Slack account attributes: Specified as an array as defined in the https://api.slack.com/docs/attachments[ Slack attachments documentation]. - +-- [float] [[jira-notification-settings]] ==== Jira Notification Settings You can configure the following Jira notification settings in `elasticsearch.yml`. For more information about using notifications -to create issues in Jira, see {xpack-ref}/actions-jira.html#configuring-jira-actions[Configuring Jira]. +to create issues in Jira, see <>. `xpack.notification.jira` :: Specifies account information for using notifications to create issues in Jira. You can specify the following Jira account attributes: - ++ +-- [[jira-account-attributes]] `secure_url` (<>);; @@ -319,54 +280,58 @@ issues in Jira. You can specify the following Jira account attributes: `issue_defaults`;; Default fields values for the issue created in Jira. See - {xpack-ref}/actions-jira.html#jira-action-attributes[Jira Action Attributes] for more information. 
+ <> for more information. Optional. - +-- [float] [[pagerduty-notification-settings]] ==== PagerDuty Notification Settings You can configure the following PagerDuty notification settings in `elasticsearch.yml`. For more information about sending notifications -via PagerDuty, see {xpack-ref}/actions-pagerduty.html#configuring-pagerduty-actions[Configuring PagerDuty]. +via PagerDuty, see <>. [[pagerduty-account-attributes]] `xpack.notification.pagerduty`:: Specifies account information for sending notifications via PagerDuty. You can specify the following PagerDuty account attributes: ++ +-- +`name`;; +A name for the PagerDuty account associated with the API key you +are using to access PagerDuty. Required. + +`secure_service_api_key` (<>);; +The https://developer.pagerduty.com/documentation/rest/authentication[ +PagerDuty API key] to use to access PagerDuty. Required. +-- ++ +`event_defaults`;; +Default values for +<>. +Optional. ++ +-- +`description`:: +A string that contains the default description for PagerDuty events. +If no default is configured, each PagerDuty action must specify a +`description`. - `name`;; - A name for the PagerDuty account associated with the API key you - are using to access PagerDuty. Required. - - `secure_service_api_key` (<>);; - The https://developer.pagerduty.com/documentation/rest/authentication[ - PagerDuty API key] to use to access PagerDuty. Required. - - - `event_defaults`;; - Default values for {xpack-ref}/actions-pagerduty.html#pagerduty-event-trigger-incident-attributes[ - PagerDuty event attributes]. Optional. - - `description`:: - A string that contains the default description for PagerDuty events. - If no default is configured, each PagerDuty action must specify a - `description`. - - `incident_key`:: - A string that contains the default incident key to use when sending - PagerDuty events. +`incident_key`:: +A string that contains the default incident key to use when sending +PagerDuty events. - `client`:: - A string that specifies the default monitoring client. +`client`:: +A string that specifies the default monitoring client. - `client_url`:: - The URL of the default monitoring client. +`client_url`:: +The URL of the default monitoring client. - `event_type`:: - The default event type. Valid values: `trigger`,`resolve`, `acknowledge`. +`event_type`:: +The default event type. Valid values: `trigger`,`resolve`, `acknowledge`. - `attach_payload`:: - Whether or not to provide the watch payload as context for - the event by default. Valid values: `true`, `false`. +`attach_payload`:: +Whether or not to provide the watch payload as context for +the event by default. Valid values: `true`, `false`. +-- \ No newline at end of file diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index b767b7869dbd9..dd27adec330c7 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -206,7 +206,7 @@ information, see {stack-ov}/setting-up-authentication.html[Setting up authentica ===== Settings valid for all realms `type`:: -The type of the realm: `native, `ldap`, `active_directory`, `pki`, or `file`. Required. +The type of the realm: `native`, `ldap`, `active_directory`, `pki`, or `file`. Required. `order`:: The priority of the realm within the realm chain. Realms with a lower order are @@ -278,10 +278,13 @@ Defaults to `true`. The `type` setting must be set to `ldap`. 
In addition to the <>, you can specify the following settings: -`url`:: Specifies one or more LDAP URLs in the format -`ldap[s]://:`. Multiple URLs can be defined using a comma -separated value or array syntax: `[ "ldaps://server1:636", "ldaps://server2:636" ]`. -`ldaps` and `ldap` URL protocols cannot be mixed in the same realm. Required. +`url`:: +One or more LDAP URLs in the `ldap[s]://:` format. Required. ++ +To provide multiple URLs, use a YAML array (`["ldap://server1:636", "ldap://server2:636"]`) +or comma-separated string (`"ldap://server1:636, ldap://server2:636"`). ++ +While both are supported, you can't mix the `ldap` and `ldaps` protocols. `load_balance.type`:: The behavior to use when there are multiple LDAP URLs defined. For supported @@ -547,11 +550,18 @@ The `type` setting must be set to `active_directory`. In addition to the the following settings: `url`:: -An LDAP URL of the form `ldap[s]://:`. {es} attempts to -authenticate against this URL. If the URL is not specified, it is derived from -the `domain_name` setting and assumes an unencrypted connection to port 389. -Defaults to `ldap://:389`. This setting is required when connecting -using SSL/TLS or when using a custom port. +One or more LDAP URLs in the `ldap[s]://:` format. Defaults to +`ldap://:389`. This setting is required when connecting using +SSL/TLS or when using a custom port. ++ +To provide multiple URLs, use a YAML array (`["ldap://server1:636", "ldap://server2:636"]`) +or comma-separated string (`"ldap://server1:636, ldap://server2:636"`). ++ +While both are supported, you can't mix the `ldap` and `ldaps` protocols. ++ +If no URL is provided, {es} uses a default of `ldap://:389`. This +default uses the `domain_name` setting value and assumes an unencrypted +connection to port 389. `load_balance.type`:: The behavior to use when there are multiple LDAP URLs defined. For supported @@ -1234,10 +1244,15 @@ through the list of URLs will continue until a successful connection is made. [[ssl-tls-settings]] ==== Default values for TLS/SSL settings In general, the values below represent the default values for the various TLS -settings. For more information, see +settings. +The prefixes for these settings are based on the context in which they are +used (e.g. `xpack.security.authc.realms.ldap.corp_ldap.ssl.verification_mode` +or `xpack.security.transport.ssl.supported_protocols`). + +For more information, see {stack-ov}/encrypting-communications.html[Encrypting communications]. -`ssl.supported_protocols`:: +`*.ssl.supported_protocols`:: Supported protocols with versions. Valid protocols: `SSLv2Hello`, `SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`, `TLSv1.3`. Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`. @@ -1247,7 +1262,7 @@ NOTE: If `xpack.security.fips_mode.enabled` is `true`, you cannot use `SSLv2Hell or `SSLv3`. See <>. -- -`ssl.client_authentication`:: +`*.ssl.client_authentication`:: Controls the server's behavior in regard to requesting a certificate from client connections. Valid values are `required`, `optional`, and `none`. `required` forces a client to present a certificate, while `optional` @@ -1255,7 +1270,7 @@ requests a client certificate but the client is not required to present one. Defaults to `required`, except for HTTP, which defaults to `none`. See <>. -`ssl.verification_mode`:: +`*.ssl.verification_mode`:: Controls the verification of certificates. 
Valid values are: - `full`, which verifies that the provided certificate is signed by a trusted authority (CA) and also verifies that the server's hostname (or IP @@ -1270,7 +1285,7 @@ Controls the verification of certificates. Valid values are: + The default value is `full`. -`ssl.cipher_suites`:: +`*.ssl.cipher_suites`:: Supported cipher suites can be found in Oracle's http://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html[ Java Cryptography Architecture documentation]. Defaults to `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256`, `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256`, `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA`, `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`, @@ -1310,6 +1325,7 @@ a PKCS#12 container includes trusted certificate ("anchor") entries look for :client-auth-default: none :verifies!: :server: +:ssl-context: security-http include::ssl-settings.asciidoc[] @@ -1319,6 +1335,7 @@ include::ssl-settings.asciidoc[] :client-auth-default!: :verifies: :server: +:ssl-context: security-transport include::ssl-settings.asciidoc[] diff --git a/docs/reference/settings/sql-settings.asciidoc b/docs/reference/settings/sql-settings.asciidoc index baefe008c4206..3a109627440f4 100644 --- a/docs/reference/settings/sql-settings.asciidoc +++ b/docs/reference/settings/sql-settings.asciidoc @@ -1,8 +1,8 @@ [role="xpack"] [[sql-settings]] -=== SQL Access Settings in Elasticsearch +=== SQL access settings in Elasticsearch ++++ -SQL Access Settings +SQL access settings ++++ SQL Access is enabled by default. You can configure diff --git a/docs/reference/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc index a4422b8fb2d3c..a9c8576a8c4e9 100644 --- a/docs/reference/settings/ssl-settings.asciidoc +++ b/docs/reference/settings/ssl-settings.asciidoc @@ -1,4 +1,3 @@ - ==== {component} TLS/SSL Settings You can configure the following TLS/SSL settings. If the settings are not configured, the {ref}/security-settings.html#ssl-tls-settings[Default TLS/SSL Settings] @@ -39,7 +38,7 @@ endif::verifies[] Supported cipher suites can be found in Oracle's http://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html[ Java Cryptography Architecture documentation]. Defaults to ``. - +[#{ssl-context}-tls-ssl-key-trusted-certificate-settings] ===== {component} TLS/SSL Key and Trusted Certificate Settings The following settings are used to specify a private key, certificate, and the @@ -106,6 +105,7 @@ Password to the truststore. +{ssl-prefix}.ssl.truststore.secure_password+ (<>):: Password to the truststore. +[#{ssl-context}-pkcs12-files] ===== PKCS#12 Files {es} can be configured to use PKCS#12 container files (`.p12` or `.pfx` files) @@ -144,6 +144,7 @@ Password to the PKCS#12 file. +{ssl-prefix}.ssl.truststore.secure_password+ (<>):: Password to the PKCS#12 file. +[#{ssl-context}-pkcs11-tokens] ===== PKCS#11 Tokens {es} can be configured to use a PKCS#11 token that contains the private key, diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index 0d04fd37ffba6..8fd5a44443ddc 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -23,19 +23,17 @@ platforms, but it is possible that it will work on other platforms too. [[jvm-version]] == Java (JVM) Version -Elasticsearch is built using Java, and requires at least -http://www.oracle.com/technetwork/java/javase/downloads/index.html[Java {jdk_major}] -in order to run. Only Oracle's Java and the OpenJDK are supported. 
The same JVM -version should be used on all Elasticsearch nodes and clients. - -We recommend installing Java version *{jdk} or a later version in the Java -{jdk_major} release series*. We recommend using a -link:/support/matrix[supported] +Elasticsearch is built using Java, and includes a bundled version of +http://openjdk.java.net[OpenJDK] from the JDK maintainers (GPLv2+CE) +within each distribution. The bundled JVM is the recommended JVM and +is located within the `jdk` directory of the Elasticsearch home directory. + +To use your own version of Java, set the `JAVA_HOME` environment variable. +If you must use a version of Java that is different from the bundled JVM, +we recommend using a link:/support/matrix[supported] http://www.oracle.com/technetwork/java/eol-135779.html[LTS version of Java]. Elasticsearch will refuse to start if a known-bad version of Java is used. - -The version of Java that Elasticsearch will use can be configured by setting -the `JAVA_HOME` environment variable. +The bundled JVM directory may be removed when using your own JVM. -- @@ -49,6 +47,24 @@ include::setup/secure-settings.asciidoc[] include::setup/logging-config.asciidoc[] +include::settings/audit-settings.asciidoc[] + +include::settings/ccr-settings.asciidoc[] + +include::settings/ilm-settings.asciidoc[] + +include::settings/license-settings.asciidoc[] + +include::settings/ml-settings.asciidoc[] + +include::settings/monitoring-settings.asciidoc[] + +include::settings/security-settings.asciidoc[] + +include::settings/sql-settings.asciidoc[] + +include::settings/notification-settings.asciidoc[] + include::setup/important-settings.asciidoc[] include::setup/sysconfig.asciidoc[] diff --git a/docs/reference/setup/bootstrap-checks-xes.asciidoc b/docs/reference/setup/bootstrap-checks-xes.asciidoc index df020bbd96276..4cf55a0072380 100644 --- a/docs/reference/setup/bootstrap-checks-xes.asciidoc +++ b/docs/reference/setup/bootstrap-checks-xes.asciidoc @@ -14,8 +14,7 @@ If you use {watcher} and have chosen to encrypt sensitive data (by setting the secure settings store. To pass this bootstrap check, you must set the `xpack.watcher.encryption_key` -on each node in the cluster. For more information, see -{xpack-ref}/encrypting-data.html[Encrypting Sensitive Data in {watcher}]. +on each node in the cluster. For more information, see <>. [float] === PKI realm check @@ -53,9 +52,8 @@ must also be valid. === SSL/TLS check //See TLSLicenseBootstrapCheck.java -In 6.0 and later releases, if you have a gold, platinum, or enterprise license -and {es} {security-features} are enabled, you must configure SSL/TLS for -internode-communication. +If you enable {es} {security-features}, unless you have a trial license, you +must configure SSL/TLS for internode-communication. NOTE: Single-node clusters that use a loopback interface do not have this requirement. For more information, see diff --git a/docs/reference/setup/important-settings/discovery-settings.asciidoc b/docs/reference/setup/important-settings/discovery-settings.asciidoc index 9650a2b1abd13..d0f82619f897e 100644 --- a/docs/reference/setup/important-settings/discovery-settings.asciidoc +++ b/docs/reference/setup/important-settings/discovery-settings.asciidoc @@ -39,9 +39,10 @@ first election. In <>, with no discovery settings configured, this step is automatically performed by the nodes themselves. 
As this auto-bootstrapping is <>, when you start a brand new cluster in <>, you must explicitly list the names or IP addresses of the -master-eligible nodes whose votes should be counted in the very first election. -This list is set using the `cluster.initial_master_nodes` setting. +mode>>, you must explicitly list the master-eligible nodes whose votes should be +counted in the very first election. This list is set using the +`cluster.initial_master_nodes` setting. You should not use this setting when +restarting a cluster or adding a new node to an existing cluster. [source,yaml] -------------------------------------------------- @@ -49,18 +50,23 @@ discovery.seed_hosts: - 192.168.1.10:9300 - 192.168.1.11 <1> - seeds.mydomain.com <2> -cluster.initial_master_nodes: - - master-node-a <3> - - 192.168.1.12 <4> - - 192.168.1.13:9301 <5> +cluster.initial_master_nodes: <3> + - master-node-a + - master-node-b + - master-node-c -------------------------------------------------- <1> The port will default to `transport.profiles.default.port` and fallback to `transport.port` if not specified. <2> If a hostname resolves to multiple IP addresses then the node will attempt to discover other nodes at all resolved addresses. -<3> Initial master nodes can be identified by their <>. -<4> Initial master nodes can also be identified by their IP address. -<5> If multiple master nodes share an IP address then the port must be used to - disambiguate them. +<3> The initial master nodes should be identified by their + <>, which defaults to their hostname. Make sure that + the value in `cluster.initial_master_nodes` matches the `node.name` + exactly. If you use a fully-qualified domain name such as + `master-node-a.example.com` for your node names then you must use the + fully-qualified name in this list; conversely if `node.name` is a bare + hostname without any trailing qualifiers then you must also omit the + trailing qualifiers in `cluster.initial_master_nodes`. -For more information, see <>. +For more information, see <> and +<>. diff --git a/docs/reference/setup/important-settings/error-file.asciidoc b/docs/reference/setup/important-settings/error-file.asciidoc index d58a752ac28fa..d78b0e24ec7ad 100644 --- a/docs/reference/setup/important-settings/error-file.asciidoc +++ b/docs/reference/setup/important-settings/error-file.asciidoc @@ -5,7 +5,7 @@ By default, Elasticsearch configures the JVM to write fatal error logs to the default logging directory (this is `/var/log/elasticsearch` for the <> and <> package distributions, and the `logs` directory under the root of the Elasticsearch installation for the -<> archive distributions). These are logs +<> and <> archive distributions). These are logs produced by the JVM when it encounters a fatal error (e.g., a segmentation fault). 
If this path is not suitable for receiving logs, you should modify the entry `-XX:ErrorFile=...` in diff --git a/docs/reference/setup/important-settings/heap-dump-path.asciidoc b/docs/reference/setup/important-settings/heap-dump-path.asciidoc index fb8c7ff35f0d0..25f3fbcebcbf3 100644 --- a/docs/reference/setup/important-settings/heap-dump-path.asciidoc +++ b/docs/reference/setup/important-settings/heap-dump-path.asciidoc @@ -5,7 +5,7 @@ By default, Elasticsearch configures the JVM to dump the heap on out of memory exceptions to the default data directory (this is `/var/lib/elasticsearch` for the <> and <> package distributions, and the `data` directory under the root of the -Elasticsearch installation for the <> archive +Elasticsearch installation for the <> and <> archive distributions). If this path is not suitable for receiving heap dumps, you should modify the entry `-XX:HeapDumpPath=...` in <>. If you specify a directory, the JVM diff --git a/docs/reference/setup/important-settings/heap-size.asciidoc b/docs/reference/setup/important-settings/heap-size.asciidoc index 77aa23b61df45..6560df35c52ec 100644 --- a/docs/reference/setup/important-settings/heap-size.asciidoc +++ b/docs/reference/setup/important-settings/heap-size.asciidoc @@ -9,7 +9,7 @@ Elasticsearch will assign the entire heap specified in <> via the `Xms` (minimum heap size) and `Xmx` (maximum heap size) settings. -The value for these setting depends on the amount of RAM available on your +The value for these settings depends on the amount of RAM available on your server. Good rules of thumb are: * Set the minimum heap size (`Xms`) and maximum heap size (`Xmx`) to be equal to diff --git a/docs/reference/setup/important-settings/network-host.asciidoc b/docs/reference/setup/important-settings/network-host.asciidoc index 1788bfebc66b5..45061273cdefe 100644 --- a/docs/reference/setup/important-settings/network-host.asciidoc +++ b/docs/reference/setup/important-settings/network-host.asciidoc @@ -23,7 +23,7 @@ The `network.host` setting also understands some special values such as `_local_`, `_site_`, `_global_` and modifiers like `:ip4` and `:ip6`, details of which can be found in <>. -IMPORTANT: As soon you provide a custom setting for `network.host`, +IMPORTANT: As soon as you provide a custom setting for `network.host`, Elasticsearch assumes that you are moving from development mode to production mode, and upgrades a number of system startup checks from warnings to exceptions. See <> for more information. diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc index 26a207824af01..7e03ad3947b4c 100644 --- a/docs/reference/setup/install.asciidoc +++ b/docs/reference/setup/install.asciidoc @@ -16,12 +16,17 @@ Elasticsearch Service for free]. Elasticsearch is provided in the following package formats: [horizontal] -`zip`/`tar.gz`:: +Linux and MacOS `tar.gz` archives:: -The `zip` and `tar.gz` packages are suitable for installation on any system -and are the easiest choice for getting started with Elasticsearch on most systems. +The `tar.gz` archives are available for installation on any Linux distribution and MacOS. + -<> or <> +<> + +Windows `.zip` archive:: + +The `zip` archive is suitable for installation on Windows. ++ +<> `deb`:: @@ -41,9 +46,7 @@ Elasticsearch website or from our RPM repository. 
`msi`:: -beta[] -+ -The `msi` package is suitable for installation on Windows 64-bit systems with at least +beta[] The `msi` package is suitable for installation on Windows 64-bit systems with at least .NET 4.5 framework installed, and is the easiest choice for getting started with Elasticsearch on Windows. MSIs may be downloaded from the Elasticsearch website. + @@ -56,6 +59,13 @@ downloaded from the Elastic Docker Registry. + {ref}/docker.html[Install {es} with Docker] +`brew`:: + +Formulae are available from the Elastic Homebrew tap for installing +{es} on macOS with the Homebrew package manager. ++ +{ref}/brew.html[Install {es} on macOS with Homebrew] + [float] [[config-mgmt-tools]] === Configuration Management Tools @@ -69,7 +79,7 @@ Chef:: https://github.com/elastic/cookbook-elasticsearch[cookbook-elasticsearc Ansible:: https://github.com/elastic/ansible-elasticsearch[ansible-elasticsearch] -include::install/zip-targz.asciidoc[] +include::install/targz.asciidoc[] include::install/zip-windows.asciidoc[] @@ -81,3 +91,4 @@ include::install/windows.asciidoc[] include::install/docker.asciidoc[] +include::install/brew.asciidoc[] diff --git a/docs/reference/setup/install/brew.asciidoc b/docs/reference/setup/install/brew.asciidoc new file mode 100644 index 0000000000000..cfc9e4c3e05be --- /dev/null +++ b/docs/reference/setup/install/brew.asciidoc @@ -0,0 +1,69 @@ +[[brew]] +=== Install {es} on macOS with Homebrew + +Elastic publishes Homebrew formulae so you can install {es} with the +https://brew.sh/[Homebrew] package manager. + +To install with Homebrew, you first need to tap the +Elastic Homebrew repository: + +[source,sh] +------------------------- +brew tap elastic/tap +------------------------- + +Once you've tapped the Elastic Homebrew repo, you can use `brew install` to +install the default distribution of {es}: + +[source,sh] +------------------------- +brew install elastic/tap/elasticsearch-full +------------------------- + +This installs the most recently released default distribution of {es}. +To install the OSS distribution, specify `elastic/tap/elasticsearch-oss`. + +[[brew-layout]] +==== Directory layout for Homebrew installs + +When you install {es} with `brew install` the config files, logs, +and data directory are stored in the following locations. + +[cols="> + +| data + | The location of the data files of each index / shard allocated + on the node. Can hold multiple locations. + | /usr/local/var/lib/elasticsearch + | path.data + +| logs + | Log files location. + | /usr/local/var/log/elasticsearch + | path.logs + +| plugins + | Plugin files location. Each plugin will be contained in a subdirectory. 
+ | /usr/local/var/homebrew/linked/elasticsearch/plugins + | + +|======================================================================= + +include::next-steps.asciidoc[] diff --git a/docs/reference/setup/install/check-running.asciidoc b/docs/reference/setup/install/check-running.asciidoc index 150252ecc5d14..6a9bab2ae0941 100644 --- a/docs/reference/setup/install/check-running.asciidoc +++ b/docs/reference/setup/install/check-running.asciidoc @@ -20,7 +20,7 @@ which should give you a response something like this: "version" : { "number" : "{version_qualified}", "build_flavor" : "{build_flavor}", - "build_type" : "zip", + "build_type" : "{build_type}", "build_hash" : "f27399d", "build_date" : "2016-03-30T09:51:41.449Z", "build_snapshot" : false, diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc index 97b4762338936..529c6b12ce520 100644 --- a/docs/reference/setup/install/deb.asciidoc +++ b/docs/reference/setup/install/deb.asciidoc @@ -11,9 +11,9 @@ The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the link:/downloads/past-releases[Past Releases page]. -NOTE: Elasticsearch requires Java 8 or later. Use the -http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] -or an open-source distribution such as http://openjdk.java.net[OpenJDK]. +NOTE: Elasticsearch includes a bundled version of http://openjdk.java.net[OpenJDK] +from the JDK maintainers (GPLv2+CE). To use your own version of Java, +see the <> [[deb-key]] ==== Import the Elasticsearch PGP Key @@ -173,6 +173,7 @@ include::xpack-indices.asciidoc[] endif::include-xpack[] +[[deb-sysv-init-vs-systemd]] ==== SysV `init` vs `systemd` include::init-systemd.asciidoc[] diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index b64b15703b215..e8dd1ee95957a 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -5,8 +5,9 @@ The images use https://hub.docker.com/_/centos/[centos:7] as the base image. A list of all published Docker images and tags is available at -https://www.docker.elastic.co[www.docker.elastic.co]. The source code is in -https://github.com/elastic/elasticsearch-docker/tree/{branch}[GitHub]. +https://www.docker.elastic.co[www.docker.elastic.co]. The source files +are in +https://github.com/elastic/elasticsearch/blob/{branch}/distribution/docker[Github]. These images are free to use under the Elastic license. They contain open source and free commercial features and access to paid commercial features. @@ -61,6 +62,9 @@ ifeval::["{release-state}"!="unreleased"] docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" {docker-image} -------------------------------------------- +Note the use of <> that allows bypassing +the <> in a single-node development cluster. 
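If you are experimenting outside Docker, a minimal sketch of the same single-node development setup in `elasticsearch.yml` (assuming a local test node) looks like this:

[source,yaml]
--------------------------------------------------
# Development only: a single-node cluster elects itself master and
# bypasses the production bootstrap checks
discovery.type: single-node
--------------------------------------------------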
+ endif::[] [[docker-cli-run-prod-mode]] @@ -81,7 +85,7 @@ The `vm.max_map_count` setting should be set permanently in `/etc/sysctl.conf`: -------------------------------------------- $ grep vm.max_map_count /etc/sysctl.conf vm.max_map_count=262144 ----------------------------------- +-------------------------------------------- To apply the setting on a live system type: `sysctl -w vm.max_map_count=262144` -- @@ -168,6 +172,7 @@ services: container_name: es01 environment: - node.name=es01 + - discovery.seed_hosts=es02 - cluster.initial_master_nodes=es01,es02 - cluster.name=docker-cluster - bootstrap.memory_lock=true @@ -285,6 +290,7 @@ comfortable with them adding the `--batch` flag to the plugin install command. See {plugins}/_other_command_line_parameters.html[Plugin Management documentation] for more details. +[[override-image-default]] ===== D. Override the image's default https://docs.docker.com/engine/reference/run/#cmd-default-command-or-options[CMD] Options can be passed as command-line options to the {es} process by @@ -326,7 +332,7 @@ data through a bind-mount: As a last resort, you can also force the container to mutate the ownership of any bind-mounts used for the <> through the -environment variable `TAKE_FILE_OWNERSHIP`. Inn this case, they will be owned by +environment variable `TAKE_FILE_OWNERSHIP`. In this case, they will be owned by uid:gid `1000:0` providing read/write access to the {es} process as required. -- @@ -366,6 +372,12 @@ published ports with `--publish-all`, unless you are pinning one container per h . Use the `ES_JAVA_OPTS` environment variable to set heap size. For example, to use 16GB, use `-e ES_JAVA_OPTS="-Xms16g -Xmx16g"` with `docker run`. ++ +-- +NOTE: You still need to <> even if you are +https://docs.docker.com/config/containers/resource_constraints/#limit-a-containers-access-to-memory[limiting +memory access] to the container. +-- . Pin your deployments to a specific version of the {es} Docker image, for example +docker.elastic.co/elasticsearch/elasticsearch:{version}+. diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc index a450e202b6896..041784bb96862 100644 --- a/docs/reference/setup/install/rpm.asciidoc +++ b/docs/reference/setup/install/rpm.asciidoc @@ -7,7 +7,7 @@ Elasticsearch on any RPM-based system such as OpenSuSE, SLES, Centos, Red Hat, and Oracle Enterprise. NOTE: RPM install is not supported on distributions with old versions of RPM, -such as SLES 11 and CentOS 5. Please see <> instead. +such as SLES 11 and CentOS 5. Please see <> instead. include::license.asciidoc[] @@ -15,9 +15,9 @@ The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the link:/downloads/past-releases[Past Releases page]. -NOTE: Elasticsearch requires Java 8 or later. Use the -http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] -or an open-source distribution such as http://openjdk.java.net[OpenJDK]. +NOTE: Elasticsearch includes a bundled version of http://openjdk.java.net[OpenJDK] +from the JDK maintainers (GPLv2+CE). 
To use your own version of Java, +see the <> [[rpm-key]] ==== Import the Elasticsearch PGP Key @@ -160,6 +160,7 @@ include::xpack-indices.asciidoc[] endif::include-xpack[] +[[rpm-sysv-init-vs-systemd]] ==== SysV `init` vs `systemd` include::init-systemd.asciidoc[] diff --git a/docs/reference/setup/install/zip-targz-daemon.asciidoc b/docs/reference/setup/install/targz-daemon.asciidoc similarity index 97% rename from docs/reference/setup/install/zip-targz-daemon.asciidoc rename to docs/reference/setup/install/targz-daemon.asciidoc index 31d9c3c2e7437..1325503687a07 100644 --- a/docs/reference/setup/install/zip-targz-daemon.asciidoc +++ b/docs/reference/setup/install/targz-daemon.asciidoc @@ -14,7 +14,7 @@ To shut down Elasticsearch, kill the process ID recorded in the `pid` file: [source,sh] -------------------------------------------- -kill `cat pid` +pkill -F pid -------------------------------------------- NOTE: The startup scripts provided in the <> and <> diff --git a/docs/reference/setup/install/zip-targz-start.asciidoc b/docs/reference/setup/install/targz-start.asciidoc similarity index 100% rename from docs/reference/setup/install/zip-targz-start.asciidoc rename to docs/reference/setup/install/targz-start.asciidoc diff --git a/docs/reference/setup/install/zip-targz.asciidoc b/docs/reference/setup/install/targz.asciidoc similarity index 74% rename from docs/reference/setup/install/zip-targz.asciidoc rename to docs/reference/setup/install/targz.asciidoc index d532438103754..75c9fdb42188c 100644 --- a/docs/reference/setup/install/zip-targz.asciidoc +++ b/docs/reference/setup/install/targz.asciidoc @@ -1,9 +1,7 @@ -[[zip-targz]] -=== Install Elasticsearch with `.zip` or `.tar.gz` +[[targz]] +=== Install Elasticsearch from archive on Linux or MacOS -Elasticsearch is provided as a `.zip` and as a `.tar.gz` package. These -packages can be used to install Elasticsearch on any system and are the -easiest package format to use when trying out Elasticsearch. +Elasticsearch is as a `.tar.gz` archive for Linux and MacOS. include::license.asciidoc[] @@ -12,12 +10,12 @@ link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the link:/downloads/past-releases[Past Releases page]. -NOTE: Elasticsearch requires Java 8 or later. Use the -http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] -or an open-source distribution such as http://openjdk.java.net[OpenJDK]. +NOTE: Elasticsearch includes a bundled version of http://openjdk.java.net[OpenJDK] +from the JDK maintainers (GPLv2+CE). 
To use your own version of Java, +see the <> -[[install-zip]] -==== Download and install the `.zip` package +[[install-linux]] +==== Download and install archive for Linux ifeval::["{release-state}"=="unreleased"] @@ -27,30 +25,28 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -The `.zip` archive for Elasticsearch v{version} can be downloaded and installed as follows: - +The Linux archive for Elasticsearch v{version} can be downloaded and installed as follows: ["source","sh",subs="attributes"] -------------------------------------------- -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-windows-x86_64.zip -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-windows-x86_64.zip.sha512 -shasum -a 512 -c elasticsearch-{version}-windows-x86_64.zip.sha512 <1> -unzip elasticsearch-{version}-windows-x86_64.zip +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-linux-x86_64.tar.gz +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-linux-x86_64.tar.gz.sha512 +shasum -a 512 -c elasticsearch-{version}-linux-x86_64.tar.gz.sha512 <1> +tar -xzf elasticsearch-{version}-linux-x86_64.tar.gz cd elasticsearch-{version}/ <2> -------------------------------------------- -<1> Compares the SHA of the downloaded `.zip` archive and the published checksum, which should output - `elasticsearch-{version}-windows-x86_64.zip: OK`. +<1> Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output + `elasticsearch-{version}-linux-x86_64.tar.gz: OK`. <2> This directory is known as `$ES_HOME`. -Alternatively, you can download the following package, which contains only -features that are available under the Apache 2.0 license: -https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}-windows-x86_64.zip +Alternatively, you can download the following package, which includes only +Apache 2.0 licensed code: +https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}-linux-x86_64.tar.gz endif::[] - -[[install-targz]] -==== Download and install the `.tar.gz` package +[[install-macos]] +==== Download and install archive for MacOS ifeval::["{release-state}"=="unreleased"] @@ -60,28 +56,28 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -The `.tar.gz` archive for Elasticsearch v{version} can be downloaded and installed as follows: +The MacOS archive for Elasticsearch v{version} can be downloaded and installed as follows: ["source","sh",subs="attributes"] -------------------------------------------- -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-linux-x86_64.tar.gz -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-linux-x86_64.tar.gz.sha512 -shasum -a 512 -c elasticsearch-{version}-linux-x86_64.tar.gz.sha512 <1> -tar -xzf elasticsearch-{version}-linux-x86_64.tar.gz +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-darwin-x86_64.tar.gz +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-darwin-x86_64.tar.gz.sha512 +shasum -a 512 -c elasticsearch-{version}-darwin-x86_64.tar.gz.sha512 <1> +tar -xzf elasticsearch-{version}-darwin-x86_64.tar.gz cd elasticsearch-{version}/ <2> -------------------------------------------- <1> Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output - `elasticsearch-{version}-linux-x86_64.tar.gz: OK`. 
+ `elasticsearch-{version}-darwin-x86_64.tar.gz: OK`. <2> This directory is known as `$ES_HOME`. Alternatively, you can download the following package, which includes only Apache 2.0 licensed code: -https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}-linux-x86_64.tar.gz +https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}-darwin-x86_64.tar.gz endif::[] ifdef::include-xpack[] -[[zip-targz-enable-indices]] +[[targz-enable-indices]] ==== Enable automatic creation of {xpack} indices {xpack} will try to automatically create a number of indices within {es}. @@ -89,8 +85,8 @@ include::xpack-indices.asciidoc[] endif::include-xpack[] -[[zip-targz-running]] -include::zip-targz-start.asciidoc[] +[[targz-running]] +include::targz-start.asciidoc[] include::check-running.asciidoc[] @@ -98,9 +94,9 @@ Log printing to `stdout` can be disabled using the `-q` or `--quiet` option on the command line. [[setup-installation-daemon]] -include::zip-targz-daemon.asciidoc[] +include::targz-daemon.asciidoc[] -[[zip-targz-configuring]] +[[targz-configuring]] ==== Configuring Elasticsearch on the command line Elasticsearch loads its configuration from the `$ES_HOME/config/elasticsearch.yml` @@ -119,10 +115,10 @@ TIP: Typically, any cluster-wide settings (like `cluster.name`) should be added to the `elasticsearch.yml` config file, while any node-specific settings such as `node.name` could be specified on the command line. -[[zip-targz-layout]] -==== Directory layout of `.zip` and `.tar.gz` archives +[[targz-layout]] +==== Directory layout of archives -The `.zip` and `.tar.gz` packages are entirely self-contained. All files and +The archive distributions are entirely self-contained. All files and directories are, by default, contained within `$ES_HOME` -- the directory created when unpacking the archive. diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index 46aadbc34a9f7..e53e8d4122070 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -12,14 +12,22 @@ You can continue using the `.zip` approach if you prefer. include::license.asciidoc[] +NOTE: On Windows the Elasticsearch {ml} feature requires the Microsoft Universal +C Runtime library. This is built into Windows 10, Windows Server 2016 and more +recent versions of Windows. For older versions of Windows it can be installed +via Windows Update, or from a +https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows[separate download]. +If you cannot install the Microsoft Universal C Runtime library you can still +use the rest of Elasticsearch if you disable the {ml} feature. + The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the link:/downloads/past-releases[Past Releases page]. -NOTE: Elasticsearch requires Java 8 or later. Use the -http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] -or an open-source distribution such as http://openjdk.java.net[OpenJDK]. +NOTE: Elasticsearch includes a bundled version of http://openjdk.java.net[OpenJDK] +from the JDK maintainers (GPLv2+CE). 
To use your own version of Java, +see the <> [[download-msi]] ==== Download the `.msi` package diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc index 967b449bc972b..669e3c72ea8b8 100644 --- a/docs/reference/setup/install/zip-windows.asciidoc +++ b/docs/reference/setup/install/zip-windows.asciidoc @@ -1,7 +1,7 @@ [[zip-windows]] === Install Elasticsearch with `.zip` on Windows -Elasticsearch can be installed on Windows using the `.zip` package. This +Elasticsearch can be installed on Windows using the Windows `.zip` archive. This comes with a `elasticsearch-service.bat` command which will setup Elasticsearch to run as a service. @@ -11,14 +11,22 @@ experience for Windows. You can continue using the `.zip` approach if you prefer include::license.asciidoc[] +NOTE: On Windows the Elasticsearch {ml} feature requires the Microsoft Universal +C Runtime library. This is built into Windows 10, Windows Server 2016 and more +recent versions of Windows. For older versions of Windows it can be installed +via Windows Update, or from a +https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows[separate download]. +If you cannot install the Microsoft Universal C Runtime library you can still +use the rest of Elasticsearch if you disable the {ml} feature. + The latest stable version of Elasticsearch can be found on the link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can be found on the link:/downloads/past-releases[Past Releases page]. -NOTE: Elasticsearch requires Java 8 or later. Use the -http://www.oracle.com/technetwork/java/javase/downloads/index.html[official Oracle distribution] -or an open-source distribution such as http://openjdk.java.net[OpenJDK]. +NOTE: Elasticsearch includes a bundled version of http://openjdk.java.net[OpenJDK] +from the JDK maintainers (GPLv2+CE). To use your own version of Java, +see the <> [[install-windows]] ==== Download and install the `.zip` package diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index dcea83a7f5d67..3ed86620115e4 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -212,7 +212,13 @@ logs to roll and compress after 1 GB, and to preserve a maximum of five log files (four rolled logs, and the active log). You can disable it in the `config/log4j2.properties` file by setting the deprecation -log level to `error`. 
+log level to `error` like this: +[source,properties] +-------------------------------------------------- +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = error +-------------------------------------------------- + [float] @@ -247,4 +253,4 @@ appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:e appender.rolling.layout.type = PatternLayout appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz --------------------------------------------------- \ No newline at end of file +-------------------------------------------------- diff --git a/docs/reference/setup/secure-settings.asciidoc b/docs/reference/setup/secure-settings.asciidoc index 6abf5dea14d0e..077fa1d5232a7 100644 --- a/docs/reference/setup/secure-settings.asciidoc +++ b/docs/reference/setup/secure-settings.asciidoc @@ -64,6 +64,18 @@ through stdin, use the `--stdin` flag: cat /file/containing/setting/value | bin/elasticsearch-keystore add --stdin the.setting.name.to.set ---------------------------------------------------------------- +[float] +[[add-file-to-keystore]] +=== Adding file settings +You can add sensitive files, like authentication key files for cloud plugins, +using the `add-file` command. Be sure to include your file path as an argument +after the setting name. + +[source,sh] +---------------------------------------------------------------- +bin/elasticsearch-keystore add-file the.setting.name.to.set /path/example-file.json +---------------------------------------------------------------- + [float] [[remove-settings]] === Removing settings @@ -103,3 +115,10 @@ had the new value from the start. When changing multiple *reloadable* secure settings, modify all of them, on each cluster node, and then issue a `reload_secure_settings` call, instead of reloading after each modification. + +There are reloadable secure settings for: + +* {plugins}/repository-azure-client-settings.html[The Azure repository plugin] +* {plugins}/discovery-ec2-usage.html#_configuring_ec2_discovery[The EC2 discovery plugin] +* {plugins}/repository-gcs-client.html[The GCS repository plugin] +* {plugins}/repository-s3-client.html[The S3 repository plugin] diff --git a/docs/reference/setup/setup-xclient.asciidoc b/docs/reference/setup/setup-xclient.asciidoc index 9da482af8493a..24cef9c736966 100644 --- a/docs/reference/setup/setup-xclient.asciidoc +++ b/docs/reference/setup/setup-xclient.asciidoc @@ -65,6 +65,7 @@ repositories { // Add the Elasticsearch Maven Repository maven { + name "elastic" url "https://artifacts.elastic.co/maven" } } diff --git a/docs/reference/setup/setup-xes.asciidoc b/docs/reference/setup/setup-xes.asciidoc index b0003b1e0b7c4..55c1fe8bf42f6 100644 --- a/docs/reference/setup/setup-xes.asciidoc +++ b/docs/reference/setup/setup-xes.asciidoc @@ -7,7 +7,7 @@ monitoring, reporting, machine learning, and many other capabilities. By default when you install {es}, {xpack} is installed. If you want to try all of the {xpack} features, you can -{xpack-ref}/license-management.html[start a 30-day trial]. At the end of the +{stack-ov}/license-management.html[start a 30-day trial]. At the end of the trial period, you can purchase a subscription to keep using the full functionality of the {xpack} components. For more information, see https://www.elastic.co/subscriptions. 
@@ -15,6 +15,4 @@ https://www.elastic.co/subscriptions. * <> * <> * <> -* <> * <> - diff --git a/docs/reference/setup/starting.asciidoc b/docs/reference/setup/starting.asciidoc index 6fab871e7c9ca..c2ab6b1404aa2 100644 --- a/docs/reference/setup/starting.asciidoc +++ b/docs/reference/setup/starting.asciidoc @@ -11,10 +11,10 @@ If you installed {es} with a `.tar.gz` package, you can start {es} from the command line. [float] -include::install/zip-targz-start.asciidoc[] +include::install/targz-start.asciidoc[] [float] -include::install/zip-targz-daemon.asciidoc[] +include::install/targz-daemon.asciidoc[] [float] [[start-zip]] @@ -34,9 +34,11 @@ include::install/zip-windows-start.asciidoc[] include::install/init-systemd.asciidoc[] [float] +[[start-es-deb-init]] include::install/deb-init.asciidoc[] [float] +[[start-es-deb-systemd]] include::install/systemd.asciidoc[] [float] @@ -66,7 +68,9 @@ include::install/msi-windows-start.asciidoc[] include::install/init-systemd.asciidoc[] [float] +[[start-es-rpm-init]] include::install/rpm-init.asciidoc[] [float] -include::install/systemd.asciidoc[] \ No newline at end of file +[[start-es-rpm-systemd]] +include::install/systemd.asciidoc[] diff --git a/docs/reference/setup/stopping.asciidoc b/docs/reference/setup/stopping.asciidoc index 4f632ec06f1dd..c9f718aa088c3 100644 --- a/docs/reference/setup/stopping.asciidoc +++ b/docs/reference/setup/stopping.asciidoc @@ -36,8 +36,8 @@ $ cat /tmp/elasticsearch-pid && echo $ kill -SIGTERM 15516 -------------------------------------------------- -[[fatal-errors] [float] +[[fatal-errors]] === Stopping on Fatal Errors During the life of the Elasticsearch virtual machine, certain fatal errors could arise that put the diff --git a/docs/reference/sql/appendix/syntax-reserved.asciidoc b/docs/reference/sql/appendix/syntax-reserved.asciidoc index f83939359a4ad..bf1b4c227af75 100644 --- a/docs/reference/sql/appendix/syntax-reserved.asciidoc +++ b/docs/reference/sql/appendix/syntax-reserved.asciidoc @@ -3,11 +3,9 @@ [[sql-syntax-reserved]] == Reserved keywords -beta[] - Table with reserved keywords that need to be quoted. Also provide an example to make it more obvious. -The following table lists all of the keywords that are reserved in Presto, +The following table lists all of the keywords that are reserved in {es-sql}, along with their status in the SQL standard. 
These reserved keywords must be quoted (using double quotes) in order to be used as an identifier, for example: @@ -33,43 +31,65 @@ s|SQL-92 |`BETWEEN` |reserved |reserved |`BY` |reserved |reserved |`CAST` |reserved |reserved +|`CATALOG` |reserved |reserved +|`CONVERT` |reserved |reserved +|`CURRENT_DATE` |reserved |reserved +|`CURRENT_TIMESTAMP` |reserved |reserved +|`DAY` |reserved |reserved +|`DAYS` | | |`DESC` |reserved |reserved |`DESCRIBE` |reserved |reserved |`DISTINCT` |reserved |reserved +|`ESCAPE` |reserved |reserved |`EXISTS` |reserved |reserved |`EXPLAIN` |reserved |reserved |`EXTRACT` |reserved |reserved |`FALSE` |reserved |reserved +|`FIRST` |reserved |reserved |`FROM` |reserved |reserved |`FULL` |reserved |reserved |`GROUP` |reserved |reserved |`HAVING` |reserved |reserved +|`HOUR` |reserved |reserved +|`HOURS` | | |`IN` |reserved |reserved |`INNER` |reserved |reserved +|`INTERVAL` |reserved |reserved |`IS` |reserved |reserved |`JOIN` |reserved |reserved |`LEFT` |reserved |reserved |`LIKE` |reserved |reserved |`LIMIT` |reserved |reserved |`MATCH` |reserved |reserved +|`MINUTE` |reserved |reserved +|`MINUTES` | | +|`MONTH` |reserved |reserved |`NATURAL` |reserved |reserved -|`NO` |reserved |reserved |`NOT` |reserved |reserved |`NULL` |reserved |reserved +|`NULLS` | | |`ON` |reserved |reserved |`OR` |reserved |reserved |`ORDER` |reserved |reserved |`OUTER` |reserved |reserved |`RIGHT` |reserved |reserved +|`RLIKE` | | +|`QUERY` | | +|`SECOND` |reserved |reserved +|`SECONDS` | | |`SELECT` |reserved |reserved |`SESSION` | |reserved |`TABLE` |reserved |reserved +|`TABLES` | | |`THEN` |reserved |reserved |`TO` |reserved |reserved |`TRUE` |reserved |reserved +|`TYPE` | | |`USING` |reserved |reserved |`WHEN` |reserved |reserved |`WHERE` |reserved |reserved |`WITH` |reserved |reserved +|`YEAR` |reserved |reserved +|`YEARS` | | |=== diff --git a/docs/reference/sql/concepts.asciidoc b/docs/reference/sql/concepts.asciidoc index 0d68380a497b7..49faaa9cf9b06 100644 --- a/docs/reference/sql/concepts.asciidoc +++ b/docs/reference/sql/concepts.asciidoc @@ -3,8 +3,6 @@ [[sql-concepts]] == Conventions and Terminology -beta[] - For clarity, it is important to establish the meaning behind certain words as, the same wording might convey different meanings to different readers depending on one's familiarity with SQL versus {es}. NOTE: This documentation while trying to be complete, does assume the reader has _basic_ understanding of {es} and/or SQL. If that is not the case, please continue reading the documentation however take notes and pursue the topics that are unclear either through the main {es} documentation or through the plethora of SQL material available in the open (there are simply too many excellent resources here to enumerate). @@ -15,8 +13,6 @@ Last but not least, {es-sql} tries to obey the https://en.wikipedia.org/wiki/Pri === Mapping concepts across SQL and {es} -beta[] - While SQL and {es} have different terms for the way the data is organized (and different semantics), essentially their purpose is the same. So let's start from the bottom; these roughly are: @@ -62,7 +58,7 @@ Further more, an {es} `cluster` can be connected to other ++cluster++s in a _fed single cluster:: Multiple {es} instances typically distributed across machines, running within the same namespace. multiple clusters:: -Multiple clusters, each with its own namespace, connected to each other in a federated setup (see <>). 
+Multiple clusters, each with its own namespace, connected to each other in a federated setup (see <>). |=== diff --git a/docs/reference/sql/endpoints/cli.asciidoc b/docs/reference/sql/endpoints/cli.asciidoc index c5d0290ca2c58..eef2fbfbf5969 100644 --- a/docs/reference/sql/endpoints/cli.asciidoc +++ b/docs/reference/sql/endpoints/cli.asciidoc @@ -3,8 +3,6 @@ [[sql-cli]] == SQL CLI -beta[] - Elasticsearch ships with a script to run the SQL CLI in its `bin` directory: [source,bash] diff --git a/docs/reference/sql/endpoints/client-apps/dbeaver.asciidoc b/docs/reference/sql/endpoints/client-apps/dbeaver.asciidoc index 63d7040692ffe..8a7792b525860 100644 --- a/docs/reference/sql/endpoints/client-apps/dbeaver.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/dbeaver.asciidoc @@ -3,16 +3,16 @@ [[sql-client-apps-dbeaver]] === DBeaver -beta[] - [quote, https://dbeaver.io/] ____ https://dbeaver.io/[DBeaver] DBeaver is free and open source universal database tool for developers and database administrators. ____ +IMPORTANT: Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. + ==== Prerequisites -* DBeaver version 5.1.4 or higher +* DBeaver version 6.0.0 or higher * {es-sql} <> ==== New Connection diff --git a/docs/reference/sql/endpoints/client-apps/dbvis.asciidoc b/docs/reference/sql/endpoints/client-apps/dbvis.asciidoc index 779a27a06c752..2dd9f7aaf101d 100644 --- a/docs/reference/sql/endpoints/client-apps/dbvis.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/dbvis.asciidoc @@ -3,13 +3,13 @@ [[sql-client-apps-dbvis]] === DbVisualizer -beta[] - [quote, http://www.dbvis.com/] ____ https://www.dbvis.com/[DbVisualizer] is a database management and analysis tool for all major databases. ____ +IMPORTANT: Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. + ==== Prerequisites * {es-sql} <> diff --git a/docs/reference/sql/endpoints/client-apps/excel.asciidoc b/docs/reference/sql/endpoints/client-apps/excel.asciidoc index 6f7c98e04d5b5..0498a11d9962c 100644 --- a/docs/reference/sql/endpoints/client-apps/excel.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/excel.asciidoc @@ -3,14 +3,14 @@ [[sql-client-apps-excel]] === Microsoft Excel -experimental[] - [quote, https://www.techopedia.com/definition/5430/microsoft-excel] ____ https://products.office.com/en/excel[Microsoft Excel] is a software program [...] that allows users to organize, format and calculate data with formulas using a spreadsheet system. ____ +IMPORTANT: Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. + ==== Prerequisites * Microsoft Office 2016 or higher diff --git a/docs/reference/sql/endpoints/client-apps/index.asciidoc b/docs/reference/sql/endpoints/client-apps/index.asciidoc index 80932a0a27242..544a0349f76f9 100644 --- a/docs/reference/sql/endpoints/client-apps/index.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/index.asciidoc @@ -3,9 +3,7 @@ [[sql-client-apps]] == SQL Client Applications -beta[] - -Thanks to its <> and <> interfaces, a broad range of third-party applications can use {es}'s SQL capabilities. +Thanks to its <> and <> interfaces, a broad range of third-party applications can use {es-sql} capabilities. 
This section lists, in alphabetical order, a number of them and their respective configuration - the list however is by no means comprehensive (feel free to https://www.elastic.co/blog/art-of-pull-request[submit a PR] to improve it): as long as the app can use the {es-sql} driver, it can use {es-sql}. @@ -20,7 +18,9 @@ as long as the app can use the {es-sql} driver, it can use {es-sql}. * <> * <> -NOTE: Each application has its own requirements and license; these are outside the scope of this documentation +IMPORTANT: Elastic does not endorse, promote or provide support for any of the applications listed. For native {es} integration in these products, please reach out to their respective vendor. + +NOTE: Each application has its own requirements and license; these are outside the scope of this documentation which covers only the configuration aspect with {es-sql}. WARNING: The support for applications implementing the ODBC 2.x standard and prior is currently limited. @@ -33,5 +33,5 @@ include::ps1.asciidoc[] include::microstrat.asciidoc[] include::qlik.asciidoc[] include::squirrel.asciidoc[] -include::tableau.asciidoc[] include::workbench.asciidoc[] +include::tableau.asciidoc[] diff --git a/docs/reference/sql/endpoints/client-apps/microstrat.asciidoc b/docs/reference/sql/endpoints/client-apps/microstrat.asciidoc index 898d84afb14ac..bbbb97f5f5b3a 100644 --- a/docs/reference/sql/endpoints/client-apps/microstrat.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/microstrat.asciidoc @@ -3,14 +3,14 @@ [[sql-client-apps-microstrat]] === MicroStrategy Desktop -experimental[] - [quote, https://www.microstrategy.com/us/resources/library/videos/new-microstrategy-desktop] ____ https://www.microstrategy.com/us/get-started/desktop[MicroStrategy Desktop] is a free data discovery tool that helps people bring data to life using powerful self-service analytics. ____ +IMPORTANT: Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. + ==== Prerequisites * MicroStrategy Desktop 11 or higher @@ -19,7 +19,7 @@ ____ ==== Data loading -To use the {product} to load data into MicroStrategy Desktop perform the following steps in sequence. +To use the {odbc} to load data into MicroStrategy Desktop, perform the following steps in sequence. . Create a New Dossier + diff --git a/docs/reference/sql/endpoints/client-apps/powerbi.asciidoc b/docs/reference/sql/endpoints/client-apps/powerbi.asciidoc index f2ddb3b2e131b..bb6b250d6ec85 100644 --- a/docs/reference/sql/endpoints/client-apps/powerbi.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/powerbi.asciidoc @@ -3,14 +3,14 @@ [[sql-client-apps-powerbi]] === Microsoft Power BI Desktop -experimental[] - [quote, https://powerbi.microsoft.com/en-us/what-is-power-bi/] ____ https://powerbi.microsoft.com/en-us/desktop/[Power BI] is a business analytics solution that lets you visualize your data and share insights across your organization, or embed them in your app or website. ____ +IMPORTANT: Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor.
+ ==== Prerequisites * Microsoft Power BI Desktop 2.63 or higher diff --git a/docs/reference/sql/endpoints/client-apps/ps1.asciidoc b/docs/reference/sql/endpoints/client-apps/ps1.asciidoc index 2f43b0978ba27..30fd82395f10b 100644 --- a/docs/reference/sql/endpoints/client-apps/ps1.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/ps1.asciidoc @@ -3,15 +3,14 @@ [[sql-client-apps-ps1]] === Microsoft PowerShell -experimental[] - [quote, https://docs.microsoft.com/en-us/powershell/scripting/powershell-scripting] ____ https://docs.microsoft.com/en-us/powershell/[PowerShell] is a task-based command-line shell and scripting language built on .NET. ____ -PowerShell is available on all recent Windows Desktop OSes. It also has embedded ODBC support, thus offering a quick and accessible way to -connect to {es}. +PowerShell is available on all recent Windows Desktop OSes. It also has embedded ODBC support, thus offering a quick and accessible way to connect to {es}. + +IMPORTANT: Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. ==== Prerequisites diff --git a/docs/reference/sql/endpoints/client-apps/qlik.asciidoc b/docs/reference/sql/endpoints/client-apps/qlik.asciidoc index 425914ad176bc..f2c246d5f33bf 100644 --- a/docs/reference/sql/endpoints/client-apps/qlik.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/qlik.asciidoc @@ -3,8 +3,6 @@ [[sql-client-apps-qlik]] === Qlik Sense Desktop -experimental[] - [quote, https://help.qlik.com/en-US/sense/February2018/Subsystems/Hub/Content/Introduction/at-a-glance.htm] ____ https://www.qlik.com/us/try-or-buy/download-qlik-sense[Qlik Sense Desktop] is a Windows application that gives individuals the opportunity @@ -12,6 +10,8 @@ to use Qlik Sense and create personalized, interactive data visualizations, repo drag-and-drop ease. ____ +IMPORTANT: Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. + ==== Prerequisites * Qlik Sense Desktop November 2018 or higher @@ -20,7 +20,7 @@ ____ ==== Data loading -To use the {product} to load data into Qlik Sense Desktop perform the following steps in sequence. +To use the {odbc} to load data into Qlik Sense Desktop perform the following steps in sequence. . Create new app + diff --git a/docs/reference/sql/endpoints/client-apps/squirrel.asciidoc b/docs/reference/sql/endpoints/client-apps/squirrel.asciidoc index efec4f135dd19..bea4644cb9152 100644 --- a/docs/reference/sql/endpoints/client-apps/squirrel.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/squirrel.asciidoc @@ -3,13 +3,13 @@ [[sql-client-apps-squirrel]] === SQuirreL SQL -beta[] - [quote, http://squirrel-sql.sourceforge.net/] ____ http://squirrel-sql.sourceforge.net/[SQuirreL SQL] is a graphical, [multi-platform] Java program that will allow you to view the structure of a JDBC compliant database [...]. ____ +IMPORTANT: Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. 
+ ==== Prerequisites * {es-sql} <> diff --git a/docs/reference/sql/endpoints/client-apps/tableau.asciidoc b/docs/reference/sql/endpoints/client-apps/tableau.asciidoc index c302f67ad977e..e50cb3ca5c753 100644 --- a/docs/reference/sql/endpoints/client-apps/tableau.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/tableau.asciidoc @@ -3,14 +3,14 @@ [[sql-client-apps-tableau]] === Tableau Desktop -experimental[] - [quote, https://www.tableau.com/products/what-is-tableau] ____ https://www.tableau.com/products/desktop[Tableau] is the most powerful, secure, and flexible end-to-end analytics platform for your data. ____ +IMPORTANT: Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. + ==== Prerequisites * Tableau 2018 or higher diff --git a/docs/reference/sql/endpoints/client-apps/workbench.asciidoc b/docs/reference/sql/endpoints/client-apps/workbench.asciidoc index e462d6a9690ea..2891b542900cf 100644 --- a/docs/reference/sql/endpoints/client-apps/workbench.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/workbench.asciidoc @@ -3,13 +3,13 @@ [[sql-client-apps-workbench]] === SQL Workbench/J -beta[] - [quote, https://www.sql-workbench.eu/] ____ https://www.sql-workbench.eu/[SQL Workbench/J] is a free, DBMS-independent, cross-platform SQL query tool. ____ +IMPORTANT: Elastic does not endorse, promote or provide support for this application; for native Elasticsearch integration in this product, please reach out to its vendor. + ==== Prerequisites * {es-sql} <> diff --git a/docs/reference/sql/endpoints/jdbc.asciidoc b/docs/reference/sql/endpoints/jdbc.asciidoc index 56c68fd34937f..c4ae640c01321 100644 --- a/docs/reference/sql/endpoints/jdbc.asciidoc +++ b/docs/reference/sql/endpoints/jdbc.asciidoc @@ -3,8 +3,6 @@ [[sql-jdbc]] == SQL JDBC -beta[] - {es}'s SQL jdbc driver is a rich, fully featured JDBC driver for {es}. It is Type 4 driver, meaning it is a platform independent, stand-alone, Direct to Database, pure Java driver that converts JDBC calls to {es-sql}. @@ -53,16 +51,23 @@ Once registered, the driver understands the following syntax as an URL: ["source","text",subs="attributes"] ---- -jdbc:es://<1>[http|https]?<2>[host[:port]]*<3>/[prefix]*<4>[?[option=value]&<5>]* +jdbc:es://[[http|https]://]*[host[:port]]*/[prefix]*[?[option=value]&]* ---- +`jdbc:es://`:: Prefix. Mandatory. + +`[[http|https]://]`:: Type of HTTP connection to make. Possible values are +`http` (default) or `https`. Optional. + +`[host[:port]]`:: Host (`localhost` by default) and port (`9200` by default). +Optional. -<1> `jdbc:es://` prefix. Mandatory. -<2> type of HTTP connection to make - `http` (default) or `https`. Optional. -<3> host (`localhost` by default) and port (`9200` by default). Optional. -<4> prefix (empty by default). Typically used when hosting {es} under a certain path. Optional. -<5> Parameters for the JDBC driver. Empty by default. Optional. +`[prefix]`:: Prefix (empty by default). Typically used when hosting {es} under +a certain path. Optional. -The driver recognized the following parameters: +`[option=value]`:: Properties for the JDBC driver. Empty by default. +Optional. + +The driver recognizes the following properties: [[jdbc-cfg]] [float] @@ -115,6 +120,8 @@ Query timeout (in seconds). That is the maximum amount of time waiting for a que `ssl.truststore.pass`:: trust store password +`ssl.truststore.type` (default `JKS`):: trust store type.
`PKCS12` is a common, alternative format + `ssl.protocol`(default `TLS`):: SSL protocol to be used [float] @@ -124,6 +131,17 @@ Query timeout (in seconds). That is the maximum amount of time waiting for a que `proxy.socks`:: SOCKS proxy host name +[float] +==== Mapping +`field.multi.value.leniency` (default `true`):: Whether to be lenient and return the first value (without any guarantees of what that +will be - typically the first in natural ascending order) for fields with multiple values (true) or throw an exception. + +[float] +==== Additional + +`validate.properties` (default true):: If disabled, it will ignore any misspellings or unrecognizable properties. When enabled, an exception +will be thrown if the provided property cannot be recognized. + To put all of it together, the following URL: @@ -136,10 +154,9 @@ Opens up a {es-sql} connection to `server` on port `3456`, setting the JDBC conn === API usage -beta[] - One can use JDBC through the official `java.sql` and `javax.sql` packages: +[[java-sql]] ==== `java.sql` The former through `java.sql.Driver` and `DriverManager`: @@ -152,6 +169,7 @@ HTTP traffic. The port is by default 9200. <2> Properties for connecting to Elasticsearch. An empty `Properties` instance is fine for unsecured Elasticsearch. +[[javax-sql]] ==== `javax.sql` Accessible through the `javax.sql.DataSource` API: @@ -165,10 +183,10 @@ HTTP traffic. By default 9200. instance is fine for unsecured Elasticsearch. Which one to use? Typically client applications that provide most -configuration parameters in the URL rely on the `DriverManager`-style +configuration properties in the URL rely on the `DriverManager`-style while `DataSource` is preferred when being _passed_ around since it can be configured in one place and the consumer only has to call `getConnection` -without having to worry about any other parameters. +without having to worry about any other properties. To connect to a secured Elasticsearch server the `Properties` should look like: @@ -185,3 +203,8 @@ connection. For example: -------------------------------------------------- include-tagged::{jdbc-tests}/SimpleExampleTestCase.java[simple_example] -------------------------------------------------- + +NOTE:: {es-sql} doesn't provide a connection pooling mechanism, thus the connections +the JDBC driver creates are not pooled. In order to achieve pooled connections, +a third-party connection pooling mechanism is required. Configuring and setting up the +third-party provider is outside the scope of this documentation. \ No newline at end of file diff --git a/docs/reference/sql/endpoints/odbc.asciidoc b/docs/reference/sql/endpoints/odbc.asciidoc index f0d7886102679..1a7dd974281c8 100644 --- a/docs/reference/sql/endpoints/odbc.asciidoc +++ b/docs/reference/sql/endpoints/odbc.asciidoc @@ -1,7 +1,4 @@ -:es: Elasticsearch -:es-sql: {es} SQL -:version: 6.5.0 -:product: {es-sql} ODBC Driver +:odbc: {es-sql} ODBC Driver [role="xpack"] [testenv="platinum"] @@ -12,9 +9,7 @@ [float] === Overview -experimental[] - -{product} is a feature-rich 3.80 ODBC driver for {es}. +{odbc} is a feature-rich 3.80 ODBC driver for {es}. It is a core level driver, exposing all of the functionality accessible through the {es}'s SQL ODBC API, converting ODBC calls into {es-sql}. 
diff --git a/docs/reference/sql/endpoints/odbc/configuration.asciidoc b/docs/reference/sql/endpoints/odbc/configuration.asciidoc index dbc6f7b87c729..8bda67ce063d5 100644 --- a/docs/reference/sql/endpoints/odbc/configuration.asciidoc +++ b/docs/reference/sql/endpoints/odbc/configuration.asciidoc @@ -3,8 +3,6 @@ [[sql-odbc-setup]] === Configuration -experimental[] - Once the driver has been installed, in order for an application to be able to connect to {es} through ODBC, a set of configuration parameters must be provided to the driver. Depending on the application, there are generally three ways of providing these parameters: * through a connection string; @@ -63,7 +61,7 @@ Such a file can be then shared among multiple systems and the user will need to The configuration steps are similar for all the above points. Following is an example of configuring a System DSN. [float] -===== 2.1 Launch {product} DSN Editor +===== 2.1 Launch {odbc} DSN Editor Click on the _System DSN_ tab, then on the _Add..._ button: [[system_add]] @@ -76,10 +74,10 @@ A new window will open, listing all available installed drivers. Click on _{es} .Launch the DSN Editor image:images/sql/odbc/administrator_launch_editor.png[] -This action closes the previously opened second window and open a new one instead, {product}'s DSN Editor: +This action closes the previously opened second window and open a new one instead, {odbc}'s DSN Editor: [[dsn_editor]] -.{product} DSN Editor +.{odbc} DSN Editor image:images/sql/odbc/dsn_editor_basic.png[] This new window has three tabs, each responsible for a set of configuration parameters, as follows. @@ -173,7 +171,7 @@ image:images/sql/odbc/dsn_editor_security_cert.png[] [float] ===== 2.4 Logging parameters -For troubleshooting purposes, the {product} offers functionality to log the API calls that an application makes; this is enabled in the Administrator application: +For troubleshooting purposes, the {odbc} offers functionality to log the API calls that an application makes; this is enabled in the Administrator application: [[administrator_tracing]] .Enable Application ODBC API logging @@ -241,7 +239,7 @@ image:images/sql/odbc/administrator_system_added.png[] Due to the specification of the ODBC API, the driver will receive the configured DSN parameters - including the logging ones - only once a connection API is invoked (such as _SQLConnect_ or _SQLDriverConnect_). The _Driver Manager_ will however always make a set of API calls into the driver before attempting to establish a connection. To capture those calls as well, one needs to pass logging configuration -parameters in an alternative way. The {product} will use an environment variable for this purpose. +parameters in an alternative way. The {odbc} will use an environment variable for this purpose. Configuring an environment variable is OS specific and not detailed in this guide. Whether the variable should be configured system-wide or user-specific depends on the way the ODBC-enabled application is being run and if logging should affect the current user only or not. diff --git a/docs/reference/sql/endpoints/odbc/installation.asciidoc b/docs/reference/sql/endpoints/odbc/installation.asciidoc index e112e24bba78f..08f0c66ee2a8f 100644 --- a/docs/reference/sql/endpoints/odbc/installation.asciidoc +++ b/docs/reference/sql/endpoints/odbc/installation.asciidoc @@ -3,14 +3,12 @@ [[sql-odbc-installation]] === Driver installation -experimental[] - -The {product} can be installed on Microsoft Windows using an MSI package. 
The installation process is simple and is composed of standard MSI wizard steps. +The {odbc} can be installed on Microsoft Windows using an MSI package. The installation process is simple and is composed of standard MSI wizard steps. [[prerequisites]] ==== Installation Prerequisites -Before you install the {product} you need to meet the following prerequisites; +Before you install the {odbc} you need to meet the following prerequisites: * Windows 10 64 bit _or_ Windows Server 2016 64 bit operating system * .NET Framework 4.0 full - https://www.microsoft.com/en-au/download/details.aspx?id=17718 @@ -26,7 +24,7 @@ NOTE: It is not possible to inline upgrade using the MSI. In order to upgrade, y [[download]] ==== Download the `.msi` package(s) -Download the `.msi` package for {product} {version} from: +Download the `.msi` package for {odbc} {version} from: https://www.elastic.co/downloads/odbc-client There are two versions of the installer available: diff --git a/docs/reference/sql/endpoints/rest.asciidoc b/docs/reference/sql/endpoints/rest.asciidoc index cdce98ef0e500..46d434c851d84 100644 --- a/docs/reference/sql/endpoints/rest.asciidoc +++ b/docs/reference/sql/endpoints/rest.asciidoc @@ -3,11 +3,18 @@ [[sql-rest]] == SQL REST API -beta[] +* <> +* <> +* <> +* <> +* <> -The SQL REST API accepts SQL in a JSON document, executes it, -and returns the results. For example: +[[sql-rest-overview]] +=== Overview +The SQL REST API accepts SQL in a JSON document, executes it, +and returns the results. +For example: [source,js] -------------------------------------------------- @@ -32,21 +39,98 @@ Alastair Reynolds|Revelation Space |585 |2000-03-15T00:00:00.000Z James S.A. Corey |Leviathan Wakes |561 |2011-06-02T00:00:00.000Z -------------------------------------------------- // TESTRESPONSE[s/\|/\\|/ s/\+/\\+/] -// TESTRESPONSE[_cat] - -While the `text/plain` format is nice for humans, computers prefer something -more structured. You can replace the value of `format` with: -- `json` aka `application/json` -- `yaml` aka `application/yaml` -- `smile` aka `application/smile` -- `cbor` aka `application/cbor` -- `txt` aka `text/plain` -- `csv` aka `text/csv` -- `tsv` aka `text/tab-separated-values` - -Alternatively you can set the `Accept` HTTP header to the appropriate media -format. The GET parameter takes precedence over the header. If neither is -specified then the response is returned in the same format as the request. +// TESTRESPONSE[non_json] + +[[sql-kibana-console]] +.Using Kibana Console +If you are using {kibana-ref}/console-kibana.html[Kibana Console] +(which is highly recommended), take advantage of the +triple quotes `"""` when creating the query. This not only automatically escapes double +quotes (`"`) inside the query string but also supports multi-line as shown below: +image:images/sql/rest/console-triple-quotes.png[] + +[[sql-rest-format]] +=== Response Data Formats + +While the textual format is nice for humans, computers prefer something +more structured. + +{es-sql} can return the data in the following formats which can be set +either through the `format` property in the URL or by setting the `Accept` HTTP header: + +NOTE: The URL parameter takes precedence over the `Accept` HTTP header. +If neither is specified then the response is returned in the same format as the request.
+ +[cols="^m,^4m,^8"] + +|=== +s|format +s|`Accept` HTTP header +s|Description + +3+h| Human Readable + +|csv +|text/csv +|https://en.wikipedia.org/wiki/Comma-separated_values[Comma-separated values] + +|json +|application/json +|https://www.json.org/[JSON] (JavaScript Object Notation) human-readable format + +|tsv +|text/tab-separated-values +|https://en.wikipedia.org/wiki/Tab-separated_values[Tab-separated values] + +|txt +|text/plain +|CLI-like representation + +|yaml +|application/yaml +|https://en.wikipedia.org/wiki/YAML[YAML] (YAML Ain't Markup Language) human-readable format + +3+h| Binary Formats + +|cbor +|application/cbor +|http://cbor.io/[Concise Binary Object Representation] + +|smile +|application/smile +|https://en.wikipedia.org/wiki/Smile_(data_interchange_format)[Smile] binary data format similar to CBOR + +|=== + +Here are some examples for the human readable formats: + +==== CSV + +[source,js] +-------------------------------------------------- +POST /_sql?format=csv +{ + "query": "SELECT * FROM library ORDER BY page_count DESC", + "fetch_size": 5 +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:library] + +Which returns: + +[source,text] +-------------------------------------------------- +author,name,page_count,release_date +Peter F. Hamilton,Pandora's Star,768,2004-03-02T00:00:00.000Z +Vernor Vinge,A Fire Upon the Deep,613,1992-06-01T00:00:00.000Z +Frank Herbert,Dune,604,1965-06-01T00:00:00.000Z +Alastair Reynolds,Revelation Space,585,2000-03-15T00:00:00.000Z +James S.A. Corey,Leviathan Wakes,561,2011-06-02T00:00:00.000Z +-------------------------------------------------- +// TESTRESPONSE[non_json] + +==== JSON [source,js] -------------------------------------------------- @@ -82,7 +166,116 @@ Which returns: -------------------------------------------------- // TESTRESPONSE[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWWWdrRlVfSS1TbDYtcW9lc1FJNmlYdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl\+v\/\/\/w8=/$body.cursor/] -You can continue to the next page by sending back the `cursor` field. In +==== TSV + +[source,js] +-------------------------------------------------- +POST /_sql?format=tsv +{ + "query": "SELECT * FROM library ORDER BY page_count DESC", + "fetch_size": 5 +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:library] + +Which returns: + +[source,text] +-------------------------------------------------- +author name page_count release_date +Peter F. Hamilton Pandora's Star 768 2004-03-02T00:00:00.000Z +Vernor Vinge A Fire Upon the Deep 613 1992-06-01T00:00:00.000Z +Frank Herbert Dune 604 1965-06-01T00:00:00.000Z +Alastair Reynolds Revelation Space 585 2000-03-15T00:00:00.000Z +James S.A. Corey Leviathan Wakes 561 2011-06-02T00:00:00.000Z +-------------------------------------------------- +// TESTRESPONSE[s/\t/ /] +// TESTRESPONSE[non_json] + +==== TXT + +[source,js] +-------------------------------------------------- +POST /_sql?format=txt +{ + "query": "SELECT * FROM library ORDER BY page_count DESC", + "fetch_size": 5 +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:library] + +Which returns: + +[source,text] +-------------------------------------------------- + author | name | page_count | release_date +-----------------+--------------------+---------------+------------------------ +Peter F. 
Hamilton|Pandora's Star |768 |2004-03-02T00:00:00.000Z +Vernor Vinge |A Fire Upon the Deep|613 |1992-06-01T00:00:00.000Z +Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z +Alastair Reynolds|Revelation Space |585 |2000-03-15T00:00:00.000Z +James S.A. Corey |Leviathan Wakes |561 |2011-06-02T00:00:00.000Z +-------------------------------------------------- +// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/] +// TESTRESPONSE[non_json] + +==== YAML + +[source,js] +-------------------------------------------------- +POST /_sql?format=yaml +{ + "query": "SELECT * FROM library ORDER BY page_count DESC", + "fetch_size": 5 +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:library] + +Which returns: + +[source,yaml] +-------------------------------------------------- +columns: +- name: "author" + type: "text" +- name: "name" + type: "text" +- name: "page_count" + type: "short" +- name: "release_date" + type: "datetime" +rows: +- - "Peter F. Hamilton" + - "Pandora's Star" + - 768 + - "2004-03-02T00:00:00.000Z" +- - "Vernor Vinge" + - "A Fire Upon the Deep" + - 613 + - "1992-06-01T00:00:00.000Z" +- - "Frank Herbert" + - "Dune" + - 604 + - "1965-06-01T00:00:00.000Z" +- - "Alastair Reynolds" + - "Revelation Space" + - 585 + - "2000-03-15T00:00:00.000Z" +- - "James S.A. Corey" + - "Leviathan Wakes" + - 561 + - "2011-06-02T00:00:00.000Z" +cursor: "sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWWWdrRlVfSS1TbDYtcW9lc1FJNmlYdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl+v///w8=" +-------------------------------------------------- +// TESTRESPONSE[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWWWdrRlVfSS1TbDYtcW9lc1FJNmlYdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl\+v\/\/\/w8=/$body.cursor/] + +[[sql-pagination]] +=== Paginating through a large response + +Using the example above, one can continue to the next page by sending back the `cursor` field. In case of text format the cursor is returned as `Cursor` http header. [source,js] @@ -113,7 +306,7 @@ Which looks like: -------------------------------------------------- // TESTRESPONSE[s/sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWODRMaXBUaVlRN21iTlRyWHZWYUdrdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl9f\/\/\/w8=/$body.cursor/] -Note that the `column` object is only part of the first page. +Note that the `columns` object is only part of the first page. You've reached the last page when there is no `cursor` returned in the results. Like Elasticsearch's <>, @@ -147,9 +340,10 @@ Which will like return the [[sql-rest-filtering]] +=== Filtering using {es} query DSL You can filter the results that SQL will run on using a standard -Elasticsearch query DSL by specifying the query in the filter +{es} query DSL by specifying the query in the filter parameter. [source,js] @@ -180,13 +374,54 @@ Which returns: Douglas Adams |The Hitchhiker's Guide to the Galaxy|180 |1979-10-12T00:00:00.000Z -------------------------------------------------- // TESTRESPONSE[s/\|/\\|/ s/\+/\\+/] -// TESTRESPONSE[_cat] +// TESTRESPONSE[non_json] [[sql-rest-fields]] -In addition to the `query` and `cursor` fields, the request can -contain `fetch_size` and `time_zone`. `fetch_size` is a hint for how -many results to return in each page. SQL might chose to return more -or fewer results though. `time_zone` is the time zone to use for datetime -functions and datetime parsing. `time_zone` defaults to `utc` and can take -any values documented -http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html[here].
+=== Supported REST parameters + +In addition to the `query` and `fetch_size`, a request accepts a number of user-defined fields for specifying +the request time-outs or localization information (such as timezone). + +The table below lists the supported parameters: + +[cols="^m,^m,^5"] + +|=== + +s|name +s|Default value +s|Description + +|query +|Mandatory +|SQL query to execute + +|fetch_size +|1000 +|The maximum number of rows (or entries) to return in one response + +|filter +|none +|Optional {es} query DSL for additional <>. + +|request_timeout +|90s +|The timeout before the request fails. + +|page_timeout +|45s +|The timeout before a pagination request fails. + +|time_zone +|`Z` (or `UTC`) +|Time-zone in ISO 8601 used for executing the query on the server. +More information available https://docs.oracle.com/javase/8/docs/api/java/time/ZoneId.html[here]. + +|field_multi_value_leniency +|false +|Throw an exception when encountering multiple values for a field (default) or be lenient and return the first value from the list (without any guarantees of what that will be - typically the first in natural ascending order). + +|=== + +Do note that most parameters (outside the timeout ones) make sense only during the initial query - any follow-up pagination request only requires the `cursor` parameter as explained in the <> chapter. +That's because the query has already been executed and the calls are simply about returning the found results - thus the parameters are ignored. \ No newline at end of file diff --git a/docs/reference/sql/endpoints/translate.asciidoc b/docs/reference/sql/endpoints/translate.asciidoc index de9d8adbeab0b..6a347ff614af7 100644 --- a/docs/reference/sql/endpoints/translate.asciidoc +++ b/docs/reference/sql/endpoints/translate.asciidoc @@ -3,10 +3,8 @@ [[sql-translate]] == SQL Translate API -beta[] - The SQL Translate API accepts SQL in a JSON document and translates it -into native Elasticsearch queries. For example: +into native {es} queries. For example: [source,js] -------------------------------------------------- diff --git a/docs/reference/sql/functions/aggs.asciidoc b/docs/reference/sql/functions/aggs.asciidoc index 152d47715250d..c609a642d4b5e 100644 --- a/docs/reference/sql/functions/aggs.asciidoc +++ b/docs/reference/sql/functions/aggs.asciidoc @@ -3,20 +3,20 @@ [[sql-functions-aggs]] === Aggregate Functions -beta[] - Functions for computing a _single_ result from a set of input values. {es-sql} supports aggregate functions only alongside <> (implicit or explicit).
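As a quick, hedged sketch of the "implicit or explicit `GROUP BY`" requirement, the queries below run an aggregate once over the whole `library` example index (implicit grouping) and once per bucket (explicit grouping); the index and field names are borrowed from the REST examples above and the grouping expression is illustrative only:

[source, sql]
--------------------------------------------------
-- implicit grouping: a single row for the whole index
SELECT MAX(page_count) AS longest, AVG(page_count) AS avg_pages FROM library;

-- explicit grouping: one row per publication year
SELECT YEAR(release_date) AS year, COUNT(*) AS books
FROM library
GROUP BY YEAR(release_date);
--------------------------------------------------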
-==== General Purpose +[[sql-functions-aggs-general]] +[float] +=== General Purpose [[sql-functions-aggs-avg]] -===== `AVG` +==== `AVG` .Synopsis: [source, sql] -------------------------------------------------- -AVG(numeric_field<1>) +AVG(numeric_field) <1> -------------------------------------------------- *Input*: @@ -35,12 +35,12 @@ include-tagged::{sql-specs}/docs.csv-spec[aggAvg] -------------------------------------------------- [[sql-functions-aggs-count]] -===== `COUNT` +==== `COUNT` .Synopsis: [source, sql] -------------------------------------------------- -COUNT(expression<1>) +COUNT(expression) <1> -------------------------------------------------- *Input*: @@ -65,12 +65,12 @@ include-tagged::{sql-specs}/docs.csv-spec[aggCountStar] [[sql-functions-aggs-count-all]] -===== `COUNT(ALL)` +==== `COUNT(ALL)` .Synopsis: [source, sql] -------------------------------------------------- -COUNT(ALL field_name<1>) +COUNT(ALL field_name) <1> -------------------------------------------------- *Input*: @@ -90,12 +90,12 @@ include-tagged::{sql-specs}/docs.csv-spec[aggCountAll] [[sql-functions-aggs-count-distinct]] -===== `COUNT(DISTINCT)` +==== `COUNT(DISTINCT)` .Synopsis: [source, sql] -------------------------------------------------- -COUNT(DISTINCT field_name<1>) +COUNT(DISTINCT field_name) <1> -------------------------------------------------- *Input*: @@ -114,12 +114,14 @@ include-tagged::{sql-specs}/docs.csv-spec[aggCountDistinct] -------------------------------------------------- [[sql-functions-aggs-first]] -===== `FIRST/FIRST_VALUE` +==== `FIRST/FIRST_VALUE` .Synopsis: [source, sql] ---------------------------------------------- -FIRST(field_name<1>[, ordering_field_name]<2>) +FIRST( + field_name <1> + [, ordering_field_name]) <2> ---------------------------------------------- *Input*: @@ -209,12 +211,14 @@ include-tagged::{sql-specs}/docs.csv-spec[firstValueWithTwoArgsAndGroupBy] the field is also <>. [[sql-functions-aggs-last]] -===== `LAST/LAST_VALUE` +==== `LAST/LAST_VALUE` .Synopsis: [source, sql] -------------------------------------------------- -LAST(field_name<1>[, ordering_field_name]<2>) +LAST( + field_name <1> + [, ordering_field_name]) <2> -------------------------------------------------- *Input*: @@ -304,12 +308,12 @@ include-tagged::{sql-specs}/docs.csv-spec[lastValueWithTwoArgsAndGroupBy] the field is also <>. [[sql-functions-aggs-max]] -===== `MAX` +==== `MAX` .Synopsis: [source, sql] -------------------------------------------------- -MAX(field_name<1>) +MAX(field_name) <1> -------------------------------------------------- *Input*: @@ -332,12 +336,12 @@ include-tagged::{sql-specs}/docs.csv-spec[aggMax] <> and therefore, it cannot be used in `HAVING` clause. [[sql-functions-aggs-min]] -===== `MIN` +==== `MIN` .Synopsis: [source, sql] -------------------------------------------------- -MIN(field_name<1>) +MIN(field_name) <1> -------------------------------------------------- *Input*: @@ -360,12 +364,12 @@ include-tagged::{sql-specs}/docs.csv-spec[aggMin] <> and therefore, it cannot be used in `HAVING` clause. [[sql-functions-aggs-sum]] -===== `SUM` +==== `SUM` .Synopsis: [source, sql] -------------------------------------------------- -SUM(field_name<1>) +SUM(field_name) <1> -------------------------------------------------- *Input*: @@ -383,15 +387,17 @@ Returns the sum of input values in the field `field_name`. 
include-tagged::{sql-specs}/docs.csv-spec[aggSum] -------------------------------------------------- -==== Statistics +[[sql-functions-aggs-statistics]] +[float] +=== Statistics [[sql-functions-aggs-kurtosis]] -===== `KURTOSIS` +==== `KURTOSIS` .Synopsis: [source, sql] -------------------------------------------------- -KURTOSIS(field_name<1>) +KURTOSIS(field_name) <1> -------------------------------------------------- *Input*: @@ -410,12 +416,14 @@ include-tagged::{sql-specs}/docs.csv-spec[aggKurtosis] -------------------------------------------------- [[sql-functions-aggs-percentile]] -===== `PERCENTILE` +==== `PERCENTILE` .Synopsis: [source, sql] -------------------------------------------------- -PERCENTILE(field_name<1>, numeric_exp<2>) +PERCENTILE( + field_name, <1> + numeric_exp) <2> -------------------------------------------------- *Input*: @@ -436,12 +444,14 @@ include-tagged::{sql-specs}/docs.csv-spec[aggPercentile] -------------------------------------------------- [[sql-functions-aggs-percentile-rank]] -===== `PERCENTILE_RANK` +==== `PERCENTILE_RANK` .Synopsis: [source, sql] -------------------------------------------------- -PERCENTILE_RANK(field_name<1>, numeric_exp<2>) +PERCENTILE_RANK( + field_name, <1> + numeric_exp) <2> -------------------------------------------------- *Input*: @@ -462,12 +472,12 @@ include-tagged::{sql-specs}/docs.csv-spec[aggPercentileRank] -------------------------------------------------- [[sql-functions-aggs-skewness]] -===== `SKEWNESS` +==== `SKEWNESS` .Synopsis: [source, sql] -------------------------------------------------- -SKEWNESS(field_name<1>) +SKEWNESS(field_name) <1> -------------------------------------------------- *Input*: @@ -486,12 +496,12 @@ include-tagged::{sql-specs}/docs.csv-spec[aggSkewness] -------------------------------------------------- [[sql-functions-aggs-stddev-pop]] -===== `STDDEV_POP` +==== `STDDEV_POP` .Synopsis: [source, sql] -------------------------------------------------- -STDDEV_POP(field_name<1>) +STDDEV_POP(field_name) <1> -------------------------------------------------- *Input*: @@ -510,12 +520,12 @@ include-tagged::{sql-specs}/docs.csv-spec[aggStddevPop] -------------------------------------------------- [[sql-functions-aggs-sum-squares]] -===== `SUM_OF_SQUARES` +==== `SUM_OF_SQUARES` .Synopsis: [source, sql] -------------------------------------------------- -SUM_OF_SQUARES(field_name<1>) +SUM_OF_SQUARES(field_name) <1> -------------------------------------------------- *Input*: @@ -534,12 +544,12 @@ include-tagged::{sql-specs}/docs.csv-spec[aggSumOfSquares] -------------------------------------------------- [[sql-functions-aggs-var-pop]] -===== `VAR_POP` +==== `VAR_POP` .Synopsis: [source, sql] -------------------------------------------------- -VAR_POP(field_name<1>) +VAR_POP(field_name) <1> -------------------------------------------------- *Input*: diff --git a/docs/reference/sql/functions/conditional.asciidoc b/docs/reference/sql/functions/conditional.asciidoc index 63138bcfce88b..ae9ab87627946 100644 --- a/docs/reference/sql/functions/conditional.asciidoc +++ b/docs/reference/sql/functions/conditional.asciidoc @@ -3,8 +3,6 @@ [[sql-functions-conditional]] === Conditional Functions -beta[] - Functions that return one of their arguments by evaluating in an if-else manner. [[sql-functions-conditional-coalesce]] @@ -13,7 +11,10 @@ Functions that return one of their arguments by evaluating in an if-else manner. .Synopsis: [source, sql] ---- -COALESCE(expression<1>, expression<2>, ...) 
+COALESCE( + expression, <1> + expression, <2> + ...) ---- *Input*: @@ -37,24 +38,26 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[coalesceReturnNonNull] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[coalesceReturnNull] ---- - -[[sql-functions-conditional-ifnull]] -==== `IFNULL` +[[sql-functions-conditional-greatest]] +==== `GREATEST` .Synopsis: [source, sql] ---- -IFNULL(expression<1>, expression<2>) +GREATEST( + expression, <1> + expression, <2> + ...) ---- *Input*: @@ -63,35 +66,41 @@ IFNULL(expression<1>, expression<2>) <2> 2nd expression +... -*Output*: 2nd expression if 1st expression is null, otherwise 1st expression. +**N**th expression + +GREATEST can take an arbitrary number of arguments and +all of them must be of the same data type. + +*Output*: one of the expressions or `null` .Description -Variant of <> with only two arguments. -Returns the first of its arguments that is not null. +Returns the argument that has the largest value which is not null. If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- -include-tagged::{sql-specs}/docs.csv-spec[ifNullReturnFirst] +include-tagged::{sql-specs}/docs.csv-spec[greatestReturnNonNull] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- -include-tagged::{sql-specs}/docs.csv-spec[ifNullReturnSecond] +include-tagged::{sql-specs}/docs.csv-spec[greatestReturnNull] ---- - -[[sql-functions-conditional-isnull]] -==== `ISNULL` +[[sql-functions-conditional-ifnull]] +==== `IFNULL` .Synopsis: [source, sql] ---- -ISNULL(expression<1>, expression<2>) +IFNULL( + expression, <1> + expression) <2> ---- *Input*: @@ -111,24 +120,26 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- -include-tagged::{sql-specs}/docs.csv-spec[isNullReturnFirst] +include-tagged::{sql-specs}/docs.csv-spec[ifNullReturnFirst] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- -include-tagged::{sql-specs}/docs.csv-spec[isNullReturnSecond] +include-tagged::{sql-specs}/docs.csv-spec[ifNullReturnSecond] ---- -[[sql-functions-conditional-nvl]] -==== `NVL` +[[sql-functions-conditional-isnull]] +==== `ISNULL` .Synopsis: [source, sql] ---- -NVL(expression<1>, expression<2>) +ISNULL( + expression, <1> + expression) <2> ---- *Input*: @@ -148,24 +159,27 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- -include-tagged::{sql-specs}/docs.csv-spec[nvlReturnFirst] +include-tagged::{sql-specs}/docs.csv-spec[isNullReturnFirst] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- -include-tagged::{sql-specs}/docs.csv-spec[nvlReturnSecond] +include-tagged::{sql-specs}/docs.csv-spec[isNullReturnSecond] ---- -[[sql-functions-conditional-nullif]] -==== `NULLIF` +[[sql-functions-conditional-least]] +==== `LEAST` .Synopsis: [source, sql] ---- -NULLIF(expression<1>, expression<2>) +LEAST( + expression, <1> + expression, <2> + ...) ---- *Input*: @@ -174,33 +188,42 @@ NULLIF(expression<1>, expression<2>) <2> 2nd expression +... + +**N**th expression -*Output*: `null` if the 2 expressions are equal, otherwise the 1st expression. 
+LEAST can take an arbitrary number of arguments and +all of them must be of the same data type. + +*Output*: one of the expressions or `null` .Description -Returns `null` when the two input expressions are equal and -if not, it returns the 1st expression. +Returns the argument that has the smallest value which is not null. +If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] + +[source, sql] ---- -include-tagged::{sql-specs}/docs.csv-spec[nullIfReturnFirst] +include-tagged::{sql-specs}/docs.csv-spec[leastReturnNonNull] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- -include-tagged::{sql-specs}/docs.csv-spec[nullIfReturnNull] +include-tagged::{sql-specs}/docs.csv-spec[leastReturnNull] ---- -[[sql-functions-conditional-greatest]] -==== `GREATEST` +[[sql-functions-conditional-nullif]] +==== `NULLIF` .Synopsis: [source, sql] ---- -GREATEST(expression<1>, expression<2>, ...) +NULLIF( + expression, <1> + expression) <2> ---- *Input*: @@ -209,40 +232,35 @@ GREATEST(expression<1>, expression<2>, ...) <2> 2nd expression -... - -**N**th expression -GREATEST can take an arbitrary number of arguments and -all of them must be of the same data type. - -*Output*: one of the expressions or `null` +*Output*: `null` if the 2 expressions are equal, otherwise the 1st expression. .Description -Returns the argument that has the largest value which is not null. -If all arguments are null, then it returns `null`. - +Returns `null` when the two input expressions are equal and +if not, it returns the 1st expression. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- -include-tagged::{sql-specs}/docs.csv-spec[greatestReturnNonNull] +include-tagged::{sql-specs}/docs.csv-spec[nullIfReturnFirst] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- -include-tagged::{sql-specs}/docs.csv-spec[greatestReturnNull] +include-tagged::{sql-specs}/docs.csv-spec[nullIfReturnNull] ---- -[[sql-functions-conditional-least]] -==== `LEAST` +[[sql-functions-conditional-nvl]] +==== `NVL` .Synopsis: [source, sql] ---- -LEAST(expression<1>, expression<2>, ...) +NVL( + expression, <1> + expression) <2> ---- *Input*: @@ -251,28 +269,25 @@ LEAST(expression<1>, expression<2>, ...) <2> 2nd expression -... - -**N**th expression - -LEAST can take an arbitrary number of arguments and -all of them must be of the same data type. -*Output*: one of the expressions or `null` +*Output*: 2nd expression if 1st expression is null, otherwise 1st expression. .Description -Returns the argument that has the smallest value which is not null. +Variant of <> with only two arguments. +Returns the first of its arguments that is not null. If all arguments are null, then it returns `null`. 
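To tie the conditional functions above together, here is a FROM-less sketch with literal arguments (the literals are illustrative only; each expression can of course also reference fields):

[source, sql]
--------------------------------------------------
SELECT COALESCE(null, 'fallback') AS coalesced, -- first non-null argument
       IFNULL(null, 'fallback')   AS if_null,   -- two-argument variant of COALESCE
       GREATEST(1, 5, 3)          AS largest,   -- largest non-null value
       NULLIF('a', 'a')           AS null_if    -- null, because the two arguments are equal
--------------------------------------------------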
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- -include-tagged::{sql-specs}/docs.csv-spec[leastReturnNonNull] +include-tagged::{sql-specs}/docs.csv-spec[nvlReturnFirst] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- -include-tagged::{sql-specs}/docs.csv-spec[leastReturnNull] +include-tagged::{sql-specs}/docs.csv-spec[nvlReturnSecond] ---- + + diff --git a/docs/reference/sql/functions/date-time.asciidoc b/docs/reference/sql/functions/date-time.asciidoc index 2d2678a61704d..a9f8c398b15c5 100644 --- a/docs/reference/sql/functions/date-time.asciidoc +++ b/docs/reference/sql/functions/date-time.asciidoc @@ -3,8 +3,6 @@ [[sql-functions-datetime]] === Date/Time and Interval Functions and Operators -beta[] - {es-sql} offers a wide range of facilities for performing date/time manipulations. [[sql-functions-datetime-interval]] @@ -59,39 +57,39 @@ s|Description Basic arithmetic operators (`+`, `-`, etc) support date/time parameters as indicated below: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[dtIntervalPlusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[dtDateTimePlusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[dtMinusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[dtIntervalMinusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[dtDateTimeMinusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[dtIntervalMul] -------------------------------------------------- ==== Functions -beta[] +Functions that target date/time. [[sql-functions-current-date]] ==== `CURRENT_DATE/CURDATE` @@ -101,6 +99,7 @@ beta[] -------------------------------------------------- CURRENT_DATE CURRENT_DATE() +CURDATE() -------------------------------------------------- *Input*: _none_ @@ -117,12 +116,17 @@ Unlike CURRENT_DATE, `CURDATE()` can only be used as a function with no argument This method always returns the same value for its every occurrence within the same query. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] +-------------------------------------------------- +include-tagged::{sql-specs}/docs.csv-spec[currentDate] +-------------------------------------------------- + +[source, sql] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[curDate] +include-tagged::{sql-specs}/docs.csv-spec[currentDateFunction] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[curDateFunction] -------------------------------------------------- @@ -130,7 +134,7 @@ include-tagged::{sql-specs}/docs.csv-spec[curDateFunction] Typically, this function (as well as its twin <> function is used for relative date filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[filterToday] -------------------------------------------------- @@ -142,7 +146,7 @@ include-tagged::{sql-specs}/docs.csv-spec[filterToday] [source, sql] -------------------------------------------------- CURRENT_TIMESTAMP -CURRENT_TIMESTAMP(precision <1>) +CURRENT_TIMESTAMP([precision]) <1> -------------------------------------------------- *Input*: @@ -155,21 +159,22 @@ CURRENT_TIMESTAMP(precision <1>) Returns the date/time when the current query reached the server. As a function, `CURRENT_TIMESTAMP()` accepts _precision_ as an optional -parameter for rounding the second fractional digits (nanoseconds). +parameter for rounding the second fractional digits (nanoseconds). The default _precision_ is 3, +meaning a milliseconds precision current date/time will be returned. This method always returns the same value for its every occurrence within the same query. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[curTs] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[curTsFunction] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[curTsFunctionPrecision] -------------------------------------------------- @@ -177,18 +182,22 @@ include-tagged::{sql-specs}/docs.csv-spec[curTsFunctionPrecision] Typically, this function (as well as its twin <> function is used for relative date/time filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[filterNow] -------------------------------------------------- +[IMPORTANT] +Currently, using a _precision_ greater than 3 doesn't make any difference to the output of the +function as the maximum number of second fractional digits returned is 3 (milliseconds). + [[sql-functions-datetime-day]] ==== `DAY_OF_MONTH/DOM/DAY` .Synopsis: [source, sql] -------------------------------------------------- -DAY_OF_MONTH(datetime_exp<1>) +DAY_OF_MONTH(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -201,7 +210,7 @@ DAY_OF_MONTH(datetime_exp<1>) Extract the day of the month from a date/datetime. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[dayOfMonth] -------------------------------------------------- @@ -212,7 +221,7 @@ include-tagged::{sql-specs}/docs.csv-spec[dayOfMonth] .Synopsis: [source, sql] -------------------------------------------------- -DAY_OF_WEEK(datetime_exp<1>) +DAY_OF_WEEK(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -225,7 +234,7 @@ DAY_OF_WEEK(datetime_exp<1>) Extract the day of the week from a date/datetime. Sunday is `1`, Monday is `2`, etc. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[dayOfWeek] -------------------------------------------------- @@ -236,7 +245,7 @@ include-tagged::{sql-specs}/docs.csv-spec[dayOfWeek] .Synopsis: [source, sql] -------------------------------------------------- -DAY_OF_YEAR(datetime_exp<1>) +DAY_OF_YEAR(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -249,7 +258,7 @@ DAY_OF_YEAR(datetime_exp<1>) Extract the day of the year from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[dayOfYear] -------------------------------------------------- @@ -260,7 +269,7 @@ include-tagged::{sql-specs}/docs.csv-spec[dayOfYear] .Synopsis: [source, sql] -------------------------------------------------- -DAY_NAME(datetime_exp<1>) +DAY_NAME(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -273,7 +282,7 @@ DAY_NAME(datetime_exp<1>) Extract the day of the week from a date/datetime in text format (`Monday`, `Tuesday`...). -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[dayName] -------------------------------------------------- @@ -284,7 +293,7 @@ include-tagged::{sql-specs}/docs.csv-spec[dayName] .Synopsis: [source, sql] -------------------------------------------------- -HOUR_OF_DAY(datetime_exp<1>) +HOUR_OF_DAY(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -297,7 +306,7 @@ HOUR_OF_DAY(datetime_exp<1>) Extract the hour of the day from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[hourOfDay] -------------------------------------------------- @@ -308,7 +317,7 @@ include-tagged::{sql-specs}/docs.csv-spec[hourOfDay] .Synopsis: [source, sql] -------------------------------------------------- -ISO_DAY_OF_WEEK(datetime_exp<1>) +ISO_DAY_OF_WEEK(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -322,7 +331,7 @@ ISO_DAY_OF_WEEK(datetime_exp<1>) Extract the day of the week from a date/datetime, following the https://en.wikipedia.org/wiki/ISO_week_date[ISO 8601 standard]. Monday is `1`, Tuesday is `2`, etc. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[isoDayOfWeek] -------------------------------------------------- @@ -333,7 +342,7 @@ include-tagged::{sql-specs}/docs.csv-spec[isoDayOfWeek] .Synopsis: [source, sql] -------------------------------------------------- -ISO_WEEK_OF_YEAR(datetime_exp<1>) +ISO_WEEK_OF_YEAR(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -347,7 +356,7 @@ ISO_WEEK_OF_YEAR(datetime_exp<1>) Extract the week of the year from a date/datetime, following https://en.wikipedia.org/wiki/ISO_week_date[ISO 8601 standard]. The first week of a year is the first week with a majority (4 or more) of its days in January. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[isoWeekOfYear] -------------------------------------------------- @@ -358,7 +367,7 @@ include-tagged::{sql-specs}/docs.csv-spec[isoWeekOfYear] .Synopsis: [source, sql] -------------------------------------------------- -MINUTE_OF_DAY(datetime_exp<1>) +MINUTE_OF_DAY(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -371,7 +380,7 @@ MINUTE_OF_DAY(datetime_exp<1>) Extract the minute of the day from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[minuteOfDay] -------------------------------------------------- @@ -382,7 +391,7 @@ include-tagged::{sql-specs}/docs.csv-spec[minuteOfDay] .Synopsis: [source, sql] -------------------------------------------------- -MINUTE_OF_HOUR(datetime_exp<1>) +MINUTE_OF_HOUR(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -395,7 +404,7 @@ MINUTE_OF_HOUR(datetime_exp<1>) Extract the minute of the hour from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[minuteOfHour] -------------------------------------------------- @@ -406,7 +415,7 @@ include-tagged::{sql-specs}/docs.csv-spec[minuteOfHour] .Synopsis: [source, sql] -------------------------------------------------- -MONTH(datetime_exp<1>) +MONTH(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -419,7 +428,7 @@ MONTH(datetime_exp<1>) Extract the month of the year from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[monthOfYear] -------------------------------------------------- @@ -430,7 +439,7 @@ include-tagged::{sql-specs}/docs.csv-spec[monthOfYear] .Synopsis: [source, sql] -------------------------------------------------- -MONTH_NAME(datetime_exp<1>) +MONTH_NAME(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -443,7 +452,7 @@ MONTH_NAME(datetime_exp<1>) Extract the month from a date/datetime in text format (`January`, `February`...). 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[monthName] -------------------------------------------------- @@ -467,7 +476,7 @@ This function offers the same functionality as <> function is used for relative date/time filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[filterNow] -------------------------------------------------- @@ -486,7 +495,7 @@ include-tagged::{sql-specs}/docs.csv-spec[filterNow] .Synopsis: [source, sql] -------------------------------------------------- -SECOND_OF_MINUTE(datetime_exp<1>) +SECOND_OF_MINUTE(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -499,7 +508,7 @@ SECOND_OF_MINUTE(datetime_exp<1>) Extract the second of the minute from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[secondOfMinute] -------------------------------------------------- @@ -510,7 +519,7 @@ include-tagged::{sql-specs}/docs.csv-spec[secondOfMinute] .Synopsis: [source, sql] -------------------------------------------------- -QUARTER(datetime_exp<1>) +QUARTER(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -523,7 +532,7 @@ QUARTER(datetime_exp<1>) Extract the year quarter the date/datetime falls in. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[quarter] -------------------------------------------------- @@ -547,7 +556,7 @@ This function offers the same functionality as <> function is used for relative date filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[filterToday] -------------------------------------------------- @@ -566,7 +575,7 @@ include-tagged::{sql-specs}/docs.csv-spec[filterToday] .Synopsis: [source, sql] -------------------------------------------------- -WEEK_OF_YEAR(datetime_exp<1>) +WEEK_OF_YEAR(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -579,7 +588,7 @@ WEEK_OF_YEAR(datetime_exp<1>) Extract the week of the year from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[weekOfYear] -------------------------------------------------- @@ -590,7 +599,7 @@ include-tagged::{sql-specs}/docs.csv-spec[weekOfYear] .Synopsis: [source, sql] -------------------------------------------------- -YEAR(datetime_exp<1>) +YEAR(datetime_exp) <1> -------------------------------------------------- *Input*: @@ -603,7 +612,7 @@ YEAR(datetime_exp<1>) Extract the year from a date/datetime. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[year] -------------------------------------------------- @@ -614,7 +623,9 @@ include-tagged::{sql-specs}/docs.csv-spec[year] .Synopsis: [source, sql] -------------------------------------------------- -EXTRACT(datetime_function<1> FROM datetime_exp<2>) +EXTRACT( + datetime_function <1> + FROM datetime_exp) <2> -------------------------------------------------- *Input*: @@ -629,14 +640,14 @@ EXTRACT(datetime_function<1> FROM datetime_exp<2>) Extract fields from a date/datetime by specifying the name of a <>. The following -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[extractDayOfYear] -------------------------------------------------- is the equivalent to -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[dayOfYear] -------------------------------------------------- diff --git a/docs/reference/sql/functions/grouping.asciidoc b/docs/reference/sql/functions/grouping.asciidoc index 261066799f893..5a8ae2e1fec24 100644 --- a/docs/reference/sql/functions/grouping.asciidoc +++ b/docs/reference/sql/functions/grouping.asciidoc @@ -3,8 +3,6 @@ [[sql-functions-grouping]] === Grouping Functions -beta[] - Functions for creating special __grouping__s (also known as _bucketing_); as such these need to be used as part of the <>. @@ -14,8 +12,13 @@ as part of the <>. .Synopsis: [source, sql] ---- -HISTOGRAM(numeric_exp<1>, numeric_interval<2>) -HISTOGRAM(date_exp<3>, date_time_interval<4>) +HISTOGRAM( + numeric_exp, <1> + numeric_interval) <2> + +HISTOGRAM( + date_exp, <3> + date_time_interval) <4> ---- *Input*: @@ -41,14 +44,14 @@ NOTE:: The histogram in SQL does *NOT* return empty buckets for missing interval `Histogram` can be applied on either numeric fields: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[histogramNumeric] ---- or date/time fields: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[histogramDateTime] ---- @@ -56,14 +59,14 @@ include-tagged::{sql-specs}/docs.csv-spec[histogramDateTime] Expressions inside the histogram are also supported as long as the return type is numeric: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[histogramNumericExpression] ---- Do note that histograms (and grouping functions in general) allow custom expressions but cannot have any functions applied to them in the `GROUP BY`. 
In other words, the following statement is *NOT* allowed: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[expressionOnHistogramNotAllowed] ---- @@ -72,7 +75,7 @@ as it requires two groupings (one for histogram followed by a second for applyin Instead one can rewrite the query to move the expression on the histogram _inside_ of it: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[histogramDateTimeExpression] ---- diff --git a/docs/reference/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc index 85c2e25f86517..c660d29e2b964 100644 --- a/docs/reference/sql/functions/index.asciidoc +++ b/docs/reference/sql/functions/index.asciidoc @@ -3,22 +3,140 @@ [[sql-functions]] == Functions and Operators -beta[] - {es-sql} provides a comprehensive set of built-in operators and functions: * <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +* <> +** <> +** <> +* <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +* <> +** <> +* <> +* <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +* <> +** <> +** <> +** <> +* <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +* <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +* <> +** <> +** <> +* <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +* <> +** <> +** <> include::operators.asciidoc[] +include::like-rlike.asciidoc[] include::aggs.asciidoc[] include::grouping.asciidoc[] include::date-time.asciidoc[] diff --git a/docs/reference/sql/functions/like-rlike.asciidoc b/docs/reference/sql/functions/like-rlike.asciidoc new file mode 100644 index 0000000000000..665d3e1fe8664 --- /dev/null +++ b/docs/reference/sql/functions/like-rlike.asciidoc @@ -0,0 +1,104 @@ +[role="xpack"] +[testenv="basic"] +[[sql-like-rlike-operators]] +=== LIKE and RLIKE Operators + +`LIKE` and `RLIKE` operators are commonly used to filter data based on string patterns. They usually act on a field placed on the left-hand side of +the operator, but can also act on a constant (literal) expression. The right-hand side of the operator represents the pattern. +Both can be used in the `WHERE` clause of the `SELECT` statement, but `LIKE` can also be used in other places, such as defining an +<> or across various <>. +This section covers only the `SELECT ... WHERE ...` usage. + +NOTE: One significant difference between `LIKE`/`RLIKE` and the <> is that the former +act on <> while the latter also work on <> fields. If the field used with `LIKE`/`RLIKE` doesn't +have an exact not-normalized sub-field (of <> type) {es-sql} will not be able to run the query. If the field is either exact +or has an exact sub-field, it will use it as is, or it will automatically use the exact sub-field even if it wasn't explicitly specified in the statement. 
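For instance, a minimal sketch, assuming a `library` index whose `author` field is either a `keyword` or has an exact (`keyword`) sub-field:

[source, sql]
----
SELECT author, name
FROM library
WHERE author LIKE 'Frank%';
----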
+ +[[sql-like-operator]] +==== `LIKE` + +.Synopsis: +[source, sql] +-------------------------------------------------- +expression <1> +LIKE constant_exp <2> +-------------------------------------------------- + +<1> typically a field, or a constant expression +<2> pattern + +.Description: + +The SQL `LIKE` operator is used to compare a value to similar values using wildcard operators. There are two wildcards used in conjunction +with the `LIKE` operator: + +* The percent sign (%) +* The underscore (_) + +The percent sign represents zero, one or multiple characters. The underscore represents a single number or character. These symbols can be +used in combinations. + +[source, sql] +---- +include-tagged::{sql-specs}/docs.csv-spec[simpleLike] +---- + +There is, also, the possibility of using an escape character if one needs to match the wildcard characters themselves. This can be done +by using the `ESCAPE [escape_character]` statement after the `LIKE ...` operator: + + SELECT name, author FROM library WHERE name LIKE 'Dune/%' ESCAPE '/'; + +In the example above `/` is defined as an escape character which needs to be placed before the `%` or `_` characters if one needs to +match those characters in the pattern specifically. By default, there is no escape character defined. + +IMPORTANT: Even though `LIKE` is a valid option when searching or filtering in {es-sql}, full-text search predicates +`MATCH` and `QUERY` are <>. + +[[sql-rlike-operator]] +==== `RLIKE` + +.Synopsis: +[source, sql] +-------------------------------------------------- +expression <1> +RLIKE constant_exp <2> +-------------------------------------------------- + +<1> typically a field, or a constant expression +<2> pattern + +.Description: + +This operator is similar to `LIKE`, but the user is not limited to search for a string based on a fixed pattern with the percent sign (`%`) +and underscore (`_`); the pattern in this case is a regular expression which allows the construction of more flexible patterns. + +For more details about the regular expressions syntax, https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/util/regex/Pattern.html[Java's Pattern class javadoc] +is a good starting point. + +[source, sql] +---- +include-tagged::{sql-specs}/docs.csv-spec[simpleRLike] +---- + +IMPORTANT: Even though `RLIKE` is a valid option when searching or filtering in {es-sql}, full-text search predicates +`MATCH` and `QUERY` are <>. + +[[sql-like-prefer-full-text]] +==== Prefer full-text search predicates + +When using `LIKE`/`RLIKE`, do consider using <> which are faster, much more powerful +and offer the option of sorting by relevancy (results can be returned based on how well they matched). 
+ +For example: + +[cols=") +ABS(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -35,12 +35,12 @@ include-tagged::{sql-specs}/docs.csv-spec[abs] -------------------------------------------------- [[sql-functions-math-cbrt]] -===== `CBRT` +==== `CBRT` .Synopsis: [source, sql] -------------------------------------------------- -CBRT(numeric_exp<1>) +CBRT(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -59,12 +59,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineCbrtWithNegativeValue] -------------------------------------------------- [[sql-functions-math-ceil]] -===== `CEIL/CEILING` +==== `CEIL/CEILING` .Synopsis: [source, sql] -------------------------------------------------- -CEIL(numeric_exp<1>) +CEIL(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -83,7 +83,7 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineCeiling] -------------------------------------------------- [[sql-functions-math-e]] -===== `E` +==== `E` .Synopsis: [source, sql] @@ -105,12 +105,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathEulersNumber] -------------------------------------------------- [[sql-functions-math-exp]] -===== `EXP` +==== `EXP` .Synopsis: [source, sql] -------------------------------------------------- -EXP(numeric_exp<1>) +EXP(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -129,12 +129,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathExpInline] -------------------------------------------------- [[sql-functions-math-expm1]] -===== `EXPM1` +==== `EXPM1` .Synopsis: [source, sql] -------------------------------------------------- -EXPM1(numeric_exp<1>) +EXPM1(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -153,12 +153,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathExpm1Inline] -------------------------------------------------- [[sql-functions-math-floor]] -===== `FLOOR` +==== `FLOOR` .Synopsis: [source, sql] -------------------------------------------------- -FLOOR(numeric_exp<1>) +FLOOR(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -177,12 +177,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineFloor] -------------------------------------------------- [[sql-functions-math-log]] -===== `LOG` +==== `LOG` .Synopsis: [source, sql] -------------------------------------------------- -LOG(numeric_exp<1>) +LOG(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -201,12 +201,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineLog] -------------------------------------------------- [[sql-functions-math-log10]] -===== `LOG10` +==== `LOG10` .Synopsis: [source, sql] -------------------------------------------------- -LOG10(numeric_exp<1>) +LOG10(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -225,7 +225,7 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineLog10] -------------------------------------------------- [[sql-functions-math-pi]] -===== `PI` +==== `PI` .Synopsis: [source, sql] @@ -247,12 +247,14 @@ include-tagged::{sql-specs}/docs.csv-spec[mathPINumber] -------------------------------------------------- [[sql-functions-math-power]] -===== `POWER` +==== `POWER` .Synopsis: [source, sql] -------------------------------------------------- -POWER(numeric_exp<1>, integer_exp<2>) +POWER( + numeric_exp, <1> + integer_exp) <2> -------------------------------------------------- *Input*: @@ -277,12 +279,12 @@ 
include-tagged::{sql-specs}/docs.csv-spec[mathInlinePowerNegative] -------------------------------------------------- [[sql-functions-math-random]] -===== `RANDOM/RAND` +==== `RANDOM/RAND` .Synopsis: [source, sql] -------------------------------------------------- -RANDOM(seed<1>) +RANDOM(seed) <1> -------------------------------------------------- *Input*: @@ -301,12 +303,14 @@ include-tagged::{sql-specs}/docs.csv-spec[mathRandom] -------------------------------------------------- [[sql-functions-math-round]] -===== `ROUND` +==== `ROUND` .Synopsis: [source, sql] ---- -ROUND(numeric_exp<1>[, integer_exp<2>]) +ROUND( + numeric_exp <1> + [, integer_exp]) <2> ---- *Input*: @@ -332,12 +336,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathRoundWithNegativeParameter] -------------------------------------------------- [[sql-functions-math-sign]] -===== `SIGN/SIGNUM` +==== `SIGN/SIGNUM` .Synopsis: [source, sql] -------------------------------------------------- -SIGN(numeric_exp<1>) +SIGN(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -357,12 +361,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineSign] [[sql-functions-math-sqrt]] -===== `SQRT` +==== `SQRT` .Synopsis: [source, sql] -------------------------------------------------- -SQRT(numeric_exp<1>) +SQRT(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -381,12 +385,14 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineSqrt] -------------------------------------------------- [[sql-functions-math-truncate]] -===== `TRUNCATE` +==== `TRUNCATE` .Synopsis: [source, sql] ---- -TRUNCATE(numeric_exp<1>[, integer_exp<2>]) +TRUNCATE( + numeric_exp <1> + [, integer_exp]) <2> ---- *Input*: @@ -411,15 +417,17 @@ include-tagged::{sql-specs}/docs.csv-spec[mathTruncateWithPositiveParameter] include-tagged::{sql-specs}/docs.csv-spec[mathTruncateWithNegativeParameter] -------------------------------------------------- -==== Trigonometric +[[sql-functions-math-trigonometric]] +[float] +=== Trigonometric [[sql-functions-math-acos]] -===== `ACOS` +==== `ACOS` .Synopsis: [source, sql] -------------------------------------------------- -ACOS(numeric_exp<1>) +ACOS(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -438,12 +446,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineAcos] -------------------------------------------------- [[sql-functions-math-asin]] -===== `ASIN` +==== `ASIN` .Synopsis: [source, sql] -------------------------------------------------- -ASIN(numeric_exp<1>) +ASIN(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -462,12 +470,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineAsin] -------------------------------------------------- [[sql-functions-math-atan]] -===== `ATAN` +==== `ATAN` .Synopsis: [source, sql] -------------------------------------------------- -ATAN(numeric_exp<1>) +ATAN(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -486,12 +494,14 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineAtan] -------------------------------------------------- [[sql-functions-math-atan2]] -===== `ATAN2` +==== `ATAN2` .Synopsis: [source, sql] -------------------------------------------------- -ATAN2(ordinate<1>, abscisa<2>) +ATAN2( + ordinate, <1> + abscisa) <2> -------------------------------------------------- *Input*: @@ -511,12 +521,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineAtan2] -------------------------------------------------- 
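As an additional sanity check with literal arguments (no index involved), `ATAN2(1, 1)` should evaluate to π/4 (roughly 0.7854):

[source, sql]
----
SELECT ATAN2(1, 1) AS result;
----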
[[sql-functions-math-cos]] -===== `COS` +==== `COS` .Synopsis: [source, sql] -------------------------------------------------- -COS(numeric_exp<1>) +COS(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -535,12 +545,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineCosine] -------------------------------------------------- [[sql-functions-math-cosh]] -===== `COSH` +==== `COSH` .Synopsis: [source, sql] -------------------------------------------------- -COSH(numeric_exp<1>) +COSH(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -559,12 +569,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineCosh] -------------------------------------------------- [[sql-functions-math-cot]] -===== `COT` +==== `COT` .Synopsis: [source, sql] -------------------------------------------------- -COT(numeric_exp<1>) +COT(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -583,12 +593,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineCotangent] -------------------------------------------------- [[sql-functions-math-degrees]] -===== `DEGREES` +==== `DEGREES` .Synopsis: [source, sql] -------------------------------------------------- -DEGREES(numeric_exp<1>) +DEGREES(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -608,12 +618,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineDegrees] -------------------------------------------------- [[sql-functions-math-radians]] -===== `RADIANS` +==== `RADIANS` .Synopsis: [source, sql] -------------------------------------------------- -RADIANS(numeric_exp<1>) +RADIANS(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -633,12 +643,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineRadians] -------------------------------------------------- [[sql-functions-math-sin]] -===== `SIN` +==== `SIN` .Synopsis: [source, sql] -------------------------------------------------- -SIN(numeric_exp<1>) +SIN(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -657,12 +667,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineSine] -------------------------------------------------- [[sql-functions-math-sinh]] -===== `SINH` +==== `SINH` .Synopsis: [source, sql] -------------------------------------------------- -SINH(numeric_exp<1>) +SINH(numeric_exp) <1> -------------------------------------------------- *Input*: @@ -681,12 +691,12 @@ include-tagged::{sql-specs}/docs.csv-spec[mathInlineSinh] -------------------------------------------------- [[sql-functions-math-tan]] -===== `TAN` +==== `TAN` .Synopsis: [source, sql] -------------------------------------------------- -TAN(numeric_exp<1>) +TAN(numeric_exp) <1> -------------------------------------------------- *Input*: diff --git a/docs/reference/sql/functions/operators.asciidoc b/docs/reference/sql/functions/operators.asciidoc index a5d522b7e5b96..b17da57f9f483 100644 --- a/docs/reference/sql/functions/operators.asciidoc +++ b/docs/reference/sql/functions/operators.asciidoc @@ -3,60 +3,65 @@ [[sql-operators]] === Comparison Operators -beta[] - Boolean operator for comparing against one or multiple expressions. 
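For instance, a small illustrative query using the `BETWEEN` operator described below (the `library` index and `page_count` field are just the example data set used throughout these docs):

[source, sql]
----
SELECT name, page_count
FROM library
WHERE page_count BETWEEN 400 AND 700;
----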
-* Equality (`=`) +[[sql-operators-equality]] +==== `Equality (=)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldEquality] -------------------------------------------------- -* Null safe Equality (`<=>`) +[[sql-operators-null-safe-equality]] +==== `Null safe Equality (<=>)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[nullEqualsCompareWithNull] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[nullEqualsCompareTwoNulls] -------------------------------------------------- -* Inequality (`<>` or `!=`) +[[sql-operators-inequality]] +==== `Inequality (<> or !=)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldNonEquality] -------------------------------------------------- -* Comparison (`<`, `<=`, `>`, `>=`) +[[sql-operators-comparison]] +==== `Comparison (<, <=, >, >=)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldLessThan] -------------------------------------------------- -* `BETWEEN` +[[sql-operators-between]] +==== `BETWEEN` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereBetween] -------------------------------------------------- -* `IS NULL/IS NOT NULL` +[[sql-operators-is-null]] +==== `IS NULL/IS NOT NULL` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereIsNotNullAndIsNull] -------------------------------------------------- -* `IN (, , ...)` +[[sql-operators-in]] +==== `IN (, , ...)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereWithInAndMultipleValues] -------------------------------------------------- @@ -64,27 +69,28 @@ include-tagged::{sql-specs}/filter.sql-spec[whereWithInAndMultipleValues] [[sql-operators-logical]] === Logical Operators -beta[] - Boolean operator for evaluating one or two expressions. 
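For instance, a hedged sketch combining several of the operators below over the illustrative `library` data set:

[source, sql]
----
SELECT name
FROM library
WHERE page_count > 400 AND NOT author = 'Dan Simmons';
----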
-* `AND` +[[sql-operators-and]] +==== `AND` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldAndComparison] -------------------------------------------------- -* `OR` +[[sql-operators-or]] +==== `OR` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldOrComparison] -------------------------------------------------- -* `NOT` +[[sql-operators-not]] +==== `NOT` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldEqualityNot] -------------------------------------------------- @@ -92,49 +98,53 @@ include-tagged::{sql-specs}/filter.sql-spec[whereFieldEqualityNot] [[sql-operators-math]] === Math Operators -beta[] - Perform mathematical operations affecting one or two values. The result is a value of numeric type. -* Add (`+`) +[[sql-operators-plus]] +==== `Add (+)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[plus] -------------------------------------------------- -* Subtract (infix `-`) +[[sql-operators-subtract]] +==== `Subtract (infix -)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[minus] -------------------------------------------------- -* Negate (unary `-`) +[[sql-operators-negate]] +==== `Negate (unary -)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[unaryMinus] -------------------------------------------------- -* Multiply (`*`) +[[sql-operators-multiply]] +==== `Multiply (*)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[multiply] -------------------------------------------------- -* Divide (`/`) +[[sql-operators-divide]] +==== `Divide (/)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[divide] -------------------------------------------------- -* https://en.wikipedia.org/wiki/Modulo_operation[Modulo] or Reminder(`%`) +[[sql-operators-remainder]] +==== `Modulo or Remainder(%)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[mod] -------------------------------------------------- diff --git a/docs/reference/sql/functions/search.asciidoc b/docs/reference/sql/functions/search.asciidoc index 95ebfa942ae4a..e355c5f8c0fe1 100644 --- a/docs/reference/sql/functions/search.asciidoc +++ b/docs/reference/sql/functions/search.asciidoc @@ -3,13 +3,128 @@ [[sql-functions-search]] === Full-Text Search Functions -beta[] - Search functions should be used when performing full-text search, namely when the `MATCH` or `QUERY` predicates are being used. Outside a, so-called, search context, these functions will return default values such as `0` or `NULL`. 
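For instance, a minimal sketch (using the illustrative `library` index): the `MATCH` predicate below establishes a search context, so `SCORE()` returns actual relevance values rather than the default:

[source, sql]
----
SELECT SCORE(), name
FROM library
WHERE MATCH(name, 'dune');
----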
+[[sql-functions-search-match]] +==== `MATCH` + +.Synopsis: +[source, sql] +-------------------------------------------------- +MATCH( + field_exp, <1> + constant_exp <2> + [, options]) <3> +-------------------------------------------------- + +*Input*: + +<1> field(s) to match +<2> matching text +<3> additional parameters; optional + +.Description: + +A full-text search option, in the form of a predicate, available in {es-sql} that gives the user control over powerful <> +and <> {es} queries. + +The first parameter is the field or fields to match against. In case it receives one value only, {es-sql} will use a `match` query to perform the search: + +[source, sql] +---- +include-tagged::{sql-specs}/docs.csv-spec[simpleMatch] +---- + +However, it can also receive a list of fields and their corresponding optional `boost` value. In this case, {es-sql} will use a +`multi_match` query to match the documents: + +[source, sql] +---- +include-tagged::{sql-specs}/docs.csv-spec[multiFieldsMatch] +---- + +NOTE: The `multi_match` query in {es} has the option of <> that gives preferential weight +(in terms of scoring) to fields being searched in, using the `^` character. In the example above, the `name` field has a greater weight in +the final score than the `author` field when searching for `frank dune` text in both of them. + +Both options above can be used in combination with the optional third parameter of the `MATCH()` predicate, where one can specify +additional configuration parameters (separated by semicolon `;`) for either `match` or `multi_match` queries. For example: + +[source, sql] +---- +include-tagged::{sql-specs}/docs.csv-spec[optionalParamsForMatch] +---- + +In the more advanced example above, the `cutoff_frequency` parameter allows specifying an absolute or relative document frequency where +high frequency terms are moved into an optional subquery and are only scored if one of the low frequency (below the cutoff) terms in the +case of an `or` operator or all of the low frequency terms in the case of an `and` operator match. More about this you can find in the +<> page. + +NOTE: The allowed optional parameters for a single-field `MATCH()` variant (for the `match` {es} query) are: `analyzer`, `auto_generate_synonyms_phrase_query`, +`cutoff_frequency`, `lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, +`max_expansions`, `prefix_length`. + +NOTE: The allowed optional parameters for a multi-field `MATCH()` variant (for the `multi_match` {es} query) are: `analyzer`, `auto_generate_synonyms_phrase_query`, +`cutoff_frequency`, `lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, +`max_expansions`, `prefix_length`, `slop`, `tie_breaker`, `type`. + + +[[sql-functions-search-query]] +==== `QUERY` + +.Synopsis: +[source, sql] +-------------------------------------------------- +QUERY( + constant_exp <1> + [, options]) <2> +-------------------------------------------------- + +*Input*: + +<1> query text +<2> additional parameters; optional + +.Description: + +Just like `MATCH`, `QUERY` is a full-text search predicate that gives the user control over the <> query in {es}. 
+ +The first parameter is basically the input that will be passed as is to the `query_string` query, which means that anything that `query_string` +accepts in its `query` field can be used here as well: + +[source, sql] +---- +include-tagged::{sql-specs}/docs.csv-spec[simpleQueryQuery] +---- + +A more advanced example, showing more of the features that `query_string` supports, of course possible with {es-sql}: + +[source, sql] +---- +include-tagged::{sql-specs}/docs.csv-spec[advancedQueryQuery] +---- + +The query above uses the `_exists_` query to select documents that have values in the `author` field, a range query for `page_count` and +regex and fuzziness queries for the `name` field. + +If one needs to customize various configuration options that `query_string` exposes, this can be done using the second _optional_ parameter. +Multiple settings can be specified separated by a semicolon `;`: + +[source, sql] +---- +include-tagged::{sql-specs}/docs.csv-spec[optionalParameterQuery] +---- + +NOTE: The allowed optional parameters for `QUERY()` are: `allow_leading_wildcard`, `analyze_wildcard`, `analyzer`, +`auto_generate_synonyms_phrase_query`, `default_field`, `default_operator`, `enable_position_increments`, +`escape`, `fuzziness`, `fuzzy_max_expansions`, `fuzzy_prefix_length`, `fuzzy_rewrite`, `fuzzy_transpositions`, +`lenient`, `max_determinized_states`, `minimum_should_match`, `phrase_slop`, `rewrite`, `quote_analyzer`, +`quote_field_suffix`, `tie_breaker`, `time_zone`, `type`. + + [[sql-functions-search-score]] ==== `SCORE` @@ -34,14 +149,14 @@ combined using the same rules as {es}'s Typically `SCORE` is used for ordering the results of a query based on their relevance: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[orderByScore] ---- However, it is perfectly fine to return the score without sorting by it: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[scoreWithMatch] ---- diff --git a/docs/reference/sql/functions/string.asciidoc b/docs/reference/sql/functions/string.asciidoc index a2832b5525dc9..149bf093553df 100644 --- a/docs/reference/sql/functions/string.asciidoc +++ b/docs/reference/sql/functions/string.asciidoc @@ -3,8 +3,6 @@ [[sql-functions-string]] === String Functions -beta[] - Functions for performing string manipulation. [[sql-functions-string-ascii]] @@ -13,7 +11,7 @@ Functions for performing string manipulation. .Synopsis: [source, sql] -------------------------------------------------- -ASCII(string_exp<1>) +ASCII(string_exp) <1> -------------------------------------------------- *Input*: @@ -26,7 +24,7 @@ ASCII(string_exp<1>) Returns the ASCII code value of the leftmost character of `string_exp` as an integer. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringAscii] -------------------------------------------------- @@ -37,7 +35,7 @@ include-tagged::{sql-specs}/docs.csv-spec[stringAscii] .Synopsis: [source, sql] -------------------------------------------------- -BIT_LENGTH(string_exp<1>) +BIT_LENGTH(string_exp) <1> -------------------------------------------------- *Input*: @@ -49,7 +47,7 @@ BIT_LENGTH(string_exp<1>) Returns the length in bits of the `string_exp` input expression. 
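For instance, a quick sketch with a literal argument (for plain ASCII input the result typically works out to eight bits per character, i.e. 56 here):

[source, sql]
----
SELECT BIT_LENGTH('Elastic') AS bits;
----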
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringBitLength] -------------------------------------------------- @@ -60,7 +58,7 @@ include-tagged::{sql-specs}/docs.csv-spec[stringBitLength] .Synopsis: [source, sql] -------------------------------------------------- -CHAR(code<1>) +CHAR(code) <1> -------------------------------------------------- *Input*: @@ -72,7 +70,7 @@ CHAR(code<1>) Returns the character that has the ASCII code value specified by the numeric input. The value should be between 0 and 255; otherwise, the return value is data source–dependent. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringChar] -------------------------------------------------- @@ -83,7 +81,7 @@ include-tagged::{sql-specs}/docs.csv-spec[stringChar] .Synopsis: [source, sql] -------------------------------------------------- -CHAR_LENGTH(string_exp<1>) +CHAR_LENGTH(string_exp) <1> -------------------------------------------------- *Input*: @@ -95,7 +93,7 @@ CHAR_LENGTH(string_exp<1>) Returns the length in characters of the input, if the string expression is of a character data type; otherwise, returns the length in bytes of the string expression (the smallest integer not less than the number of bits divided by 8). -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringCharLength] -------------------------------------------------- @@ -106,7 +104,9 @@ include-tagged::{sql-specs}/docs.csv-spec[stringCharLength] .Synopsis: [source, sql] -------------------------------------------------- -CONCAT(string_exp1<1>,string_exp2<2>) +CONCAT( + string_exp1, <1> + string_exp2) <2> -------------------------------------------------- *Input*: @@ -119,7 +119,7 @@ CONCAT(string_exp1<1>,string_exp2<2>) Returns a character string that is the result of concatenating `string_exp1` to `string_exp2`. If one of the string is `NULL`, the other string will be returned. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringConcat] -------------------------------------------------- @@ -130,7 +130,11 @@ include-tagged::{sql-specs}/docs.csv-spec[stringConcat] .Synopsis: [source, sql] -------------------------------------------------- -INSERT(source<1>, start<2>, length<3>, replacement<4>) +INSERT( + source, <1> + start, <2> + length, <3> + replacement) <4> -------------------------------------------------- *Input*: @@ -145,7 +149,7 @@ INSERT(source<1>, start<2>, length<3>, replacement<4>) Returns a string where `length` characters have been deleted from `source`, beginning at `start`, and where `replacement` has been inserted into `source`, beginning at `start`. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringInsert] -------------------------------------------------- @@ -156,7 +160,7 @@ include-tagged::{sql-specs}/docs.csv-spec[stringInsert] .Synopsis: [source, sql] -------------------------------------------------- -LCASE(string_exp<1>) +LCASE(string_exp) <1> -------------------------------------------------- *Input*: @@ -168,7 +172,7 @@ LCASE(string_exp<1>) Returns a string equal to that in `string_exp`, with all uppercase characters converted to lowercase. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringLCase] -------------------------------------------------- @@ -179,7 +183,9 @@ include-tagged::{sql-specs}/docs.csv-spec[stringLCase] .Synopsis: [source, sql] -------------------------------------------------- -LEFT(string_exp<1>, count<2>) +LEFT( + string_exp, <1> + count) <2> -------------------------------------------------- *Input*: @@ -192,7 +198,7 @@ LEFT(string_exp<1>, count<2>) Returns the leftmost count characters of `string_exp`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringLeft] -------------------------------------------------- @@ -203,7 +209,7 @@ include-tagged::{sql-specs}/docs.csv-spec[stringLeft] .Synopsis: [source, sql] -------------------------------------------------- -LENGTH(string_exp<1>) +LENGTH(string_exp) <1> -------------------------------------------------- *Input*: @@ -215,7 +221,7 @@ LENGTH(string_exp<1>) Returns the number of characters in `string_exp`, excluding trailing blanks. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringLength] -------------------------------------------------- @@ -226,7 +232,11 @@ include-tagged::{sql-specs}/docs.csv-spec[stringLength] .Synopsis: [source, sql] -------------------------------------------------- -LOCATE(pattern<1>, source<2>[, start]<3>) +LOCATE( + pattern, <1> + source <2> + [, start]<3> +) -------------------------------------------------- *Input*: @@ -240,12 +250,12 @@ LOCATE(pattern<1>, source<2>[, start]<3>) Returns the starting position of the first occurrence of `pattern` within `source`. The search for the first occurrence of `pattern` begins with the first character position in `source` unless the optional argument, `start`, is specified. If `start` is specified, the search begins with the character position indicated by the value of `start`. The first character position in `source` is indicated by the value 1. If `pattern` is not found within `source`, the value 0 is returned. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringLocateWoStart] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringLocateWithStart] -------------------------------------------------- @@ -256,7 +266,7 @@ include-tagged::{sql-specs}/docs.csv-spec[stringLocateWithStart] .Synopsis: [source, sql] -------------------------------------------------- -LTRIM(string_exp<1>) +LTRIM(string_exp) <1> -------------------------------------------------- *Input*: @@ -268,7 +278,7 @@ LTRIM(string_exp<1>) Returns the characters of `string_exp`, with leading blanks removed. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringLTrim] -------------------------------------------------- @@ -279,7 +289,7 @@ include-tagged::{sql-specs}/docs.csv-spec[stringLTrim] .Synopsis: [source, sql] -------------------------------------------------- -OCTET_LENGTH(string_exp<1>) +OCTET_LENGTH(string_exp) <1> -------------------------------------------------- *Input*: @@ -291,7 +301,7 @@ OCTET_LENGTH(string_exp<1>) Returns the length in bytes of the `string_exp` input expression. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringOctetLength] -------------------------------------------------- @@ -302,7 +312,9 @@ include-tagged::{sql-specs}/docs.csv-spec[stringOctetLength] .Synopsis: [source, sql] -------------------------------------------------- -POSITION(string_exp1<1>, string_exp2<2>) +POSITION( + string_exp1, <1> + string_exp2) <2> -------------------------------------------------- *Input*: @@ -315,7 +327,7 @@ POSITION(string_exp1<1>, string_exp2<2>) Returns the position of the `string_exp1` in `string_exp2`. The result is an exact numeric. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringPosition] -------------------------------------------------- @@ -326,7 +338,9 @@ include-tagged::{sql-specs}/docs.csv-spec[stringPosition] .Synopsis: [source, sql] -------------------------------------------------- -REPEAT(string_exp<1>, count<2>) +REPEAT( + string_exp, <1> + count) <2> -------------------------------------------------- *Input*: @@ -339,7 +353,7 @@ REPEAT(string_exp<1>, count<2>) Returns a character string composed of `string_exp` repeated `count` times. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringRepeat] -------------------------------------------------- @@ -350,7 +364,10 @@ include-tagged::{sql-specs}/docs.csv-spec[stringRepeat] .Synopsis: [source, sql] -------------------------------------------------- -REPLACE(source<1>, pattern<2>, replacement<3>) +REPLACE( + source, <1> + pattern, <2> + replacement) <3> -------------------------------------------------- *Input*: @@ -364,7 +381,7 @@ REPLACE(source<1>, pattern<2>, replacement<3>) Search `source` for occurrences of `pattern`, and replace with `replacement`. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringReplace] -------------------------------------------------- @@ -375,7 +392,9 @@ include-tagged::{sql-specs}/docs.csv-spec[stringReplace] .Synopsis: [source, sql] -------------------------------------------------- -RIGHT(string_exp<1>, count<2>) +RIGHT( + string_exp, <1> + count) <2> -------------------------------------------------- *Input*: @@ -388,7 +407,7 @@ RIGHT(string_exp<1>, count<2>) Returns the rightmost count characters of `string_exp`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringRight] -------------------------------------------------- @@ -399,7 +418,7 @@ include-tagged::{sql-specs}/docs.csv-spec[stringRight] .Synopsis: [source, sql] -------------------------------------------------- -RTRIM(string_exp<1>) +RTRIM(string_exp) <1> -------------------------------------------------- *Input*: @@ -411,7 +430,7 @@ RTRIM(string_exp<1>) Returns the characters of `string_exp` with trailing blanks removed. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringRTrim] -------------------------------------------------- @@ -422,7 +441,7 @@ include-tagged::{sql-specs}/docs.csv-spec[stringRTrim] .Synopsis: [source, sql] -------------------------------------------------- -SPACE(count<1>) +SPACE(count) <1> -------------------------------------------------- *Input*: @@ -434,7 +453,7 @@ SPACE(count<1>) Returns a character string consisting of `count` spaces. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringSpace] -------------------------------------------------- @@ -445,7 +464,10 @@ include-tagged::{sql-specs}/docs.csv-spec[stringSpace] .Synopsis: [source, sql] -------------------------------------------------- -SUBSTRING(source<1>, start<2>, length<3>) +SUBSTRING( + source, <1> + start, <2> + length) <3> -------------------------------------------------- *Input*: @@ -459,7 +481,7 @@ SUBSTRING(source<1>, start<2>, length<3>) Returns a character string that is derived from `source`, beginning at the character position specified by `start` for `length` characters. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringSubString] -------------------------------------------------- @@ -470,7 +492,7 @@ include-tagged::{sql-specs}/docs.csv-spec[stringSubString] .Synopsis: [source, sql] -------------------------------------------------- -UCASE(string_exp<1>) +UCASE(string_exp) <1> -------------------------------------------------- *Input*: @@ -482,7 +504,7 @@ UCASE(string_exp<1>) Returns a string equal to that of the input, with all lowercase characters converted to uppercase. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[stringUCase] -------------------------------------------------- diff --git a/docs/reference/sql/functions/system.asciidoc b/docs/reference/sql/functions/system.asciidoc index 2570a5c856a9c..209e229801590 100644 --- a/docs/reference/sql/functions/system.asciidoc +++ b/docs/reference/sql/functions/system.asciidoc @@ -3,8 +3,6 @@ [[sql-functions-system]] === System Functions -beta[] - These functions return metadata type of information about the system being queried. [[sql-functions-system-database]] @@ -26,7 +24,7 @@ Returns the name of the database being queried. In the case of Elasticsearch SQL is the name of the Elasticsearch cluster. This function should always return a non-null value. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[database] -------------------------------------------------- @@ -48,7 +46,7 @@ USER() Returns the username of the authenticated user executing the query. This function can return `null` in case {stack-ov}/elasticsearch-security.html[Security] is disabled. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs.csv-spec[user] -------------------------------------------------- diff --git a/docs/reference/sql/functions/type-conversion.asciidoc b/docs/reference/sql/functions/type-conversion.asciidoc index b99e9cc5e9c0d..2862aa9e4c868 100644 --- a/docs/reference/sql/functions/type-conversion.asciidoc +++ b/docs/reference/sql/functions/type-conversion.asciidoc @@ -3,8 +3,6 @@ [[sql-functions-type-conversion]] === Type Conversion Functions -beta[] - Functions for converting an expression of one data type to another. [[sql-functions-type-conversion-cast]] @@ -13,7 +11,9 @@ Functions for converting an expression of one data type to another. .Synopsis: [source, sql] ---- -CAST(expression<1> AS data_type<2>) +CAST( + expression <1> + AS data_type) <2> ---- <1> Expression to cast @@ -25,21 +25,26 @@ Casts the result of the given expression to the target <, data_type<2>) +CONVERT( + expression, <1> + data_type) <2> ---- <1> Expression to convert @@ -59,12 +66,12 @@ Works exactly like <> with slightly differen Moreover, apart from the standard <> it supports the corresponding https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/explicit-data-type-conversion-function?view=sql-server-2017[ODBC data types]. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[conversionStringToIntConvertODBCDataType] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[conversionStringToIntConvertESDataType] ---- diff --git a/docs/reference/sql/getting-started.asciidoc b/docs/reference/sql/getting-started.asciidoc index 96cd3433099a1..ffd52060e2b0f 100644 --- a/docs/reference/sql/getting-started.asciidoc +++ b/docs/reference/sql/getting-started.asciidoc @@ -3,8 +3,6 @@ [[sql-getting-started]] == Getting Started with SQL -beta[] - To start using {es-sql}, create an index with some data to experiment with: @@ -42,7 +40,7 @@ Dan Simmons |Hyperion |482 |1989-05-26T00:00:00.000Z Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z -------------------------------------------------- // TESTRESPONSE[s/\|/\\|/ s/\+/\\+/] -// TESTRESPONSE[_cat] +// TESTRESPONSE[non_json] You can also use the <>. There is a script to start it shipped in x-pack's bin directory: diff --git a/docs/reference/sql/index.asciidoc b/docs/reference/sql/index.asciidoc index 821000b8ee213..dc649e2434da5 100644 --- a/docs/reference/sql/index.asciidoc +++ b/docs/reference/sql/index.asciidoc @@ -12,11 +12,13 @@ [partintro] -- -beta[] - -X-Pack includes a SQL feature to execute SQL against Elasticsearch +X-Pack includes a SQL feature to execute SQL queries against {es} indices and return results in tabular format. +The following chapters aim to cover everything from usage, to syntax and drivers. +Experience users or those in a hurry might want to jump directly to +the list of SQL <> and <>. + <>:: Overview of {es-sql} and its features. <>:: @@ -24,22 +26,19 @@ indices and return results in tabular format. <>:: Language conventions across SQL and {es}. <>:: - Securing {es-sql} and {es}. + Secure {es-sql} and {es}. <>:: - Accepts SQL in a JSON document, executes it, and returns the - results. + Execute SQL in JSON format over REST. <>:: - Accepts SQL in a JSON document and translates it into a native - Elasticsearch query and returns that. + Translate SQL in JSON format to {es} native query. <>:: - Command-line application that connects to {es} to execute - SQL and print tabular results. + Command-line application for executing SQL against {es}. <>:: - A JDBC driver for {es}. + JDBC driver for {es}. <>:: - An ODBC driver for {es}. + ODBC driver for {es}. <>:: - Documentation for configuring various SQL/BI tools with {es-sql}. + Setup various SQL/BI tools with {es-sql}. <>:: Overview of the {es-sql} language, such as supported data types, commands and syntax. 
diff --git a/docs/reference/sql/language/data-types.asciidoc b/docs/reference/sql/language/data-types.asciidoc index b42620e0c5415..75ede475567c2 100644 --- a/docs/reference/sql/language/data-types.asciidoc +++ b/docs/reference/sql/language/data-types.asciidoc @@ -1,10 +1,7 @@ [role="xpack"] [testenv="basic"] [[sql-data-types]] -== Data Types - -beta[] - +=== Data Types [cols="^,^m,^,^"] @@ -29,7 +26,7 @@ s|SQL precision | <> | keyword | VARCHAR | based on <> | <> | text | VARCHAR | 2,147,483,647 | <> | binary | VARBINARY | 2,147,483,647 -| <> | datetime | TIMESTAMP | 24 +| <> | datetime | TIMESTAMP | 29 | <> | ip | VARCHAR | 39 4+h| Complex types @@ -68,7 +65,7 @@ s|SQL type s|SQL precision -| date | 24 +| date | 29 | interval_year | 7 | interval_month | 7 | interval_day | 23 @@ -88,7 +85,7 @@ s|SQL precision [[sql-multi-field]] [float] -=== SQL and multi-fields +==== SQL and multi-fields A core concept in {es} is that of an `analyzed` field, that is a full-text value that is interpreted in order to be effectively indexed. These fields are of type <> and are not used for sorting or aggregations as their actual value depends on the <> used hence why {es} also offers the <> type for storing the _exact_ diff --git a/docs/reference/sql/language/index-patterns.asciidoc b/docs/reference/sql/language/index-patterns.asciidoc index 434e03d186fc4..06c0ba4c9617b 100644 --- a/docs/reference/sql/language/index-patterns.asciidoc +++ b/docs/reference/sql/language/index-patterns.asciidoc @@ -1,20 +1,20 @@ [role="xpack"] [testenv="basic"] [[sql-index-patterns]] -== Index patterns - -beta[] +=== Index patterns {es-sql} supports two types of patterns for matching multiple indices or tables: -* {es} multi-index +[[sql-index-patterns-multi]] +[float] +==== {es} multi-index The {es} notation for enumerating, including or excluding <> is supported _as long_ as it is quoted or escaped as a table identifier. For example: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showTablesEsMultiIndex] ---- @@ -28,21 +28,23 @@ The same kind of patterns can also be used to query multiple indices or tables. For example: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[fromTablePatternQuoted] ---- NOTE: There is the restriction that all resolved concrete tables have the exact same mapping. -* SQL `LIKE` notation +[[sql-index-patterns-like]] +[float] +==== SQL `LIKE` notation The common `LIKE` statement (including escaping if needed) to match a wildcard pattern, based on one `_` or multiple `%` characters. Using `SHOW TABLES` command again: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeWildcard] ---- @@ -51,7 +53,7 @@ The pattern matches all tables that start with `emp`. 
This command supports _escaping_ as well, for example: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeEscape] ---- diff --git a/docs/reference/sql/language/index.asciidoc b/docs/reference/sql/language/index.asciidoc index df0cd9359aa16..6ea6a15b3ed64 100644 --- a/docs/reference/sql/language/index.asciidoc +++ b/docs/reference/sql/language/index.asciidoc @@ -3,14 +3,14 @@ [[sql-spec]] == SQL Language -beta[] +This chapter describes the SQL syntax and semantics supported namely: -This chapter describes the SQL semantics supported in X-Pack namely: - -<>:: Data types +<>:: Lexical structure <>:: Commands +<>:: Data types <>:: Index patterns +include::syntax/lexic/index.asciidoc[] +include::syntax/commands/index.asciidoc[] include::data-types.asciidoc[] -include::syntax/index.asciidoc[] include::index-patterns.asciidoc[] diff --git a/docs/reference/sql/language/syntax/describe-table.asciidoc b/docs/reference/sql/language/syntax/commands/describe-table.asciidoc similarity index 74% rename from docs/reference/sql/language/syntax/describe-table.asciidoc rename to docs/reference/sql/language/syntax/commands/describe-table.asciidoc index ac07b35ac3367..61609d539be67 100644 --- a/docs/reference/sql/language/syntax/describe-table.asciidoc +++ b/docs/reference/sql/language/syntax/commands/describe-table.asciidoc @@ -3,12 +3,12 @@ [[sql-syntax-describe-table]] === DESCRIBE TABLE -beta[] - .Synopsis [source, sql] ---- -DESCRIBE [table identifier<1> | [LIKE pattern<2>]] +DESCRIBE + [table identifier | <1> + [LIKE pattern]] <2> ---- <1> single table identifier or double quoted es multi index @@ -18,7 +18,9 @@ or [source, sql] ---- -DESC [table identifier<1>|[LIKE pattern<2>]] +DESC + [table identifier | <1> + [LIKE pattern]] <2> ---- <1> single table identifier or double quoted es multi index @@ -28,7 +30,7 @@ DESC [table identifier<1>|[LIKE pattern<2>]] `DESC` and `DESCRIBE` are aliases to <>. 
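For instance, a minimal sketch against the illustrative `library` index (both spellings behave identically):

[source, sql]
----
DESCRIBE library;
----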
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[describeTable] ---- diff --git a/docs/reference/sql/language/syntax/index.asciidoc b/docs/reference/sql/language/syntax/commands/index.asciidoc similarity index 96% rename from docs/reference/sql/language/syntax/index.asciidoc rename to docs/reference/sql/language/syntax/commands/index.asciidoc index fd20af5c8ff43..78259d07914a8 100644 --- a/docs/reference/sql/language/syntax/index.asciidoc +++ b/docs/reference/sql/language/syntax/commands/index.asciidoc @@ -1,9 +1,7 @@ [role="xpack"] [testenv="basic"] [[sql-commands]] -== SQL Commands - -beta[] +=== SQL Commands This section contains the list of SQL commands supported by {es-sql} along with their syntax: diff --git a/docs/reference/sql/language/syntax/select.asciidoc b/docs/reference/sql/language/syntax/commands/select.asciidoc similarity index 87% rename from docs/reference/sql/language/syntax/select.asciidoc rename to docs/reference/sql/language/syntax/commands/select.asciidoc index eb05087fab3fa..82e446cfe3629 100644 --- a/docs/reference/sql/language/syntax/select.asciidoc +++ b/docs/reference/sql/language/syntax/commands/select.asciidoc @@ -3,8 +3,6 @@ [[sql-syntax-select]] === SELECT -beta[] - .Synopsis [source, sql] ---- @@ -38,7 +36,7 @@ The general execution of `SELECT` is as follows: As with a table, every output column of a `SELECT` has a name which can be either specified per column through the `AS` keyword : -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[selectColumnAlias] ---- @@ -48,14 +46,14 @@ which is why it is recommended to specify it. assigned by {es-sql} if no name is given: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[selectInline] ---- or if it's a simple column reference, use its name as the column name: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[selectColumn] ---- @@ -65,7 +63,7 @@ include-tagged::{sql-specs}/docs.csv-spec[selectColumn] To select all the columns in the source, one can use `*`: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[wildcardWithOrder] ---- @@ -73,7 +71,6 @@ include-tagged::{sql-specs}/docs.csv-spec[wildcardWithOrder] which essentially returns all(top-level fields, sub-fields, such as multi-fields are ignored] columns found. [[sql-syntax-from]] -[float] ==== FROM Clause The `FROM` clause specifies one table for the `SELECT` and has the following syntax: @@ -92,14 +89,14 @@ Represents the name (optionally qualified) of an existing table, either a concre If the table name contains special SQL characters (such as `.`,`-`,`*`,etc...) use double quotes to escape them: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[fromTableQuoted] ---- The name can be a <> pointing to multiple indices (likely requiring quoting as mentioned above) with the restriction that *all* resolved concrete tables have **exact mapping**. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[fromTablePatternQuoted] ---- @@ -107,13 +104,12 @@ include-tagged::{sql-specs}/docs.csv-spec[fromTablePatternQuoted] `alias`:: A substitute name for the `FROM` item containing the alias. 
An alias is used for brevity or to eliminate ambiguity. When an alias is provided, it completely hides the actual name of the table and must be used in its place. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[fromTableAlias] ---- [[sql-syntax-where]] -[float] ==== WHERE Clause The optional `WHERE` clause is used to filter rows from the query and has the following syntax: @@ -129,13 +125,12 @@ where: Represents an expression that evaluates to a `boolean`. Only the rows that match the condition (to `true`) are returned. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[basicWhere] ---- [[sql-syntax-group-by]] -[float] ==== GROUP BY The `GROUP BY` clause is used to divide the results into groups of rows on matching values from the designated columns. It has the following syntax: @@ -153,34 +148,34 @@ Represents an expression on which rows are being grouped _on_. It can be a colum A common, group by column name: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByColumn] ---- Grouping by output ordinal: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByOrdinal] ---- Grouping by alias: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByAlias] ---- And grouping by column expression (typically used along-side an alias): -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByExpression] ---- Or a mixture of the above: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByMulti] ---- @@ -190,27 +185,26 @@ When a `GROUP BY` clause is used in a `SELECT`, _all_ output expressions must be To wit: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByAndAgg] ---- Expressions over aggregates used in output: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByAndAggExpression] ---- Multiple aggregates used: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByAndMultipleAggs] ---- [[sql-syntax-group-by-implicit]] -[float] ===== Implicit Grouping When an aggregation is used without an associated `GROUP BY`, an __implicit grouping__ is applied, meaning all selected rows are considered to form a single default, or implicit group. @@ -218,20 +212,19 @@ As such, the query emits only a single row (as there is only a single group). A common example is counting the number of records: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByImplicitCount] ---- Of course, multiple aggregations can be applied: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByImplicitMultipleAggs] ---- [[sql-syntax-having]] -[float] ==== HAVING The `HAVING` clause can be used _only_ along aggregate functions (and thus `GROUP BY`) to filter what groups are kept or not and has the following syntax: @@ -252,20 +245,19 @@ Both `WHERE` and `HAVING` are used for filtering however there are several signi . 
`WHERE` works on individual *rows*, `HAVING` works on the *groups* created by ``GROUP BY`` . `WHERE` is evaluated *before* grouping, `HAVING` is evaluated *after* grouping -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByHaving] ---- Further more, one can use multiple aggregate expressions inside `HAVING` even ones that are not used in the output (`SELECT`): -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByHavingMultiple] ---- [[sql-syntax-having-group-by-implicit]] -[float] ===== Implicit Grouping As indicated above, it is possible to have a `HAVING` clause without a `GROUP BY`. In this case, the so-called <> is applied, meaning all selected rows are considered to form a single group and `HAVING` can be applied on any of the aggregate functions specified on this group. @@ -273,21 +265,20 @@ As such, the query emits only a single row (as there is only a single group) and In this example, `HAVING` matches: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[groupByHavingImplicitMatch] ---- //However `HAVING` can also not match, in which case an empty result is returned: // -//["source","sql",subs="attributes,callouts,macros"] +//[source, sql] //---- //include-tagged::{sql-specs}/docs.csv-spec[groupByHavingImplicitNoMatch] //---- [[sql-syntax-order-by]] -[float] ==== ORDER BY The `ORDER BY` clause is used to sort the results of `SELECT` by one or more expressions: @@ -305,15 +296,46 @@ Represents an input column, an output column or an ordinal number of the positio The direction, if not specified, is by default `ASC` (ascending). Regardless of the ordering specified, null values are ordered last (at the end). -IMPORTANT: When used along-side, `GROUP BY` expression can point _only_ to the columns used for grouping. +IMPORTANT: When used along-side, `GROUP BY` expression can point _only_ to the columns used for grouping or aggregate functions. For example, the following query sorts by an arbitrary input field (`page_count`): -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[orderByBasic] ---- +[[sql-syntax-order-by-grouping]] +==== Order By and Grouping + +For queries that perform grouping, ordering can be applied either on the grouping columns (by default ascending) or on aggregate functions. + +NOTE: With `GROUP BY`, make sure the ordering targets the resulting group - applying it to individual elements inside the group will have no impact on the results since regardless of the order, values inside the group are aggregated. + +For example, to order groups simply indicate the grouping key: + +[source, sql] +---- +include-tagged::{sql-specs}/docs.csv-spec[orderByGroup] +---- + +Multiple keys can be specified of course: +[source, sql] +---- +include-tagged::{sql-specs}/docs.csv-spec[groupByMulti] +---- + +Further more, it is possible to order groups based on aggregations of their values: + +[source, sql] +---- +include-tagged::{sql-specs}/docs.csv-spec[orderByAgg] +---- + +IMPORTANT: Ordering by aggregation is possible for up to 512 entries for memory consumption reasons. +In cases where the results pass this threshold, use <> to reduce the number +of results. 
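To sketch how the note above combines with grouping (the `emp` table and `languages` column are hypothetical placeholders, not part of the original snippets), a grouped query can be ordered by an aggregate and capped with `LIMIT` so the number of groups returned stays well under the threshold:

[source, sql]
----
-- hypothetical table and column; order the groups by the aggregate, then limit them
SELECT languages, COUNT(*) AS emp_count
  FROM emp
 GROUP BY languages
 ORDER BY COUNT(*) DESC
 LIMIT 5;
----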
+ [[sql-syntax-order-by-score]] ==== Order By Score @@ -326,7 +348,7 @@ combined using the same rules as {es}'s To sort based on the `score`, use the special function `SCORE()`: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[orderByScore] ---- @@ -334,7 +356,7 @@ include-tagged::{sql-specs}/docs.csv-spec[orderByScore] Note that you can return `SCORE()` by using a full-text search predicate in the `WHERE` clause. This is possible even if `SCORE()` is not used for sorting: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[orderByScoreWithMatch] ---- @@ -344,7 +366,6 @@ Trying to return `score` from a non full-text query will return the same value f all are equally relevant. [[sql-syntax-limit]] -[float] ==== LIMIT The `LIMIT` clause restricts (limits) the number of rows returns using the format: @@ -362,7 +383,7 @@ ALL:: indicates there is no limit and thus all results are being returned. To return -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[limitBasic] ---- diff --git a/docs/reference/sql/language/syntax/show-columns.asciidoc b/docs/reference/sql/language/syntax/commands/show-columns.asciidoc similarity index 76% rename from docs/reference/sql/language/syntax/show-columns.asciidoc rename to docs/reference/sql/language/syntax/commands/show-columns.asciidoc index 754ce7d6f433e..ddd2d0d08a9c2 100644 --- a/docs/reference/sql/language/syntax/show-columns.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-columns.asciidoc @@ -3,12 +3,12 @@ [[sql-syntax-show-columns]] === SHOW COLUMNS -beta[] - .Synopsis [source, sql] ---- -SHOW COLUMNS [ FROM | IN ]? [ table identifier<1> | [ LIKE pattern<2> ] ] +SHOW COLUMNS [ FROM | IN ]? + [table identifier | <1> + [LIKE pattern] ] <2> ---- <1> single table identifier or double quoted es multi index @@ -21,7 +21,7 @@ patterns. List the columns in table and their data type (and other attributes). -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showColumns] ---- diff --git a/docs/reference/sql/language/syntax/show-functions.asciidoc b/docs/reference/sql/language/syntax/commands/show-functions.asciidoc similarity index 74% rename from docs/reference/sql/language/syntax/show-functions.asciidoc rename to docs/reference/sql/language/syntax/commands/show-functions.asciidoc index 4f9207a045efc..f11a4b69c3e06 100644 --- a/docs/reference/sql/language/syntax/show-functions.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-functions.asciidoc @@ -3,12 +3,10 @@ [[sql-syntax-show-functions]] === SHOW FUNCTIONS -beta[] - .Synopsis [source, sql] ---- -SHOW FUNCTIONS [ LIKE pattern<1>? ]? +SHOW FUNCTIONS [LIKE pattern?]? <1> ---- <1> SQL match pattern @@ -17,7 +15,7 @@ SHOW FUNCTIONS [ LIKE pattern<1>? ]? List all the SQL functions and their type. The `LIKE` clause can be used to restrict the list of names to the given pattern. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showFunctions] ---- @@ -25,25 +23,25 @@ include-tagged::{sql-specs}/docs.csv-spec[showFunctions] The list of functions returned can be customized based on the pattern. 
It can be an exact match: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showFunctionsLikeExact] ---- A wildcard for exactly one character: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showFunctionsLikeChar] ---- A wildcard matching zero or more characters: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showFunctionsLikeWildcard] ---- Or of course, a variation of the above: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showFunctionsWithPattern] ---- diff --git a/docs/reference/sql/language/syntax/show-tables.asciidoc b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc similarity index 74% rename from docs/reference/sql/language/syntax/show-tables.asciidoc rename to docs/reference/sql/language/syntax/commands/show-tables.asciidoc index 10934e253737e..e12086d49bad6 100644 --- a/docs/reference/sql/language/syntax/show-tables.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc @@ -3,12 +3,12 @@ [[sql-syntax-show-tables]] === SHOW TABLES -beta[] - .Synopsis [source, sql] ---- -SHOW TABLES [ table identifier<1> | [ LIKE pattern<2> ] ]? +SHOW TABLES + [table identifier | <1> + [LIKE pattern ]]? <2> ---- <1> single table identifier or double quoted es multi index @@ -22,7 +22,7 @@ patterns. List the tables available to the current user and their type. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showTables] ---- @@ -30,7 +30,7 @@ include-tagged::{sql-specs}/docs.csv-spec[showTables] Match multiple indices by using {es} <> notation: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showTablesEsMultiIndex] ---- @@ -38,26 +38,26 @@ include-tagged::{sql-specs}/docs.csv-spec[showTablesEsMultiIndex] One can also use the `LIKE` clause to restrict the list of names to the given pattern. The pattern can be an exact match: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeExact] ---- Multiple chars: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeWildcard] ---- A single char: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeOneChar] ---- Or a mixture of single and multiple chars: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeMixed] ---- diff --git a/docs/reference/sql/language/syntax/lexic/index.asciidoc b/docs/reference/sql/language/syntax/lexic/index.asciidoc new file mode 100644 index 0000000000000..4d4f5ee3b70d3 --- /dev/null +++ b/docs/reference/sql/language/syntax/lexic/index.asciidoc @@ -0,0 +1,216 @@ +[role="xpack"] +[testenv="basic"] +[[sql-lexical-structure]] +=== Lexical Structure + +This section covers the major lexical structure of SQL, which for the most part, is going to resemble that of ANSI SQL itself hence why low-levels details are not discussed in depth. + +{es-sql} currently accepts only one _command_ at a time. A command is a sequence of _tokens_ terminated by the end of input stream. 
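As a minimal sketch of a single, complete command (the index and field names below are hypothetical), the statement forms one command made up of several tokens; batching a second statement into the same request is not supported since only one command is accepted at a time:

[source, sql]
----
-- hypothetical index and field names, for illustration only
SELECT * FROM library WHERE page_count > 100;
----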
+ +A token can be a __key word__, an _identifier_ (_quoted_ or _unquoted_), a _literal_ (or constant) or a special character symbol (typically a delimiter). Tokens are typically separated by whitespace (be it space, tab) though in some cases, where there is no ambiguity (typically due to a character symbol) this is not needed - however for readability purposes this should be avoided. + +[[sql-syntax-keywords]] +==== Key Words + +Take the following example: + +[source, sql] +---- +SELECT * FROM table +---- + +This query has four tokens: `SELECT`, `*`, `FROM` and `table`. The first three, namely `SELECT`, `*` and `FROM` are __key words__ meaning words that have a fixed meaning in SQL. The token `table` is an _identifier_ meaning it identifies (by name) an entity inside SQL such as a table (in this case), a column, etc... + +As one can see, both key words and identifiers have the _same_ lexical structure and thus one cannot know whether a token is one or the other without knowing the SQL language; the complete list of key words is available in the <>. +Do note that key words are case-insensitive meaning the previous example can be written as: + +[source, sql] +---- +select * fRoM table; +---- + +Identifiers however are not - as {es} is case sensitive, {es-sql} uses the received value verbatim. + +To help differentiate between the two, through-out the documentation the SQL key words are upper-cased a convention we find increases readability and thus recommend to others. + +[[sql-syntax-identifiers]] +==== Identifiers + +Identifiers can be of two types: __quoted__ and __unquoted__: + +[source, sql] +---- +SELECT ip_address FROM "hosts-*" +---- + +This query has two identifiers, `ip_address` and `hosts-*` (an <>). As `ip_address` does not clash with any key words it can be used verbatim, `hosts-*` on the other hand cannot as it clashes with `-` (minus operation) and `*` hence the double quotes. + +Another example: + +[source, sql] +---- +SELECT "from" FROM "" +---- + +The first identifier from needs to quoted as otherwise it clashes with the `FROM` key word (which is case insensitive as thus can be written as `from`) while the second identifier using {es} <> would have otherwise confuse the parser. + +Hence why in general, *especially* when dealing with user input it is *highly* recommended to use quotes for identifiers. It adds minimal increase to your queries and in return offers clarity and disambiguation. + +[[sql-syntax-literals]] +==== Literals (Constants) + +{es-sql} supports two kind of __implicitly-typed__ literals: strings and numbers. + +[[sql-syntax-string-literals]] +[float] +===== String Literals + +A string literal is an arbitrary number of characters bounded by single quotes `'`: `'Giant Robot'`. +To include a single quote in the string, escape it using another single quote: `'Captain EO''s Voyage'`. + +NOTE: An escaped single quote is *not* a double quote (`"`), but a single quote `'` _repeated_ (`''`). + +[sql-syntax-numeric-literals] +[float] +===== Numeric Literals + +Numeric literals are accepted both in decimal and scientific notation with exponent marker (`e` or `E`), starting either with a digit or decimal point `.`: + +[source, sql] +---- +1969 -- integer notation +3.14 -- decimal notation +.1234 -- decimal notation starting with decimal point +4E5 -- scientific notation (with exponent marker) +1.2e-3 -- scientific notation with decimal point +---- + +Numeric literals that contain a decimal point are always interpreted as being of type `double`. 
Those without are considered `integer` if they fit otherwise their type is `long` (or `BIGINT` in ANSI SQL types). + +[[sql-syntax-generic-literals]] +[float] +===== Generic Literals + +When dealing with arbitrary type literal, one creates the object by casting, typically, the string representation to the desired type. This can be achieved through the dedicated <>: + +[source, sql] +---- +CAST('1969-05-13T12:34:56' AS TIMESTAMP) -- cast the given string to datetime +CONVERT('10.0.0.1', IP) -- cast '10.0.0.1' to an IP +---- + +Do note that {es-sql} provides functions that out of the box return popular literals (like `E()`) or provide dedicated parsing for certain strings. + +[[sql-syntax-single-vs-double-quotes]] +==== Single vs Double Quotes + +It is worth pointing out that in SQL, single quotes `'` and double quotes `"` have different meaning and *cannot* be used interchangeably. +Single quotes are used to declare a <> while double quotes for <>. + +To wit: + +[source, sql] +---- +SELECT "first_name" <1> + FROM "musicians" <1> + WHERE "last_name" <1> + = 'Carroll' <2> +---- + +<1> Double quotes `"` used for column and table identifiers +<2> Single quotes `'` used for a string literal + +[[sql-syntax-special-chars]] +==== Special characters + +A few characters that are not alphanumeric have a dedicated meaning different from that of an operator. For completeness these are specified below: + + +[cols="^m,^15"] + +|=== + +s|Char +s|Description + +|* | The asterisk (or wildcard) is used in some contexts to denote all fields for a table. Can be also used as an argument to some aggregate functions. +|, | Commas are used to enumerate the elements of a list. +|. | Used in numeric constants or to separate identifiers qualifiers (catalog, table, column names, etc...). +|()| Parentheses are used for specific SQL commands, function declarations or to enforce precedence. +|=== + +[[sql-syntax-operators]] +==== Operators + +Most operators in {es-sql} have the same precedence and are left-associative. As this is done at parsing time, parenthesis need to be used to enforce a different precedence. + +The following table indicates the supported operators and their precendence (highest to lowest); + +[cols="^2m,^,^3"] + +|=== + +s|Operator/Element +s|Associativity +s|Description + +|. +|left +|qualifier separator + +|+ - +|right +|unary plus and minus (numeric literal sign) + +|* / % +|left +|multiplication, division, modulo + +|+ - +|left +|addition, substraction + +|BETWEEN IN LIKE +| +|range containment, string matching + +|< > <= >= = <=> <> != +| +|comparison + +|NOT +|right +|logical negation + +|AND +|left +|logical conjunction + +|OR +|left +|logical disjunction + +|=== + + +[[sql-syntax-comments]] +==== Comments + +{es-sql} allows comments which are sequence of characters ignored by the parsers. + +Two styles are supported: + +Single Line:: Comments start with a double dash `--` and continue until the end of the line. +Multi line:: Comments that start with `/*` and end with `*/` (also known as C-style). 
+ + +[source, sql] +---- +-- single line comment +/* multi + line + comment + that supports /* nested comments */ + */ +---- + diff --git a/docs/reference/sql/limitations.asciidoc b/docs/reference/sql/limitations.asciidoc index 132f892dacf2c..7db5f79a8ea44 100644 --- a/docs/reference/sql/limitations.asciidoc +++ b/docs/reference/sql/limitations.asciidoc @@ -3,14 +3,21 @@ [[sql-limitations]] == SQL Limitations -beta[] +[float] +[[large-parsing-trees]] +=== Large queries may throw `ParsingExpection` + +Extremely large queries can consume too much memory during the parsing phase, in which case the {es-sql} engine will +abort parsing and throw an error. In such cases, consider reducing the query to a smaller size by potentially +simplifying it or splitting it into smaller queries. [float] +[[sys-columns-describe-table-nested-fields]] === Nested fields in `SYS COLUMNS` and `DESCRIBE TABLE` {es} has a special type of relationship fields called `nested` fields. In {es-sql} they can be used by referencing their inner -sub-fields. Even though `SYS COLUMNS` and `DESCRIBE TABLE` will still display them as having the type `NESTED`, they cannot -be used in a query. One can only reference its sub-fields in the form: +sub-fields. Even though `SYS COLUMNS` in non-driver mode (in the CLI and in REST calls) and `DESCRIBE TABLE` will still display +them as having the type `NESTED`, they cannot be used in a query. One can only reference its sub-fields in the form: [source, sql] -------------------------------------------------- @@ -53,6 +60,7 @@ This is because of the way nested queries work in {es}: the root nested field wi pagination taking place on the **root nested document and not on its inner hits**. [float] +[[normalized-keyword-fields]] === Normalized `keyword` fields `keyword` fields in {es} can be normalized by defining a `normalizer`. Such fields are not supported in {es-sql}. @@ -62,6 +70,8 @@ pagination taking place on the **root nested document and not on its inner hits* Array fields are not supported due to the "invisible" way in which {es} handles an array of values: the mapping doesn't indicate whether a field is an array (has multiple values) or not, so without reading all the data, {es-sql} cannot know whether a field is a single or multi value. +When multiple values are returned for a field, by default, {es-sql} will throw an exception. However, it is possible to change this behavior through `field_multi_value_leniency` parameter in REST (disabled by default) or +`field.multi.value.leniency` in drivers (enabled by default). [float] === Sorting by aggregation @@ -108,6 +118,7 @@ But, if the sub-select would include a `GROUP BY` or `HAVING` or the enclosing ` FROM (SELECT ...) WHERE [simple_condition]`, this is currently **un-supported**. [float] +[[first-last-agg-functions-having-clause]] === Using <>/<> aggregation functions in `HAVING` clause Using `FIRST` and `LAST` in the `HAVING` clause is not supported. The same applies to diff --git a/docs/reference/sql/overview.asciidoc b/docs/reference/sql/overview.asciidoc index c5b3f0f5399e8..db71b85bec337 100644 --- a/docs/reference/sql/overview.asciidoc +++ b/docs/reference/sql/overview.asciidoc @@ -3,8 +3,6 @@ [[sql-overview]] == Overview -beta[] - {es-sql} aims to provide a powerful yet lightweight SQL interface to {es}. 
[[sql-introduction]] diff --git a/docs/reference/sql/security.asciidoc b/docs/reference/sql/security.asciidoc index fa370b00807ce..cbf41b46997a4 100644 --- a/docs/reference/sql/security.asciidoc +++ b/docs/reference/sql/security.asciidoc @@ -3,12 +3,11 @@ [[sql-security]] == Security -beta[] - {es-sql} integrates with security, if this is enabled on your cluster. In such a scenario, {es-sql} supports both security at the transport layer (by encrypting the communication between the consumer and the server) and authentication (for the access layer). [float] +[[ssl-tls-config]] ==== SSL/TLS configuration In case of an encrypted transport, the SSL/TLS support needs to be enabled in {es-sql} to properly establish communication with {es}. This is done by setting the `ssl` property to `true` or by using the `https` prefix in the URL. + @@ -34,7 +33,7 @@ the API require `cluster:monitor/main`. The following example configures a role that can run SQL in JDBC querying the `test` and `bort` indices: -["source","yaml",subs="attributes,callouts,macros"] +[source, yaml] -------------------------------------------------- include-tagged::{sql-tests}/security/roles.yml[cli_drivers] -------------------------------------------------- diff --git a/docs/reference/upgrade.asciidoc b/docs/reference/upgrade.asciidoc index 2ca2a01249629..d928477f38d74 100644 --- a/docs/reference/upgrade.asciidoc +++ b/docs/reference/upgrade.asciidoc @@ -1,71 +1,73 @@ [[setup-upgrade]] -= Upgrade Elasticsearch += Upgrade {es} [partintro] -- -Elasticsearch can usually be upgraded using a <> -process so upgrading does not interrupt service. However, you might -need to <> indices created in older versions. -Upgrades across major versions prior to 6.0 require a <>. - -When upgrading to a new version of Elasticsearch, you need to upgrade each -of the products in your Elastic Stack. The steps you need to take to upgrade -differ depending on which products you are using. Want a list that's tailored -to your stack? Try out our {upgrade_guide}[Interactive Upgrade Guide]. For -more information about upgrading your stack, see {stack-ref}[Upgrading the -Elastic Stack]. - -[IMPORTANT] -=========================================== -Before upgrading Elasticsearch: - -* Review the <> for changes that -affect your application. -* Check the <> to see if you are using -any deprecated features. -* If you use custom plugins, make sure compatible versions are available. -* Test upgrades in a dev environment before upgrading your production cluster. -* <> before upgrading. -You **cannot roll back** to an earlier version unless you have a backup of -your data. - -=========================================== - - -The following table shows when you can perform a rolling upgrade, when you -need to reindex or delete old indices, and when a full cluster restart is -required. - -[[upgrade-paths]] -[cols="1> (where `y > x`) -|5.6 |6.x |<> footnoteref:[reindexfn, You must delete or reindex any indices created in 2.x before upgrading.] -|5.0-5.5 |6.x |<> footnoteref:[reindexfn] -|<5.x |6.x |<> -|6.x |6.y |<> (where `y > x`) -|======================================================================= - -[IMPORTANT] -=============================================== - -Elasticsearch can read indices created in the *previous major version*. -Older indices must be reindexed or deleted. Elasticsearch 6.x -can use indices created in Elasticsearch 5.x, but not those created in -Elasticsearch 2.x or before. 
Elasticsearch 5.x can use indices created in -Elasticsearch 2.x, but not those created in 1.x or before. - -This also applies to indices backed up with <>. If an index was originally created in 2.x, it cannot be -restored to a 6.x cluster even if the snapshot was created by a 5.x cluster. - -Elasticsearch nodes will fail to start if incompatible indices are present. - -For information about how to upgrade old indices, see <>. - -=============================================== +{es} can usually be upgraded using a <> +process so upgrading does not interrupt service. Rolling upgrades are supported: + +* Between minor versions +* From 5.6 to 6.8 +* From 6.8 to {version} + + +The following table shows the recommended upgrade paths to {version}. + +[cols="<1m,3",options="header",] +|==== +|Upgrade from +|Recommended upgrade path to {version} + +|7.0 +|<> to {version} + +|6.8 +|<> to {version} + +|6.0–6.7 +a| + +. https://www.elastic.co/guide/en/elasticsearch/reference/6.8/rolling-upgrades.html[Rolling upgrade] to 6.8 +. <> to {version} + +|5.6 +a| + +. https://www.elastic.co/guide/en/elasticsearch/reference/6.8/rolling-upgrades.html[Rolling upgrade] to 6.8 +. <> to {version} + +|5.0–5.5 +a| + +. https://www.elastic.co/guide/en/elasticsearch/reference/5.6/rolling-upgrades.html[Rolling upgrade] to 5.6 +. https://www.elastic.co/guide/en/elasticsearch/reference/6.8/rolling-upgrades.html[Rolling upgrade] to 6.8 +. <> to {version} +|==== + + +[WARNING] +==== +The following upgrade paths are *not* supported: + +* 6.8 to 7.0. +* 6.7 to {version}. +==== + +{es} can read indices created in the previous major version. If you +have indices created in 5.x or before, you must reindex or delete them +before upgrading to {version}. {es} nodes will fail to start if +incompatible indices are present. Snapshots of 5.x or earlier indices cannot be +restored to a 7.x cluster even if they were created by a 6.x cluster. For +information about upgrading old indices, see <>. + +When upgrading to a new version of {es}, you need to upgrade each +of the products in your Elastic Stack. For more information, see the +{stack-ref}/upgrading-elastic-stack.html[Elastic Stack Installation and Upgrade Guide]. + +To upgrade directly to {version} from 6.7 or earlier, you must shut down the +cluster, install {version}, and restart. For more information, see +<>. + -- include::upgrade/rolling_upgrade.asciidoc[] diff --git a/docs/reference/upgrade/close-ml.asciidoc b/docs/reference/upgrade/close-ml.asciidoc new file mode 100644 index 0000000000000..affcd27926677 --- /dev/null +++ b/docs/reference/upgrade/close-ml.asciidoc @@ -0,0 +1,43 @@ +[testenv="platinum"] + +//////////// +Take us out of upgrade mode after running any snippets on this page. + +[source,js] +-------------------------------------------------- +POST _ml/set_upgrade_mode?enabled=false +-------------------------------------------------- +// CONSOLE +// TEARDOWN +//////////// + +If your {ml} indices were created before {prev-major-version}, you must +<>. + +If your {ml} indices were created in {prev-major-version}, you can: + +* Leave your {ml} jobs running during the upgrade. When you shut down a +{ml} node, its jobs automatically move to another node and restore the model +states. This option enables your jobs to continue running during the upgrade but +it puts increased load on the cluster. 
+ +* Temporarily halt the tasks associated with your {ml} jobs and {dfeeds} and +prevent new jobs from opening by using the +<>: ++ +[source,js] +-------------------------------------------------- +POST _ml/set_upgrade_mode?enabled=true +-------------------------------------------------- +// CONSOLE ++ +When you disable upgrade mode, the jobs resume using the last model +state that was automatically saved. This option avoids the overhead of managing +active jobs during the upgrade and is faster than explicitly stopping {dfeeds} +and closing jobs. + +* {stack-ov}/stopping-ml.html[Stop all {dfeeds} and close all jobs]. This option +saves the model state at the time of closure. When you reopen the jobs after the +upgrade, they use the exact same model. However, saving the latest model state +takes longer than using upgrade mode, especially if you have a lot of jobs or +jobs with large model states. diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc index 4c229e373f505..6d06da0a33b56 100644 --- a/docs/reference/upgrade/cluster_restart.asciidoc +++ b/docs/reference/upgrade/cluster_restart.asciidoc @@ -1,15 +1,20 @@ [[restart-upgrade]] == Full cluster restart upgrade -A full cluster restart upgrade requires that you shut all nodes in the cluster -down, upgrade them, and restart the cluster. A full cluster restart was required -when upgrading to major versions prior to 6.x. Elasticsearch 6.x supports -<> from *Elasticsearch 5.6*. Upgrading to -6.x from earlier versions requires a full cluster restart. See the -<> to verify the type of upgrade you need -to perform. +To upgrade directly to {es} {version} from versions 6.0-6.7, you must shut down +all nodes in the cluster, upgrade each node to {version}, and restart the cluster. -To perform a full cluster restart upgrade: +NOTE: If you are running a version prior to 6.0, +{stack-ref-68}/upgrading-elastic-stack.html[upgrade to 6.8] +and reindex your old indices or bring up a new {version} cluster and +<>. + +include::preparing_to_upgrade.asciidoc[] + +[float] +=== Upgrading your cluster + +To perform a full cluster restart upgrade to {version}: . *Disable shard allocation.* + @@ -26,8 +31,11 @@ recovery. include::synced-flush.asciidoc[] -- -. *Stop any machine learning jobs that are running.* See -{xpack-ref}/stopping-ml.html[Stopping Machine Learning]. +. *Temporarily stop the tasks associated with active {ml} jobs and {dfeeds}.* (Optional) ++ +-- +include::close-ml.asciidoc[] +-- . *Shutdown all nodes.* + @@ -43,15 +51,28 @@ include::remove-xpack.asciidoc[] + -- include::upgrade-node.asciidoc[] +-- ++ +-- include::set-paths-tip.asciidoc[] -- +If upgrading from a 6.x cluster, you must also +<> by +setting the <> on +the master-eligible nodes. + . *Upgrade any plugins.* + Use the `elasticsearch-plugin` script to install the upgraded version of each -installed Elasticsearch plugin. All plugins must be upgraded when you upgrade +installed {es} plugin. All plugins must be upgraded when you upgrade a node. +. If you use {es} {security-features} to define realms, verify that your realm +settings are up-to-date. The format of realm settings changed in version 7.0, in +particular, the placement of the realm type changed. See +<>. + . *Start each upgraded node.* + -- @@ -59,10 +80,6 @@ If you have dedicated master nodes, start them first and wait for them to form a cluster and elect a master before proceeding with your data nodes. You can check progress by looking at the logs. 
-If upgrading from a 6.x cluster, you must -<> by -setting the `cluster.initial_master_nodes` setting. - As soon as enough master-eligible nodes have discovered each other, they form a cluster and elect a master. At that point, you can use <> and <> to monitor nodes @@ -132,3 +149,7 @@ GET _cat/recovery -- . *Restart machine learning jobs.* ++ +-- +include::open-ml.asciidoc[] +-- diff --git a/docs/reference/upgrade/disable-shard-alloc.asciidoc b/docs/reference/upgrade/disable-shard-alloc.asciidoc index abd40336e9b08..839488f541f51 100644 --- a/docs/reference/upgrade/disable-shard-alloc.asciidoc +++ b/docs/reference/upgrade/disable-shard-alloc.asciidoc @@ -3,17 +3,18 @@ When you shut down a node, the allocation process waits for `index.unassigned.node_left.delayed_timeout` (by default, one minute) before starting to replicate the shards on that node to other nodes in the cluster, which can involve a lot of I/O. Since the node is shortly going to be -restarted, this I/O is unnecessary. You can avoid racing the clock by disabling -allocation before shutting down the node: +restarted, this I/O is unnecessary. You can avoid racing the clock by +<> of replicas before shutting down +the node: [source,js] -------------------------------------------------- PUT _cluster/settings { "persistent": { - "cluster.routing.allocation.enable": "none" + "cluster.routing.allocation.enable": "primaries" } } -------------------------------------------------- // CONSOLE -// TEST[skip:indexes don't assign] \ No newline at end of file +// TEST[skip:indexes don't assign] diff --git a/docs/reference/upgrade/open-ml.asciidoc b/docs/reference/upgrade/open-ml.asciidoc new file mode 100644 index 0000000000000..a8882e7f13450 --- /dev/null +++ b/docs/reference/upgrade/open-ml.asciidoc @@ -0,0 +1,14 @@ +[testenv="platinum"] +If you temporarily halted the tasks associated with your {ml} jobs, +use the <> to return them to active +states: + +[source,js] +-------------------------------------------------- +POST _ml/set_upgrade_mode?enabled=false +-------------------------------------------------- +// CONSOLE + +If you closed all {ml} jobs before the upgrade, open the jobs and start the +datafeeds from {kib} or with the <> and +<> APIs. diff --git a/docs/reference/upgrade/preparing_to_upgrade.asciidoc b/docs/reference/upgrade/preparing_to_upgrade.asciidoc new file mode 100644 index 0000000000000..efacb6ab75935 --- /dev/null +++ b/docs/reference/upgrade/preparing_to_upgrade.asciidoc @@ -0,0 +1,28 @@ +[float] +=== Preparing to upgrade + +It is important to prepare carefully before starting an upgrade. Once you have +started to upgrade your cluster to version {version} you must complete the +upgrade. As soon as the cluster contains nodes of version {version} it may make +changes to its internal state that cannot be reverted. If you cannot complete +the upgrade then you should discard the partially-upgraded cluster, deploy an +empty cluster of the version before the upgrade, and restore its contents from +a snapshot. + +Before you start to upgrade your cluster to version {version} you should do the +following. + +. Check the <> to see if you are using any +deprecated features and update your code accordingly. + +. Review the <> and make any necessary +changes to your code and configuration for version {version}. + +. If you use any plugins, make sure there is a version of each plugin that is +compatible with {es} version {version}. + +. Test the upgrade in an isolated environment before upgrading your production +cluster. + +. 
<> + diff --git a/docs/reference/upgrade/reindex_upgrade.asciidoc b/docs/reference/upgrade/reindex_upgrade.asciidoc index 0f1eb155e6433..faa8fbc1639b4 100644 --- a/docs/reference/upgrade/reindex_upgrade.asciidoc +++ b/docs/reference/upgrade/reindex_upgrade.asciidoc @@ -1,55 +1,33 @@ [[reindex-upgrade]] == Reindex before upgrading -Elasticsearch can read indices created in the *previous major version*. -Older indices must be reindexed or deleted. Elasticsearch 6.x -can use indices created in Elasticsearch 5.x, but not those created in -Elasticsearch 2.x or before. Elasticsearch 5.x can use indices created in -Elasticsearch 2.x, but not those created in 1.x or before. +{es} can read indices created in the previous major version. If you +have indices created in 5.x or before, you must reindex or delete them +before upgrading to {version}. {es} nodes will fail to start if +incompatible indices are present. Snapshots of 5.x or earlier indices cannot be +restored to a 7.x cluster even if they were created by a 6.x cluster. -Elasticsearch nodes will fail to start if incompatible indices are present. +This restriction also applies to the internal indices that are used by +{kib} and the {xpack} features. Therefore, before you can use {kib} and +{xpack} features in {version}, you must ensure the internal indices have a +compatible index structure. -To upgrade an Elasticsearch 5.x cluster that contains indices created in 2.x, -you must reindex or delete them before upgrading to 6.x. -For more information, see <>. +You have two options for reindexing old indices: -To upgrade an Elasticsearch cluster running 2.x, you have two options: - -* Perform a <> to 5.6, - <> the 2.x indices, then perform a - <> to 6.x. If your Elasticsearch 2.x - cluster contains indices that were created before 2.x, you must either - delete or reindex them before upgrading to 5.6. For more information about - upgrading from 2.x to 5.6, see https://www.elastic.co/guide/en/elasticsearch/reference/5.6/setup-upgrade.html[ - Upgrading Elasticsearch] in the Elasticsearch 5.6 Reference. - -* Create a new 6.x cluster and <> to import indices directly from the 2.x cluster. - -To upgrade an Elasticsearch 1.x cluster, you have two options: - -* Perform a <> to Elasticsearch - 2.4.x and <> or delete the 1.x indices. - Then, perform a full cluster restart upgrade to 5.6 and reindex or delete - the 2.x indices. Finally, perform a <> - to 6.x. For more information about upgrading from 1.x to 2.4, see https://www.elastic.co/guide/en/elasticsearch/reference/2.4/setup-upgrade.html[ - Upgrading Elasticsearch] in the Elasticsearch 2.4 Reference. - For more information about upgrading from 2.4 to 5.6, see https://www.elastic.co/guide/en/elasticsearch/reference/5.6/setup-upgrade.html[ - Upgrading Elasticsearch] in the Elasticsearch 5.6 Reference. - -* Create a new 6.x cluster and <> to import indices directly from the 1.x cluster. +* <> on your 6.x cluster before upgrading. +* Create a new {version} cluster and <>. +This enables you to reindex indices that reside on clusters running any version of {es}. .Upgrading time-based indices ******************************************* If you use time-based indices, you likely won't need to carry -pre-5.x indices forward to 6.x. Data in time-based indices +pre-6.x indices forward to {version}. Data in time-based indices generally becomes less useful as time passes and are deleted as they age past your retention period. 
Unless you have an unusually long retention period, you can just -wait to upgrade to 6.x until all of your pre-5.x indices have +wait to upgrade to 6.x until all of your pre-6.x indices have been deleted. ******************************************* @@ -58,55 +36,100 @@ been deleted. [[reindex-upgrade-inplace]] === Reindex in place -To manually reindex your old indices with the <>: +You can use the Upgrade Assistant in {kib} 6.8 to automatically reindex 5.x +indices you need to carry forward to {version}. -. Create a new index and copy the mappings and settings from the old index. +To manually reindex your old indices in place: + +. Create an index with 7.x compatible mappings. . Set the `refresh_interval` to `-1` and the `number_of_replicas` to `0` for efficient reindexing. -. Reindex all documents from the old index into the new index using the - <>. +. Use the <> to copy documents from the +5.x index into the new index. You can use a script to perform any necessary +modifications to the document data and metadata during reindexing. . Reset the `refresh_interval` and `number_of_replicas` to the values used in the old index. . Wait for the index status to change to `green`. . In a single <> request: - .. Delete the old index. .. Add an alias with the old index name to the new index. .. Add any aliases that existed on the old index to the new index. +ifdef::include-xpack[] +[TIP] +==== +If you use {ml-features} and your {ml} indices were created before +{prev-major-version}, you must temporarily halt the tasks associated with your +{ml} jobs and {dfeeds} and prevent new jobs from opening during the reindex. Use +the <> or +{stack-ov}/stopping-ml.html[stop all {dfeeds} and close all {ml} jobs]. + +If you use {es} {security-features}, before you reindex `.security*` internal +indices it is a good idea to create a temporary superuser account in the `file` +realm. + +. On a single node, add a temporary superuser account to the `file` realm. For +example, run the <> command: ++ +-- +[source,sh] +---------------------------------------------------------- +bin/elasticsearch-users useradd \ +-p -r superuser +---------------------------------------------------------- +-- + +. Use these credentials when you reindex the `.security*` index. That is to say, +use them to log into {kib} and run the Upgrade Assistant or to call the +reindex API. You can use your regular administration credentials to +reindex the other internal indices. -// Need to update the CSS to override sidebar titles. -[role="xpack"] -.Migration assistance and upgrade tools -******************************************* -{xpack} 5.6 provides migration assistance and upgrade tools that simplify -reindexing and upgrading to 6.x. These tools are free with the X-Pack trial -and Basic licenses and you can use them to upgrade whether or not X-Pack is a -regular part of your Elastic Stack. For more information, see -{stack-ref}/upgrading-elastic-stack.html. -******************************************* +. Delete the temporary superuser account from the file realm. For +example, run the {ref}/users-command.html[elasticsearch-users userdel] command: ++ +-- +[source,sh] +---------------------------------------------------------- +bin/elasticsearch-users userdel +---------------------------------------------------------- +-- + +For more information, see <>. +==== +endif::include-xpack[] [[reindex-upgrade-remote]] === Reindex from a remote cluster You can use <> to migrate indices from -your old cluster to a new 6.x cluster. 
This enables you move to 6.x from a -pre-5.6 cluster without interrupting service. +your old cluster to a new {version} cluster. This enables you move to {version} +from a pre-6.8 cluster without interrupting service. [WARNING] ============================================= -Elasticsearch provides backwards compatibility support that enables +{es} provides backwards compatibility support that enables indices from the previous major version to be upgraded to the current major version. Skipping a major version means that you must resolve any backward compatibility issues yourself. +ifdef::include-xpack[] +If you use {ml-features} and you're migrating indices from a 6.5 or earlier +cluster, the job and {dfeed} configuration information are not stored in an +index. You must recreate your {ml} jobs in the new cluster. If you are migrating +from a 6.6 or later cluster, it is a good idea to temporarily halt the tasks +associated with your {ml} jobs and {dfeeds} to prevent inconsistencies between +different {ml} indices that are reindexed at slightly different times. Use the +<> or +{stack-ov}/stopping-ml.html[stop all {dfeeds} and close all {ml} jobs]. +endif::include-xpack[] + ============================================= To migrate your indices: -. Set up a new 6.x cluster alongside your old cluster. Enable it to access -your old cluster by adding your old cluster to the `reindex.remote.whitelist` in `elasticsearch.yml`: +. Set up a new {version} cluster and add the existing cluster to the +`reindex.remote.whitelist` in `elasticsearch.yml`. + -- [source,yaml] @@ -123,14 +146,14 @@ cluster and remove nodes from the old one. ============================================= -- -. For each index that you need to migrate to the 6.x cluster: +. For each index that you need to migrate to the new cluster: -.. Create a new index in 6.x with the appropriate mappings and settings. Set the +.. Create an index the appropriate mappings and settings. Set the `refresh_interval` to `-1` and set `number_of_replicas` to `0` for faster reindexing. -.. <> to pull documents from the - old index into the new 6.x index: +.. Use the <> to pull documents from the + remote index into the new {version} index: + -- [source,js] @@ -172,5 +195,5 @@ monitor progress of the reindex job with the <>: `number_of_replicas` to the desired values (the default settings are `30s` and `1`). -.. Once replication is complete and the status of the new index is `green`, +.. Once reindexing is complete and the status of the new index is `green`, you can delete the old index. diff --git a/docs/reference/upgrade/remove-xpack.asciidoc b/docs/reference/upgrade/remove-xpack.asciidoc index eb13cec074b8e..eac8847513fa8 100644 --- a/docs/reference/upgrade/remove-xpack.asciidoc +++ b/docs/reference/upgrade/remove-xpack.asciidoc @@ -1,8 +1,6 @@ -IMPORTANT: If you are upgrading from a version prior to 6.3 and use {xpack} -then you must remove the {xpack} plugin before upgrading with -`bin/elasticsearch-plugin remove x-pack`. As of 6.3, {xpack} is included in -the default distribution so make sure to upgrade to that one. If you upgrade -without removing the {xpack} plugin first the node will fail to start. If you -did not remove the {xpack} plugin and the node fails to start then you must -downgrade to your previous version, remove {xpack}, and then upgrade again. -In general downgrading is not supported but in this particular case it is. 
+IMPORTANT: If you are upgrading from 6.2 or earlier and use {xpack}, +run `bin/elasticsearch-plugin remove x-pack` to remove the {xpack} plugin before +you upgrade. The {xpack} functionality is now included in the default distribution +and is no longer installed separately. The node won't start after upgrade if +the {xpack} plugin is present. You will need to downgrade, remove the plugin, +and reapply the upgrade. diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc index dff3895ac4c1d..8ae182caf7561 100644 --- a/docs/reference/upgrade/rolling_upgrade.asciidoc +++ b/docs/reference/upgrade/rolling_upgrade.asciidoc @@ -1,30 +1,33 @@ [[rolling-upgrades]] == Rolling upgrades -A rolling upgrade allows an Elasticsearch cluster to be upgraded one node at +A rolling upgrade allows an {es} cluster to be upgraded one node at a time so upgrading does not interrupt service. Running multiple versions of -Elasticsearch in the same cluster beyond the duration of an upgrade is +{es} in the same cluster beyond the duration of an upgrade is not supported, as shards cannot be replicated from upgraded nodes to nodes running the older version. -Rolling upgrades can be performed between minor versions. Elasticsearch -6.x supports rolling upgrades from *Elasticsearch 5.6*. -Upgrading from earlier 5.x versions requires a <>. You must <> from -versions prior to 5.x. +It is best to upgrade the master-eligible nodes in your cluster after all of +the other nodes. Once you have started to upgrade the master-eligible nodes +they may form a cluster that nodes of older versions cannot join. If you +upgrade the master-eligible nodes last then all the other nodes will not be +running an older version and so they will be able to join the cluster. -WARNING: If the {es} {security-features} are enabled on your 5.x cluster, before -you can do a rolling upgrade you must encrypt the internode-communication with -SSL/TLS, which requires a full cluster restart. For more information about this -requirement and the associated bootstrap check, see <>. +Rolling upgrades are supported: -WARNING: The format used for the internal indices used by Kibana and {xpack} -has changed in 6.x. When upgrading from 5.6 to 6.x, these internal indices have -to be {stack-ref}/upgrading-elastic-stack.html#upgrade-internal-indices[upgraded] -before the rolling upgrade procedure can start. Otherwise the upgraded node will -refuse to join the cluster. +* Between minor versions +* {stack-ref-68}/upgrading-elastic-stack.html[From 5.6 to 6.8] +* From 6.8 to {version} -To perform a rolling upgrade: +Upgrading directly to {version} from 6.7 or earlier requires a +<>. + +include::preparing_to_upgrade.asciidoc[] + +[float] +=== Upgrading your cluster + +To perform a rolling upgrade to {version}: . *Disable shard allocation*. + @@ -43,8 +46,11 @@ include::synced-flush.asciidoc[] -- -. *Stop any machine learning jobs that are running.* See -{xpack-ref}/stopping-ml.html[Stopping Machine Learning]. +. *Temporarily stop the tasks associated with active {ml} jobs and {dfeeds}.* (Optional) ++ +-- +include::close-ml.asciidoc[] +-- . [[upgrade-node]] *Shut down a single node*. + @@ -55,20 +61,26 @@ include::shut-down-node.asciidoc[] . 
*Upgrade the node you shut down.* + -- -include::remove-xpack.asciidoc[] --- -+ --- include::upgrade-node.asciidoc[] include::set-paths-tip.asciidoc[] + +[[rolling-upgrades-bootstrapping]] +NOTE: You should leave `cluster.initial_master_nodes` unset while performing a +rolling upgrade. Each upgraded node is joining an existing cluster so there is +no need for <>. -- . *Upgrade any plugins.* + Use the `elasticsearch-plugin` script to install the upgraded version of each -installed Elasticsearch plugin. All plugins must be upgraded when you upgrade +installed {es} plugin. All plugins must be upgraded when you upgrade a node. +. If you use {es} {security-features} to define realms, verify that your realm +settings are up-to-date. The format of realm settings changed in version 7.0, in +particular, the placement of the realm type changed. See +<>. + . *Start the upgraded node.* + -- @@ -111,7 +123,7 @@ You can check progress by submitting a <> request: [source,sh] -------------------------------------------------- -GET _cat/health +GET _cat/health?v -------------------------------------------------- // CONSOLE @@ -160,20 +172,39 @@ for each node that needs to be updated. -- . *Restart machine learning jobs.* ++ +-- +include::open-ml.asciidoc[] +-- + [IMPORTANT] ==================================================== During a rolling upgrade, the cluster continues to operate normally. However, any new functionality is disabled or operates in a backward compatible mode -until all nodes in the cluster are upgraded. New functionality -becomes operational once the upgrade is complete and all nodes are running the -new version. Once that has happened, there's no way to return to operating -in a backward compatible mode. Nodes running the previous major version will -not be allowed to join the fully-updated cluster. +until all nodes in the cluster are upgraded. New functionality becomes +operational once the upgrade is complete and all nodes are running the new +version. Once that has happened, there's no way to return to operating in a +backward compatible mode. Nodes running the previous major version will not be +allowed to join the fully-updated cluster. In the unlikely case of a network malfunction during the upgrade process that -isolates all remaining old nodes from the cluster, you must take the -old nodes offline and upgrade them to enable them to join the cluster. - +isolates all remaining old nodes from the cluster, you must take the old nodes +offline and upgrade them to enable them to join the cluster. + +If you stop half or more of the master-eligible nodes all at once during the +upgrade then the cluster will become unavailable, meaning that the upgrade is +no longer a _rolling_ upgrade. If this happens, you should upgrade and restart +all of the stopped master-eligible nodes to allow the cluster to form again, as +if performing a <>. It may also +be necessary to upgrade all of the remaining old nodes before they can join the +cluster after it re-forms. + +Similarly, if you run a testing/development environment with only one master +node, the master node should be upgraded last. Restarting a single master node +forces the cluster to be reformed. The new cluster will initially only have the +upgraded master node and will thus reject the older nodes when they re-join the +cluster. Nodes that have already been upgraded will successfully re-join the +upgraded master. 
==================================================== diff --git a/docs/reference/upgrade/set-paths-tip.asciidoc b/docs/reference/upgrade/set-paths-tip.asciidoc index adfe3e29dac3a..ee1a7ba73ecd5 100644 --- a/docs/reference/upgrade/set-paths-tip.asciidoc +++ b/docs/reference/upgrade/set-paths-tip.asciidoc @@ -2,14 +2,14 @@ ================================================ When you extract the zip or tarball packages, the `elasticsearch-n.n.n` -directory contains the Elasticsearch `config`, `data`, `logs` and +directory contains the {es} `config`, `data`, `logs` and `plugins` directories. -We recommend moving these directories out of the Elasticsearch directory -so that there is no chance of deleting them when you upgrade Elasticsearch. +We recommend moving these directories out of the {es} directory +so that there is no chance of deleting them when you upgrade {es}. To specify the new locations, use the `ES_PATH_CONF` environment variable and the `path.data` and `path.logs` settings. For more information, -see <>. +see <>. The <> and <> packages place these directories in the appropriate place for each operating system. In production, we recommend diff --git a/docs/reference/upgrade/shut-down-node.asciidoc b/docs/reference/upgrade/shut-down-node.asciidoc index 258d170906a67..d076f8dbfc468 100644 --- a/docs/reference/upgrade/shut-down-node.asciidoc +++ b/docs/reference/upgrade/shut-down-node.asciidoc @@ -1,20 +1,20 @@ -* If you are running Elasticsearch with `systemd`: +* If you are running {es} with `systemd`: + [source,sh] -------------------------------------------------- sudo systemctl stop elasticsearch.service -------------------------------------------------- -* If you are running Elasticsearch with SysV `init`: +* If you are running {es} with SysV `init`: + [source,sh] -------------------------------------------------- sudo -i service elasticsearch stop -------------------------------------------------- -* If you are running Elasticsearch as a daemon: +* If you are running {es} as a daemon: + [source,sh] -------------------------------------------------- kill $(cat pid) --------------------------------------------------- \ No newline at end of file +-------------------------------------------------- diff --git a/docs/reference/upgrade/upgrade-node.asciidoc b/docs/reference/upgrade/upgrade-node.asciidoc index c97b84ef67004..c445c03a38abb 100644 --- a/docs/reference/upgrade/upgrade-node.asciidoc +++ b/docs/reference/upgrade/upgrade-node.asciidoc @@ -2,7 +2,7 @@ To upgrade using a <> or <> package: * Use `rpm` or `dpkg` to install the new package. All files are installed in the appropriate location for the operating system - and Elasticsearch config files are not overwritten. + and {es} config files are not overwritten. To upgrade using a zip or compressed tarball: @@ -18,12 +18,10 @@ To upgrade using a zip or compressed tarball: data directory. If you are not using an external `data` directory, copy your old data directory over to the new installation. + + --- -IMPORTANT: If you use {monitoring}, re-use the data directory when you upgrade +IMPORTANT: If you use {monitor-features}, re-use the data directory when you upgrade {es}. Monitoring identifies unique {es} nodes by using the persistent UUID, which is stored in the data directory. --- .. Set `path.logs` in `config/elasticsearch.yml` to point to the location where you want to store your logs. 
If you do not specify this setting, diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc index 27aa620a34fd2..71a87ef57b424 100644 --- a/docs/resiliency/index.asciidoc +++ b/docs/resiliency/index.asciidoc @@ -279,24 +279,22 @@ shard will be allocated upon reopening the index. [float] === Use two phase commit for Cluster State publishing (STATUS: DONE, v5.0.0) -A master node in Elasticsearch continuously https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html#fault-detection[monitors the cluster nodes] +A master node in Elasticsearch continuously https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-fault-detection.html[monitors the cluster nodes] and removes any node from the cluster that doesn't respond to its pings in a timely -fashion. If the master is left with fewer nodes than the `discovery.zen.minimum_master_nodes` -settings, it will step down and a new master election will start. +fashion. If the master is left with too few nodes, it will step down and a new master election will start. When a network partition causes a master node to lose many followers, there is a short window in time until the node loss is detected and the master steps down. During that window, the master may erroneously accept and acknowledge cluster state changes. To avoid this, we introduce a new phase to cluster state publishing where the proposed cluster state is sent to all nodes -but is not yet committed. Only once enough nodes (`discovery.zen.minimum_master_nodes`) actively acknowledge +but is not yet committed. Only once enough nodes actively acknowledge the change, it is committed and commit messages are sent to the nodes. See {GIT}13062[#13062]. [float] === Wait on incoming joins before electing local node as master (STATUS: DONE, v2.0.0) During master election each node pings in order to discover other nodes and validate the liveness of existing -nodes. Based on this information the node either discovers an existing master or, if enough nodes are found -(see https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html#master-election[`discovery.zen.minimum_master_nodes`]) a new master will be elected. Currently, the node that is +nodes. Based on this information the node either discovers an existing master or, if enough nodes are found a new master will be elected. Currently, the node that is elected as master will update the cluster state to indicate the result of the election. Other nodes will submit a join request to the newly elected master node. Instead of immediately processing the election result, the elected master node should wait for the incoming joins from other nodes, thus validating that the result of the election is properly applied. 
As soon as enough diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index b4a6c49754869..e87df16264e94 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -19,11 +19,12 @@ package org.elasticsearch.smoketest; -import org.apache.http.HttpHost; -import org.apache.lucene.util.BytesRef; - import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; +import org.apache.http.HttpHost; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.TimeUnits; import org.elasticsearch.Version; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.ParseField; @@ -48,12 +49,13 @@ import java.util.List; import java.util.Map; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; - import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +//The default 20 minutes timeout isn't always enough, please do not increase further than 30 before analyzing what makes this suite so slow +@TimeoutSuite(millis = 30 * TimeUnits.MINUTE) public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { public DocsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { @@ -96,7 +98,7 @@ protected ClientYamlTestClient initClientYamlTestClient( } /** - * Compares the the results of running two analyzers against many random + * Compares the results of running two analyzers against many random * strings. The goal is to figure out if two anlayzers are "the same" by * comparing their results. 
This is far from perfect but should be fairly * accurate, especially for gross things like missing {@code decimal_digit} diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 2970024ea74ef..4ace7103b8ce8 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-5.1.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-5.2.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=53b71812f18cdb2777e9f1b2a0f2038683907c90bdc406bc64d8b400e1fb2c3b +distributionSha256Sum=9dc729f6dbfbbc4df1692665d301e028976dacac296a126f16148941a9cf012e diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index 0cd1f256c4037..b1f3b338255c4 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -26,7 +26,7 @@ dependencies { compile "org.elasticsearch:elasticsearch-core:${version}" } -unitTest.enabled = false +test.enabled = false // Since CLI does not depend on :server, it cannot run the jarHell task jarHell.enabled = false diff --git a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index 3de0ae5117e6a..277698bd8cc7f 100644 --- a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -196,18 +196,23 @@ public static void checkJarHell(Set urls, Consumer output) throws U // case for tests: where we have class files in the classpath final Path root = PathUtils.get(url.toURI()); final String sep = root.getFileSystem().getSeparator(); - Files.walkFileTree(root, new SimpleFileVisitor() { - @Override - public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - String entry = root.relativize(file).toString(); - if (entry.endsWith(".class")) { - // normalize with the os separator, remove '.class' - entry = entry.replace(sep, ".").substring(0, entry.length() - ".class".length()); - checkClass(clazzes, entry, path); + + // don't try and walk class or resource directories that don't exist + // gradle will add these to the classpath even if they never get created + if (Files.exists(root)) { + Files.walkFileTree(root, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + String entry = root.relativize(file).toString(); + if (entry.endsWith(".class")) { + // normalize with the os separator, remove '.class' + entry = entry.replace(sep, ".").substring(0, entry.length() - ".class".length()); + checkClass(clazzes, entry, path); + } + return super.visitFile(file, attrs); } - return super.visitFile(file, attrs); - } - }); + }); + } } } } diff --git a/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java b/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java index 46d19d2a814fe..d3e9afd4970df 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java +++ b/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java @@ -24,6 +24,7 @@ import java.nio.file.FileVisitResult; import java.nio.file.FileVisitor; import java.nio.file.Files; +import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import 
java.nio.file.attribute.BasicFileAttributes; @@ -249,6 +250,7 @@ public FileVisitResult visitFileFailed(final Path file, final IOException exc) t } // TODO: replace with constants class if needed (cf. org.apache.lucene.util.Constants) + private static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows"); private static final boolean LINUX = System.getProperty("os.name").startsWith("Linux"); private static final boolean MAC_OS_X = System.getProperty("os.name").startsWith("Mac OS X"); @@ -263,18 +265,28 @@ public FileVisitResult visitFileFailed(final Path file, final IOException exc) t * systems and operating systems allow to fsync on a directory) */ public static void fsync(final Path fileToSync, final boolean isDir) throws IOException { + if (isDir && WINDOWS) { + // opening a directory on Windows fails, directories can not be fsynced there + if (Files.exists(fileToSync) == false) { + // yet do not suppress trying to fsync directories that do not exist + throw new NoSuchFileException(fileToSync.toString()); + } + return; + } try (FileChannel file = FileChannel.open(fileToSync, isDir ? StandardOpenOption.READ : StandardOpenOption.WRITE)) { - file.force(true); - } catch (final IOException ioe) { - if (isDir) { - assert (LINUX || MAC_OS_X) == false : - "on Linux and MacOSX fsyncing a directory should not throw IOException, "+ - "we just don't want to rely on that in production (undocumented); got: " + ioe; - // ignore exception if it is a directory - return; + try { + file.force(true); + } catch (final IOException e) { + if (isDir) { + assert (LINUX || MAC_OS_X) == false : + "on Linux and MacOSX fsyncing a directory should not throw IOException, "+ + "we just don't want to rely on that in production (undocumented); got: " + e; + // ignore exception if it is a directory + return; + } + // throw original exception + throw e; } - // throw original exception - throw ioe; } } } diff --git a/libs/core/src/test/java/org/elasticsearch/core/internal/io/IOUtilsTests.java b/libs/core/src/test/java/org/elasticsearch/core/internal/io/IOUtilsTests.java index ee5af323b5219..8af0f2a707e24 100644 --- a/libs/core/src/test/java/org/elasticsearch/core/internal/io/IOUtilsTests.java +++ b/libs/core/src/test/java/org/elasticsearch/core/internal/io/IOUtilsTests.java @@ -19,6 +19,7 @@ import org.apache.lucene.mockfile.FilterFileSystemProvider; import org.apache.lucene.mockfile.FilterPath; +import org.apache.lucene.util.Constants; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.test.ESTestCase; @@ -27,14 +28,20 @@ import java.io.IOException; import java.io.OutputStream; import java.net.URI; +import java.nio.channels.FileChannel; import java.nio.charset.StandardCharsets; import java.nio.file.AccessDeniedException; import java.nio.file.FileSystem; import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.OpenOption; import java.nio.file.Path; +import java.nio.file.attribute.FileAttribute; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Objects; +import java.util.Set; import java.util.function.Function; import static org.hamcrest.Matchers.arrayWithSize; @@ -214,6 +221,43 @@ public void testFsyncDirectory() throws Exception { // no exception } + private static final class AccessDeniedWhileOpeningDirectoryFileSystem extends FilterFileSystemProvider { + + AccessDeniedWhileOpeningDirectoryFileSystem(final FileSystem delegate) { + 
super("access_denied://", Objects.requireNonNull(delegate)); + } + + @Override + public FileChannel newFileChannel( + final Path path, + final Set options, + final FileAttribute... attrs) throws IOException { + if (Files.isDirectory(path)) { + throw new AccessDeniedException(path.toString()); + } + return delegate.newFileChannel(path, options, attrs); + } + + } + + public void testFsyncAccessDeniedOpeningDirectory() throws Exception { + final Path path = createTempDir().toRealPath(); + final FileSystem fs = new AccessDeniedWhileOpeningDirectoryFileSystem(path.getFileSystem()).getFileSystem(URI.create("file:///")); + final Path wrapped = new FilterPath(path, fs); + if (Constants.WINDOWS) { + // no exception, we early return and do not even try to open the directory + IOUtils.fsync(wrapped, true); + } else { + expectThrows(AccessDeniedException.class, () -> IOUtils.fsync(wrapped, true)); + } + } + + public void testFsyncNonExistentDirectory() throws Exception { + final Path dir = FilterPath.unwrap(createTempDir()).toRealPath(); + final Path nonExistentDir = dir.resolve("non-existent"); + expectThrows(NoSuchFileException.class, () -> IOUtils.fsync(nonExistentDir, true)); + } + public void testFsyncFile() throws IOException { final Path path = createTempDir().toRealPath(); final Path subPath = path.resolve(randomAlphaOfLength(8)); diff --git a/libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java b/libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java index 407d73134b611..66edf0646882e 100644 --- a/libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java +++ b/libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java @@ -182,7 +182,7 @@ public Map parse(String inputString) { * all of the bytes then progress the main cursor. Repeat till the end of the input string. Since the string being searching for * (the delimiter) is generally small and rare the naive approach is efficient. * - * In this case the the string that is walked is the input string, and the string being searched for is the current delimiter. + * In this case the string that is walked is the input string, and the string being searched for is the current delimiter. * For example for a dissect pattern of {@code %{a},%{b}:%{c}} the delimiters (comma then colon) are searched for in the * input string. At class construction the list of keys+delimiters are found (dissectPairs), which allows the use of that ordered * list to know which delimiter to use for the search. The delimiters is progressed once the current delimiter is matched. 
diff --git a/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java b/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java index c22cec98eb79a..c604b8c089b84 100644 --- a/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java +++ b/libs/dissect/src/test/java/org/elasticsearch/dissect/DissectParserTests.java @@ -24,10 +24,10 @@ import org.elasticsearch.test.ESTestCase; import org.hamcrest.CoreMatchers; import org.hamcrest.Matchers; +import org.mockito.internal.util.collections.Sets; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -67,7 +67,7 @@ public void testLogstashSpecs() { assertMatch("%{a} » %{b}»%{c}€%{d}", "foo » bar»baz€quux", Arrays.asList("a", "b", "c", "d"), Arrays.asList("foo", "bar", "baz", "quux")); assertMatch("%{a} %{b} %{+a}", "foo bar baz quux", Arrays.asList("a", "b"), Arrays.asList("foo baz quux", "bar"), " "); - //Logstash supports implicit ordering based anchored by the the key without the '+' + //Logstash supports implicit ordering based anchored by the key without the '+' //This implementation will only honor implicit ordering for appending right to left else explicit order (/N) is required. //The results of this test differ from Logstash. assertMatch("%{+a} %{a} %{+a} %{b}", "December 31 1999 quux", @@ -112,7 +112,7 @@ public void testBasicMatch() { String delimiterFirstInput = ""; String delimiterFirstPattern = ""; //parallel arrays - List expectedKeys = Arrays.asList(generateRandomStringArray(100, 10, false, false)); + List expectedKeys = new ArrayList<>(Sets.newSet(generateRandomStringArray(100, 10, false, false))); List expectedValues = new ArrayList<>(expectedKeys.size()); for (String key : expectedKeys) { String value = randomAsciiAlphanumOfLengthBetween(1, 100); @@ -137,6 +137,9 @@ public void testBasicMatchUnicode() { List expectedValues = new ArrayList<>(); for (int i = 0; i < randomIntBetween(1, 100); i++) { String key = randomAsciiAlphanumOfLengthBetween(1, 100); + while (expectedKeys.contains(key)) { // keys should be unique in this test + key = randomAsciiAlphanumOfLengthBetween(1, 100); + } String value = randomRealisticUnicodeOfCodepointLengthBetween(1, 100); String delimiter = Integer.toString(randomInt()); //int to ensures values and delimiters don't overlap, else validation can fail keyFirstPattern += "%{" + key + "}" + delimiter; @@ -374,13 +377,11 @@ private void assertMatch(String pattern, String input, List expectedKeys private void assertMatch(String pattern, String input, List expectedKeys, List expectedValues, String appendSeperator) { Map results = new DissectParser(pattern, appendSeperator).parse(input); - List foundKeys = new ArrayList<>(results.keySet()); - List foundValues = new ArrayList<>(results.values()); - Collections.sort(foundKeys); - Collections.sort(foundValues); - Collections.sort(expectedKeys); - Collections.sort(expectedValues); - assertThat(foundKeys, Matchers.equalTo(expectedKeys)); - assertThat(foundValues, Matchers.equalTo(expectedValues)); + assertThat(results.size(), Matchers.equalTo(expectedKeys.size())); + assertThat(results.size(), Matchers.equalTo(expectedValues.size())); + for (int i = 0; i < results.size(); i++) { + final String key = expectedKeys.get(i); + assertThat(results.get(key), Matchers.equalTo(expectedValues.get(i))); + } } } diff --git a/libs/plugin-classloader/build.gradle b/libs/plugin-classloader/build.gradle index 
4b3e00467b71d..d6af6600d3463 100644 --- a/libs/plugin-classloader/build.gradle +++ b/libs/plugin-classloader/build.gradle @@ -17,7 +17,7 @@ * under the License. */ -unitTest.enabled = false +test.enabled = false // test depend on ES core... forbiddenApisMain.enabled = false diff --git a/libs/ssl-config/build.gradle b/libs/ssl-config/build.gradle index 8d5b1d18b8c04..6c59e9ddb15b5 100644 --- a/libs/ssl-config/build.gradle +++ b/libs/ssl-config/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: "nebula.maven-scm" dependencies { compile "org.elasticsearch:elasticsearch-core:${version}" @@ -31,9 +32,23 @@ dependencies { testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" } +if (isEclipse) { + // in eclipse the project is under a fake root, we need to change around the source sets + sourceSets { + if (project.path == ":libs:ssl-config") { + main.java.srcDirs = ['java'] + main.resources.srcDirs = ['resources'] + } else { + test.java.srcDirs = ['java'] + test.resources.srcDirs = ['resources'] + } + } +} + forbiddenApisMain { replaceSignatureFiles 'jdk-signatures' } + forbiddenPatterns { exclude '**/*.key' exclude '**/*.pem' diff --git a/libs/ssl-config/src/main/eclipse.build.gradle b/libs/ssl-config/src/main/eclipse-build.gradle similarity index 72% rename from libs/ssl-config/src/main/eclipse.build.gradle rename to libs/ssl-config/src/main/eclipse-build.gradle index 58b2d7077120a..be8b9d5b3b0bd 100644 --- a/libs/ssl-config/src/main/eclipse.build.gradle +++ b/libs/ssl-config/src/main/eclipse-build.gradle @@ -1,2 +1,2 @@ -// this is just shell gradle file for eclipse to have separate projects for geo src and tests +// this is just shell gradle file for eclipse to have separate projects for ssl-config src and tests apply from: '../../build.gradle' diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java index aca7ba56b2ae9..79943870c52f6 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java @@ -397,7 +397,7 @@ private static byte[] possiblyDecryptPKCS1Key(Map pemHeaders, St * defined in RFC 1423. RFC 1423 only defines DES-CBS and triple DES (EDE) in CBC mode. 
AES in CBC mode is also widely used though ( 3 * different variants of 128, 192, 256 bit keys ) * - * @param dekHeaderValue The value of the the DEK-Info PEM header + * @param dekHeaderValue The value of the DEK-Info PEM header * @param password The password with which the key is encrypted * @return a cipher of the appropriate algorithm and parameters to be used for decryption * @throws GeneralSecurityException if the algorithm is not available in the used security provider, or if the key is inappropriate diff --git a/libs/ssl-config/src/test/eclipse.build.gradle b/libs/ssl-config/src/test/eclipse-build.gradle similarity index 81% rename from libs/ssl-config/src/test/eclipse.build.gradle rename to libs/ssl-config/src/test/eclipse-build.gradle index f8265e3dfed08..aca207a09b7c8 100644 --- a/libs/ssl-config/src/test/eclipse.build.gradle +++ b/libs/ssl-config/src/test/eclipse-build.gradle @@ -1,5 +1,5 @@ -// this is just shell gradle file for eclipse to have separate projects for geo src and tests +// this is just shell gradle file for eclipse to have separate projects for ssl-config src and tests apply from: '../../build.gradle' dependencies { testCompile project(':libs:elasticsearch-ssl-config') -} +} diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java index e02f9f176246e..adcbf6ef1bee0 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java @@ -25,7 +25,7 @@ import java.util.Map; /** - * Wrapper for a XContentParser that makes a single object to look like a complete document. + * Wrapper for a XContentParser that makes a single object/array look like a complete document. * * The wrapper prevents the parsing logic to consume tokens outside of the wrapped object as well * as skipping to the end of the object in case of a parsing error. 
The wrapper is intended to be @@ -39,8 +39,8 @@ public class XContentSubParser implements XContentParser { public XContentSubParser(XContentParser parser) { this.parser = parser; - if (parser.currentToken() != Token.START_OBJECT) { - throw new IllegalStateException("The sub parser has to be created on the start of an object"); + if (parser.currentToken() != Token.START_OBJECT && parser.currentToken() != Token.START_ARRAY) { + throw new IllegalStateException("The sub parser has to be created on the start of an object or array"); } level = 1; } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index 51bb5c3c65f6d..fa6ffdd0407f9 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -151,6 +151,12 @@ public int intValue(boolean coerce) throws IOException { protected abstract int doIntValue() throws IOException; + private static BigInteger LONG_MAX_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MAX_VALUE); + private static BigInteger LONG_MIN_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MIN_VALUE); + // weak bounds on the BigDecimal representation to allow for coercion + private static BigDecimal BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE); + private static BigDecimal BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE); + /** Return the long that {@code stringValue} stores or throws an exception if the * stored value cannot be converted to a long that stores the exact same * value and {@code coerce} is false. */ @@ -163,7 +169,11 @@ private static long toLong(String stringValue, boolean coerce) { final BigInteger bigIntegerValue; try { - BigDecimal bigDecimalValue = new BigDecimal(stringValue); + final BigDecimal bigDecimalValue = new BigDecimal(stringValue); + if (bigDecimalValue.compareTo(BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE) >= 0 || + bigDecimalValue.compareTo(BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE) <= 0) { + throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); + } bigIntegerValue = coerce ? 
bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact(); } catch (ArithmeticException e) { throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part"); @@ -171,11 +181,11 @@ private static long toLong(String stringValue, boolean coerce) { throw new IllegalArgumentException("For input string: \"" + stringValue + "\""); } - if (bigIntegerValue.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0 || - bigIntegerValue.compareTo(BigInteger.valueOf(Long.MIN_VALUE)) < 0) { + if (bigIntegerValue.compareTo(LONG_MAX_VALUE_AS_BIGINTEGER) > 0 || bigIntegerValue.compareTo(LONG_MIN_VALUE_AS_BIGINTEGER) < 0) { throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); } + assert bigIntegerValue.longValueExact() <= Long.MAX_VALUE; // asserting that no ArithmeticException is thrown return bigIntegerValue.longValue(); } diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index 5dbe7be40f312..606d019f3c4f7 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -329,7 +329,7 @@ public void testNestedMapInList() throws IOException { } } - public void testSubParser() throws IOException { + public void testSubParserObject() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); int numberOfTokens; numberOfTokens = generateRandomObjectForMarking(builder); @@ -354,6 +354,7 @@ public void testSubParser() throws IOException { // And sometimes skipping children subParser.skipChildren(); } + } finally { assertFalse(subParser.isClosed()); subParser.close(); @@ -367,6 +368,49 @@ public void testSubParser() throws IOException { } } + public void testSubParserArray() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + int numberOfArrayElements = randomInt(10); + builder.startObject(); + builder.field("array"); + builder.startArray(); + int numberOfTokens = 0; + for (int i = 0; i < numberOfArrayElements; ++i) { + numberOfTokens += generateRandomObject(builder, 0); + } + builder.endArray(); + builder.endObject(); + + String content = Strings.toString(builder); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, content)) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); // array field + assertEquals("array", parser.currentName()); + assertEquals(XContentParser.Token.START_ARRAY, parser.nextToken()); // [ + XContentParser subParser = new XContentSubParser(parser); + try { + int tokensToSkip = randomInt(numberOfTokens); + for (int i = 0; i < tokensToSkip; i++) { + // Simulate incomplete parsing + assertNotNull(subParser.nextToken()); + } + if (randomBoolean()) { + // And sometimes skipping children + subParser.skipChildren(); + } + + } finally { + assertFalse(subParser.isClosed()); + subParser.close(); + assertTrue(subParser.isClosed()); + } + assertEquals(XContentParser.Token.END_ARRAY, parser.currentToken()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } + } + public void testCreateSubParserAtAWrongPlace() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); generateRandomObjectForMarking(builder); @@ -377,7 +421,7 @@ public void 
testCreateSubParserAtAWrongPlace() throws IOException { assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); // first field assertEquals("first_field", parser.currentName()); IllegalStateException exception = expectThrows(IllegalStateException.class, () -> new XContentSubParser(parser)); - assertEquals("The sub parser has to be created on the start of an object", exception.getMessage()); + assertEquals("The sub parser has to be created on the start of an object or array", exception.getMessage()); } } diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java index 8293844f4b6fa..7dcdff426711b 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java @@ -244,12 +244,15 @@ public InternalAggregation doReduce(List aggregations, Redu } RunningStats runningStats = new RunningStats(); - for (int i=0; i < aggs.size(); ++i) { - runningStats.merge(((InternalMatrixStats) aggs.get(i)).stats); + for (InternalAggregation agg : aggs) { + runningStats.merge(((InternalMatrixStats) agg).stats); } - MatrixStatsResults results = new MatrixStatsResults(runningStats); - return new InternalMatrixStats(name, results.getDocCount(), runningStats, results, pipelineAggregators(), getMetaData()); + if (reduceContext.isFinalReduce()) { + MatrixStatsResults results = new MatrixStatsResults(runningStats); + return new InternalMatrixStats(name, results.getDocCount(), runningStats, results, pipelineAggregators(), getMetaData()); + } + return new InternalMatrixStats(name, runningStats.docCount, runningStats, null, pipelineAggregators(), getMetaData()); } @Override diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java index 0512f3d5db3b6..44082b16defb6 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java +++ b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java @@ -58,7 +58,6 @@ public void testNoData() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37587") public void testTwoFields() throws Exception { String fieldA = "a"; MappedFieldType ftA = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); @@ -89,8 +88,49 @@ public void testTwoFields() throws Exception { IndexSearcher searcher = new IndexSearcher(reader); MatrixStatsAggregationBuilder aggBuilder = new MatrixStatsAggregationBuilder("my_agg") .fields(Arrays.asList(fieldA, fieldB)); - InternalMatrixStats stats = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, ftA, ftB); + InternalMatrixStats stats = search(searcher, new MatchAllDocsQuery(), aggBuilder, ftA, ftB); + // Since `search` doesn't do any reduction, and the InternalMatrixStats object will have a null `MatrixStatsResults` + // object. 
That is created during the final reduction, which also does a final round of computations + // So we have to create a MatrixStatsResults object here manually so that the final `compute()` is called multiPassStats.assertNearlyEqual(new MatrixStatsResults(stats.getStats())); + } + } + } + + public void testTwoFieldsReduce() throws Exception { + String fieldA = "a"; + MappedFieldType ftA = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); + ftA.setName(fieldA); + String fieldB = "b"; + MappedFieldType ftB = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); + ftB.setName(fieldB); + + try (Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + + int numDocs = scaledRandomIntBetween(8192, 16384); + Double[] fieldAValues = new Double[numDocs]; + Double[] fieldBValues = new Double[numDocs]; + for (int docId = 0; docId < numDocs; docId++) { + Document document = new Document(); + fieldAValues[docId] = randomDouble(); + document.add(new SortedNumericDocValuesField(fieldA, NumericUtils.doubleToSortableLong(fieldAValues[docId]))); + + fieldBValues[docId] = randomDouble(); + document.add(new SortedNumericDocValuesField(fieldB, NumericUtils.doubleToSortableLong(fieldBValues[docId]))); + indexWriter.addDocument(document); + } + + MultiPassStats multiPassStats = new MultiPassStats(fieldA, fieldB); + multiPassStats.computeStats(Arrays.asList(fieldAValues), Arrays.asList(fieldBValues)); + try (IndexReader reader = indexWriter.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + MatrixStatsAggregationBuilder aggBuilder = new MatrixStatsAggregationBuilder("my_agg") + .fields(Arrays.asList(fieldA, fieldB)); + InternalMatrixStats stats = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, ftA, ftB); + // Unlike testTwoFields, `searchAndReduce` will execute reductions so the `MatrixStatsResults` object + // will be populated and fully computed. 
We should use that value directly to test against + multiPassStats.assertNearlyEqual(stats); assertTrue(MatrixAggregationInspectionHelper.hasValue(stats)); } } diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MultiPassStats.java b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MultiPassStats.java index b5a348f45eb54..cd4ee3ee849ee 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MultiPassStats.java +++ b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MultiPassStats.java @@ -136,6 +136,30 @@ void assertNearlyEqual(MatrixStatsResults stats) { assertTrue(nearlyEqual(correlations.get(fieldBKey).get(fieldAKey), stats.getCorrelation(fieldBKey, fieldAKey), 1e-7)); } + void assertNearlyEqual(InternalMatrixStats stats) { + assertEquals(count, stats.getDocCount()); + assertEquals(count, stats.getFieldCount(fieldAKey)); + assertEquals(count, stats.getFieldCount(fieldBKey)); + // means + assertTrue(nearlyEqual(means.get(fieldAKey), stats.getMean(fieldAKey), 1e-7)); + assertTrue(nearlyEqual(means.get(fieldBKey), stats.getMean(fieldBKey), 1e-7)); + // variances + assertTrue(nearlyEqual(variances.get(fieldAKey), stats.getVariance(fieldAKey), 1e-7)); + assertTrue(nearlyEqual(variances.get(fieldBKey), stats.getVariance(fieldBKey), 1e-7)); + // skewness (multi-pass is more susceptible to round-off error so we need to slightly relax the tolerance) + assertTrue(nearlyEqual(skewness.get(fieldAKey), stats.getSkewness(fieldAKey), 1e-4)); + assertTrue(nearlyEqual(skewness.get(fieldBKey), stats.getSkewness(fieldBKey), 1e-4)); + // kurtosis (multi-pass is more susceptible to round-off error so we need to slightly relax the tolerance) + assertTrue(nearlyEqual(kurtosis.get(fieldAKey), stats.getKurtosis(fieldAKey), 1e-4)); + assertTrue(nearlyEqual(kurtosis.get(fieldBKey), stats.getKurtosis(fieldBKey), 1e-4)); + // covariances + assertTrue(nearlyEqual(covariances.get(fieldAKey).get(fieldBKey),stats.getCovariance(fieldAKey, fieldBKey), 1e-7)); + assertTrue(nearlyEqual(covariances.get(fieldBKey).get(fieldAKey),stats.getCovariance(fieldBKey, fieldAKey), 1e-7)); + // correlation + assertTrue(nearlyEqual(correlations.get(fieldAKey).get(fieldBKey), stats.getCorrelation(fieldAKey, fieldBKey), 1e-7)); + assertTrue(nearlyEqual(correlations.get(fieldBKey).get(fieldAKey), stats.getCorrelation(fieldBKey, fieldAKey), 1e-7)); + } + private static boolean nearlyEqual(double a, double b, double epsilon) { final double absA = Math.abs(a); final double absB = Math.abs(b); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 0059f7460a873..bf9b25bf16d4a 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -399,7 +399,7 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("cjk_bigram", false, CJKBigramFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("cjk_width", true, CJKWidthFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("classic", false, ClassicFilter::new)); - filters.add(PreConfiguredTokenFilter.singleton("common_grams", false, + 
filters.add(PreConfiguredTokenFilter.singleton("common_grams", false, false, input -> new CommonGramsFilter(input, CharArraySet.EMPTY_SET))); filters.add(PreConfiguredTokenFilter.singleton("czech_stem", false, CzechStemFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("decimal_digit", true, DecimalDigitFilter::new)); @@ -412,10 +412,14 @@ public List getPreConfiguredTokenFilters() { DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER, DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER))); filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); - filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, input -> + filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, false, input -> new EdgeNGramTokenFilter(input, 1))); - filters.add(PreConfiguredTokenFilter.singletonWithVersion("edgeNGram", false, (reader, version) -> { - if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) { + filters.add(PreConfiguredTokenFilter.singletonWithVersion("edgeNGram", false, false, (reader, version) -> { + if (version.onOrAfter(org.elasticsearch.Version.V_7_0_0)) { + throw new IllegalArgumentException( + "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [edge_ngram] instead."); + } else { deprecationLogger.deprecatedAndMaybeLog("edgeNGram_deprecation", "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " + "Please change the filter name to [edge_ngram] instead."); @@ -437,9 +441,12 @@ public List getPreConfiguredTokenFilters() { new LimitTokenCountFilter(input, LimitTokenCountFilterFactory.DEFAULT_MAX_TOKEN_COUNT, LimitTokenCountFilterFactory.DEFAULT_CONSUME_ALL_TOKENS))); - filters.add(PreConfiguredTokenFilter.singleton("ngram", false, reader -> new NGramTokenFilter(reader, 1, 2, false))); - filters.add(PreConfiguredTokenFilter.singletonWithVersion("nGram", false, (reader, version) -> { - if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) { + filters.add(PreConfiguredTokenFilter.singleton("ngram", false, false, reader -> new NGramTokenFilter(reader, 1, 2, false))); + filters.add(PreConfiguredTokenFilter.singletonWithVersion("nGram", false, false, (reader, version) -> { + if (version.onOrAfter(org.elasticsearch.Version.V_7_0_0)) { + throw new IllegalArgumentException("The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [ngram] instead."); + } else { deprecationLogger.deprecatedAndMaybeLog("nGram_deprecation", "The [nGram] token filter name is deprecated and will be removed in a future version. 
" + "Please change the filter name to [ngram] instead."); @@ -452,7 +459,7 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("russian_stem", false, input -> new SnowballFilter(input, "Russian"))); filters.add(PreConfiguredTokenFilter.singleton("scandinavian_folding", true, ScandinavianFoldingFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("scandinavian_normalization", true, ScandinavianNormalizationFilter::new)); - filters.add(PreConfiguredTokenFilter.singleton("shingle", false, input -> { + filters.add(PreConfiguredTokenFilter.singleton("shingle", false, false, input -> { TokenStream ts = new ShingleFilter(input); /** * We disable the graph analysis on this token stream @@ -474,14 +481,14 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("type_as_payload", false, TypeAsPayloadTokenFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("unique", false, UniqueTokenFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("uppercase", true, UpperCaseFilter::new)); - filters.add(PreConfiguredTokenFilter.singleton("word_delimiter", false, input -> + filters.add(PreConfiguredTokenFilter.singleton("word_delimiter", false, false, input -> new WordDelimiterFilter(input, WordDelimiterFilter.GENERATE_WORD_PARTS | WordDelimiterFilter.GENERATE_NUMBER_PARTS | WordDelimiterFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterFilter.SPLIT_ON_NUMERICS | WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null))); - filters.add(PreConfiguredTokenFilter.singleton("word_delimiter_graph", false, input -> + filters.add(PreConfiguredTokenFilter.singleton("word_delimiter_graph", false, false, input -> new WordDelimiterGraphFilter(input, WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java index c52c78ffe27e3..ce25646050ae8 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -41,11 +41,12 @@ public class CommonAnalysisPluginTests extends ESTestCase { /** - * Check that the deprecated name "nGram" issues a deprecation warning for indices created since 6.3.0 + * Check that the deprecated name "nGram" issues a deprecation warning for indices created since 6.0.0 */ public void testNGramDeprecationWarning() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT)) + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_0_0))) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); @@ -62,12 +63,11 @@ public void testNGramDeprecationWarning() throws IOException { } /** - * Check that the deprecated name "nGram" does NOT issues a deprecation warning for indices created before 6.4.0 + * Check that the deprecated name "nGram" throws an error since 7.0.0 */ - public void testNGramNoDeprecationWarningPre6_4() throws IOException { + public void testNGramDeprecationError() throws 
IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_0)) + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, null)) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); @@ -76,16 +76,21 @@ public void testNGramNoDeprecationWarningPre6_4() throws IOException { TokenFilterFactory tokenFilterFactory = tokenFilters.get("nGram"); Tokenizer tokenizer = new MockTokenizer(); tokenizer.setReader(new StringReader("foo bar")); - assertNotNull(tokenFilterFactory.create(tokenizer)); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> tokenFilterFactory.create(tokenizer)); + assertEquals( + "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. Please change the filter" + + " name to [ngram] instead.", + ex.getMessage()); } } /** - * Check that the deprecated name "edgeNGram" issues a deprecation warning for indices created since 6.3.0 + * Check that the deprecated name "edgeNGram" issues a deprecation warning for indices created since 6.0.0 */ public void testEdgeNGramDeprecationWarning() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT)) + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_0_0))) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); @@ -102,12 +107,11 @@ public void testEdgeNGramDeprecationWarning() throws IOException { } /** - * Check that the deprecated name "edgeNGram" does NOT issues a deprecation warning for indices created before 6.4.0 + * Check that the deprecated name "edgeNGram" throws an error for indices created since 7.0.0 */ - public void testEdgeNGramNoDeprecationWarningPre6_4() throws IOException { + public void testEdgeNGramDeprecationError() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put(IndexMetaData.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_3_0)) + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, null)) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); @@ -116,11 +120,14 @@ public void testEdgeNGramNoDeprecationWarningPre6_4() throws IOException { TokenFilterFactory tokenFilterFactory = tokenFilters.get("edgeNGram"); Tokenizer tokenizer = new MockTokenizer(); tokenizer.setReader(new StringReader("foo bar")); - assertNotNull(tokenFilterFactory.create(tokenizer)); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> tokenFilterFactory.create(tokenizer)); + assertEquals( + "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. 
Please change the filter" + + " name to [edge_ngram] instead.", + ex.getMessage()); } } - /** * Check that the deprecated analyzer name "standard_html_strip" throws exception for indices created since 7.0.0 */ diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java index e96243efc4254..8f58a074cf102 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HighlighterWithAnalyzersTests.java @@ -81,7 +81,7 @@ public void testNgramHighlightingWithBrokenPositions() throws IOException { .put("analysis.tokenizer.autocomplete.max_gram", 20) .put("analysis.tokenizer.autocomplete.min_gram", 1) .put("analysis.tokenizer.autocomplete.token_chars", "letter,digit") - .put("analysis.tokenizer.autocomplete.type", "nGram") + .put("analysis.tokenizer.autocomplete.type", "ngram") .put("analysis.filter.wordDelimiter.type", "word_delimiter") .putList("analysis.filter.wordDelimiter.type_table", "& => ALPHANUM", "| => ALPHANUM", "! => ALPHANUM", diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java index a63dd97568807..6582188f33c0b 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.test.ESTestCase; @@ -42,8 +43,11 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.List; +import java.util.Set; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -163,23 +167,21 @@ public void testAsciiFoldingFilterForSynonyms() throws IOException { new int[]{ 1, 0 }); } - public void testKeywordRepeatAndSynonyms() throws IOException { + public void testPreconfigured() throws IOException { Settings settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put("path.home", createTempDir().toString()) .put("index.analysis.filter.synonyms.type", "synonym") - .putList("index.analysis.filter.synonyms.synonyms", "programmer, developer") - .put("index.analysis.filter.my_english.type", "stemmer") - .put("index.analysis.filter.my_english.language", "porter2") - .put("index.analysis.analyzer.synonymAnalyzer.tokenizer", "standard") - .putList("index.analysis.analyzer.synonymAnalyzer.filter", "lowercase", "keyword_repeat", "my_english", "synonyms") + .putList("index.analysis.filter.synonyms.synonyms", "würst, sausage") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.my_analyzer.filter", "lowercase", "asciifolding", "synonyms") .build(); IndexSettings idxSettings = 
IndexSettingsModule.newIndexSettings("index", settings); indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; - BaseTokenStreamTestCase.assertAnalyzesTo(indexAnalyzers.get("synonymAnalyzer"), "programmers", - new String[]{ "programmers", "programm", "develop" }, - new int[]{ 1, 0, 0 }); + BaseTokenStreamTestCase.assertAnalyzesTo(indexAnalyzers.get("my_analyzer"), "würst", + new String[]{ "wurst", "sausage"}, + new int[]{ 1, 0 }); } public void testChainedSynonymFilters() throws IOException { @@ -248,6 +250,58 @@ public void testTokenFiltersBypassSynonymAnalysis() throws IOException { } + public void testPreconfiguredTokenFilters() throws IOException { + Set disallowedFilters = new HashSet<>(Arrays.asList( + "common_grams", "edge_ngram", "edgeNGram", "keyword_repeat", "ngram", "nGram", + "shingle", "word_delimiter", "word_delimiter_graph" + )); + + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.CURRENT)) + .put("path.home", createTempDir().toString()) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + + CommonAnalysisPlugin plugin = new CommonAnalysisPlugin(); + + for (PreConfiguredTokenFilter tf : plugin.getPreConfiguredTokenFilters()) { + if (disallowedFilters.contains(tf.getName())) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + "Expected exception for factory " + tf.getName(), () -> { + tf.get(idxSettings, null, tf.getName(), settings).getSynonymFilter(); + }); + assertEquals(tf.getName(), "Token filter [" + tf.getName() + + "] cannot be used to parse synonyms", + e.getMessage()); + } + else { + tf.get(idxSettings, null, tf.getName(), settings).getSynonymFilter(); + } + } + + Settings settings2 = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_0_0))) + .put("path.home", createTempDir().toString()) + .putList("common_words", "a", "b") + .put("output_unigrams", "true") + .build(); + IndexSettings idxSettings2 = IndexSettingsModule.newIndexSettings("index", settings2); + + List expectedWarnings = new ArrayList<>(); + for (PreConfiguredTokenFilter tf : plugin.getPreConfiguredTokenFilters()) { + if (disallowedFilters.contains(tf.getName())) { + tf.get(idxSettings2, null, tf.getName(), settings2).getSynonymFilter(); + expectedWarnings.add("Token filter [" + tf.getName() + "] will not be usable to parse synonyms after v7.0"); + } + else { + tf.get(idxSettings2, null, tf.getName(), settings2).getSynonymFilter(); + } + } + assertWarnings(expectedWarnings.toArray(new String[0])); + } + public void testDisallowedTokenFilters() throws IOException { Settings settings = Settings.builder() diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml index 9a7c158fc4734..460bc8ecf83af 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml @@ -23,24 +23,7 @@ - match: { detail.tokenizer.tokens.0.token: Foo Bar! 
} --- -"nGram": - - do: - indices.analyze: - body: - text: good - explain: true - tokenizer: - type: nGram - min_gram: 2 - max_gram: 2 - - length: { detail.tokenizer.tokens: 3 } - - match: { detail.tokenizer.name: _anonymous_tokenizer } - - match: { detail.tokenizer.tokens.0.token: go } - - match: { detail.tokenizer.tokens.1.token: oo } - - match: { detail.tokenizer.tokens.2.token: od } - ---- -"nGram_exception": +"ngram_exception": - skip: version: " - 6.99.99" reason: only starting from version 7.x this throws an error @@ -51,7 +34,7 @@ text: good explain: true tokenizer: - type: nGram + type: ngram min_gram: 2 max_gram: 4 --- @@ -133,7 +116,7 @@ text: "foobar" explain: true tokenizer: - type: nGram + type: ngram min_gram: 3 max_gram: 3 - length: { detail.tokenizer.tokens: 4 } @@ -162,9 +145,9 @@ body: text: "foo" explain: true - tokenizer: nGram + tokenizer: ngram - length: { detail.tokenizer.tokens: 5 } - - match: { detail.tokenizer.name: nGram } + - match: { detail.tokenizer.name: ngram } - match: { detail.tokenizer.tokens.0.token: f } - match: { detail.tokenizer.tokens.1.token: fo } - match: { detail.tokenizer.tokens.2.token: o } @@ -194,7 +177,7 @@ text: "foo" explain: true tokenizer: - type: edgeNGram + type: edge_ngram min_gram: 1 max_gram: 3 - length: { detail.tokenizer.tokens: 3 } @@ -219,9 +202,9 @@ body: text: "foo" explain: true - tokenizer: edgeNGram + tokenizer: edge_ngram - length: { detail.tokenizer.tokens: 2 } - - match: { detail.tokenizer.name: edgeNGram } + - match: { detail.tokenizer.name: edge_ngram } - match: { detail.tokenizer.tokens.0.token: f } - match: { detail.tokenizer.tokens.1.token: fo } diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index ec00b6d41f1c5..56bbed7044e14 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -76,7 +76,7 @@ analysis: tokenizer: trigram: - type: nGram + type: ngram min_gram: 3 max_gram: 3 filter: diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java index 05aa75944d2f9..be5d7e47f1c02 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java @@ -45,7 +45,7 @@ enum DateFormat { Iso8601 { @Override Function getFunction(String format, ZoneId timezone, Locale locale) { - return (date) -> DateFormatters.from(DateFormatter.forPattern("strict_date_time").parse(date)).withZoneSameInstant(timezone); + return (date) -> DateFormatters.from(DateFormatter.forPattern("iso8601").parse(date)).withZoneSameInstant(timezone); } }, Unix { @@ -87,15 +87,21 @@ Function getFunction(String format, ZoneId zoneId, Locale format = format.substring(1); } - int year = LocalDate.now(ZoneOffset.UTC).getYear(); - DateFormatter formatter = DateFormatter.forPattern(format) - .withLocale(locale) - .withZone(zoneId); + boolean isUtc = ZoneOffset.UTC.equals(zoneId); + + DateFormatter dateFormatter = DateFormatter.forPattern(format) + .withLocale(locale); + // if UTC zone is set here, the time zone specified in the format will be ignored, leading to wrong dates + if (isUtc == false) { + dateFormatter = 
dateFormatter.withZone(zoneId); + } + final DateFormatter formatter = dateFormatter; return text -> { TemporalAccessor accessor = formatter.parse(text); // if there is no year, we fall back to the current one and // fill the rest of the date up with the parsed date if (accessor.isSupported(ChronoField.YEAR) == false) { + int year = LocalDate.now(ZoneOffset.UTC).getYear(); ZonedDateTime newTime = Instant.EPOCH.atZone(ZoneOffset.UTC).withYear(year); for (ChronoField field : FIELDS) { if (accessor.isSupported(field)) { @@ -106,7 +112,11 @@ Function getFunction(String format, ZoneId zoneId, Locale accessor = newTime.withZoneSameLocal(zoneId); } - return DateFormatters.from(accessor); + if (isUtc) { + return DateFormatters.from(accessor).withZoneSameInstant(ZoneOffset.UTC); + } else { + return DateFormatters.from(accessor); + } }; } }; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java index e7ad1356977e0..390279bdbb5d7 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java @@ -43,7 +43,7 @@ public final class DateProcessor extends AbstractProcessor { public static final String TYPE = "date"; static final String DEFAULT_TARGET_FIELD = "@timestamp"; - public static final DateFormatter FORMATTER = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); + private static final DateFormatter FORMATTER = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); private final TemplateScript.Factory timezone; private final TemplateScript.Factory locale; diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java index 136c9f7f69a0a..e44e62be8629a 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.ingest.common; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.test.ESTestCase; @@ -43,6 +44,14 @@ public void testParseJava() { equalTo("11 24 01:29:01")); } + public void testParseJavaWithTimeZone() { + Function javaFunction = DateFormat.Java.getFunction("yyyy-MM-dd'T'HH:mm:ss.SSSZZ", + ZoneOffset.UTC, Locale.ROOT); + ZonedDateTime datetime = javaFunction.apply("2018-02-05T13:44:56.657+0100"); + String expectedDateTime = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX").withZone(ZoneOffset.UTC).format(datetime); + assertThat(expectedDateTime, is("2018-02-05T12:44:56.657Z")); + } + public void testParseJavaDefaultYear() { String format = randomFrom("8dd/MM", "dd/MM"); ZoneId timezone = DateUtils.of("Europe/Amsterdam"); @@ -70,6 +79,10 @@ public void testParseUnixWithMsPrecision() { public void testParseISO8601() { assertThat(DateFormat.Iso8601.getFunction(null, ZoneOffset.UTC, null).apply("2001-01-01T00:00:00-0800").toInstant().toEpochMilli(), equalTo(978336000000L)); + assertThat(DateFormat.Iso8601.getFunction(null, ZoneOffset.UTC, null).apply("2001-01-01T00:00:00-0800").toString(), + equalTo("2001-01-01T08:00Z")); + assertThat(DateFormat.Iso8601.getFunction(null, ZoneOffset.UTC, null).apply("2001-01-01T00:00:00-0800").toString(), + 
equalTo("2001-01-01T08:00Z")); } public void testParseISO8601Failure() { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java index 2e32e3fd0ebd2..7582056e0b6b6 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java @@ -29,6 +29,7 @@ import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -97,6 +98,18 @@ public void testJavaPatternMultipleFormats() { } } + public void testJavaPatternNoTimezone() { + DateProcessor dateProcessor = new DateProcessor(randomAlphaOfLength(10), + null, null, + "date_as_string", Arrays.asList("yyyy dd MM HH:mm:ss XXX"), "date_as_date"); + + Map document = new HashMap<>(); + document.put("date_as_string", "2010 12 06 00:00:00 -02:00"); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + dateProcessor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue("date_as_date", String.class), equalTo("2010-06-12T02:00:00.000Z")); + } + public void testInvalidJavaPattern() { try { DateProcessor processor = new DateProcessor(randomAlphaOfLength(10), diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/220_drop_processor.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/220_drop_processor.yml index d1bb3b063a7c4..77a1df81a296a 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/220_drop_processor.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/220_drop_processor.yml @@ -91,4 +91,4 @@ teardown: get: index: test id: 3 -- match: { found: false } + diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/230_change_target_index.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/230_change_target_index.yml new file mode 100644 index 0000000000000..bb2677f9b193f --- /dev/null +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/230_change_target_index.yml @@ -0,0 +1,119 @@ +--- +teardown: +- do: + ingest.delete_pipeline: + id: "retarget" + ignore: 404 + +- do: + indices.delete: + index: foo + +--- +"Test Change Target Index with Explicit Pipeline": + +- do: + ingest.put_pipeline: + id: "retarget" + body: > + { + "processors": [ + { + "set" : { + "field" : "_index", + "value" : "foo" + } + } + ] + } +- match: { acknowledged: true } + +# no indices +- do: + cat.indices: {} + +- match: + $body: | + /^$/ + +- do: + index: + index: test + id: 1 + pipeline: "retarget" + body: { + a: true + } + +- do: + get: + index: foo + id: 1 +- match: { _source.a: true } + +# only the foo index +- do: + cat.indices: + h: i + +- match: + $body: | + /^foo\n$/ + +--- +"Test Change Target Index with Default Pipeline": + +- do: + indices.put_template: + name: index_template + body: + index_patterns: test + settings: + default_pipeline: "retarget" + +- do: + ingest.put_pipeline: + id: "retarget" + body: > + { + "processors": [ + { + "set" : { + "field" : "_index", + "value" : "foo" + } + } + ] + } +- match: { acknowledged: true } + +# no indices +- do: + cat.indices: {} + +- match: + $body: | + /^$/ + +- do: + index: + index: test + id: 1 + body: { + a: 
true + } + +- do: + get: + index: foo + id: 1 +- match: { _source.a: true } + +# only the foo index +- do: + cat.indices: + h: i + +- match: + $body: | + /^foo\n$/ diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/30_date_processor.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/30_date_processor.yml index b2e83c640388a..99e90064da013 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/30_date_processor.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/30_date_processor.yml @@ -39,3 +39,94 @@ teardown: id: 1 - match: { _source.date_source_field: "12/06/2010" } - match: { _source.date_target_field: "2010-06-12T00:00:00.000+02:00" } + +--- +"Test date processor with no timezone configured": + + - do: + ingest.put_pipeline: + id: "my_pipeline" + # sample formats from beats, featuring mongodb, icinga, apache + body: > + { + "description": "_description", + "processors": [ + { + "date" : { + "field" : "date_source_1", + "target_field" : "date_target_1", + "formats" : ["yyyy-MM-dd'T'HH:mm:ss.SSSZZ" ] + } + }, + { + "date" : { + "field" : "date_source_2", + "target_field" : "date_target_2", + "formats" : ["yyyy-MM-dd HH:mm:ss Z" ] + } + }, + { + "date" : { + "field" : "date_source_3", + "target_field" : "date_target_3", + "formats" : [ "dd/MMM/yyyy:H:m:s Z" ] + } + }, + { + "date" : { + "field" : "date_source_4", + "target_field" : "date_target_4", + "formats" : [ "UNIX" ] + } + }, + { + "date" : { + "field" : "date_source_5", + "target_field" : "date_target_5", + "formats" : [ "UNIX_MS" ] + } + }, + { + "date" : { + "field" : "date_source_6", + "target_field" : "date_target_6", + "formats" : [ "TAI64N" ] + } + }, + { + "date" : { + "field" : "date_source_7", + "target_field" : "date_target_7", + "formats" : [ "ISO8601" ] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { date_source_1: "2018-02-05T13:44:56.657+0100", date_source_2: "2017-04-04 13:43:09 +0200", date_source_3: "10/Aug/2018:09:45:56 +0200", date_source_4: "1", date_source_5: "1", date_source_6: "4000000050d506482dbdf024", date_source_7: "2018-02-05T13:44:56.657+0100" } + + - do: + get: + index: test + id: 1 + - match: { _source.date_source_1: "2018-02-05T13:44:56.657+0100" } + - match: { _source.date_target_1: "2018-02-05T12:44:56.657Z" } + - match: { _source.date_source_2: "2017-04-04 13:43:09 +0200" } + - match: { _source.date_target_2: "2017-04-04T11:43:09.000Z" } + - match: { _source.date_source_3: "10/Aug/2018:09:45:56 +0200" } + - match: { _source.date_target_3: "2018-08-10T07:45:56.000Z" } + - match: { _source.date_source_4: "1" } + - match: { _source.date_target_4: "1970-01-01T00:00:01.000Z" } + - match: { _source.date_source_5: "1" } + - match: { _source.date_target_5: "1970-01-01T00:00:00.001Z" } + - match: { _source.date_source_6: "4000000050d506482dbdf024" } + - match: { _source.date_target_6: "2012-12-22T01:00:46.767Z" } + - match: { _source.date_source_7: "2018-02-05T13:44:56.657+0100" } + - match: { _source.date_target_7: "2018-02-05T12:44:56.657Z" } + diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 1f356792032b6..f2dec9cd7b7fa 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -27,7 +27,7 @@ dependencies { compile ('com.maxmind.geoip2:geoip2:2.9.0') // geoip2 dependencies: compile("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") - 
compile("com.fasterxml.jackson.core:jackson-databind:${versions.jackson}") + compile("com.fasterxml.jackson.core:jackson-databind:2.8.11.3") compile('com.maxmind.db:maxmind-db:1.2.2') testCompile 'org.elasticsearch:geolite2-databases:20180911' diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.8.11.3.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.8.11.3.jar.sha1 new file mode 100644 index 0000000000000..253a1361931c3 --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.8.11.3.jar.sha1 @@ -0,0 +1 @@ +844df5aba5a1a56e00905b165b12bb34116ee858 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.8.11.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.8.11.jar.sha1 deleted file mode 100644 index 53d6c1fa20834..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.8.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0569a9f220273024523799dba9dd358121b0ee09 \ No newline at end of file diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java index d83762a5e47ab..1d5b1e0fbc503 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java @@ -27,9 +27,11 @@ import org.elasticsearch.ingest.useragent.UserAgentParser.Details; import org.elasticsearch.ingest.useragent.UserAgentParser.VersionedName; +import java.lang.reflect.Field; import java.util.Arrays; import java.util.EnumSet; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -51,15 +53,17 @@ public class UserAgentProcessor extends AbstractProcessor { private final Set properties; private final UserAgentParser parser; private final boolean ignoreMissing; + private final boolean useECS; public UserAgentProcessor(String tag, String field, String targetField, UserAgentParser parser, Set properties, - boolean ignoreMissing) { + boolean ignoreMissing, boolean useECS) { super(tag); this.field = field; this.targetField = targetField; this.parser = parser; this.properties = properties; this.ignoreMissing = ignoreMissing; + this.useECS = useECS; } boolean isIgnoreMissing() { @@ -80,68 +84,135 @@ public IngestDocument execute(IngestDocument ingestDocument) { Map uaDetails = new HashMap<>(); - // Parse the user agent in the ECS (Elastic Common Schema) format - for (Property property : this.properties) { - switch (property) { - case ORIGINAL: - uaDetails.put("original", userAgent); - break; - case NAME: - if (uaClient.userAgent != null && uaClient.userAgent.name != null) { - uaDetails.put("name", uaClient.userAgent.name); - } else { - uaDetails.put("name", "Other"); - } - break; - case VERSION: - StringBuilder version = new StringBuilder(); - if (uaClient.userAgent != null && uaClient.userAgent.major != null) { - version.append(uaClient.userAgent.major); - if (uaClient.userAgent.minor != null) { - version.append(".").append(uaClient.userAgent.minor); - if (uaClient.userAgent.patch != null) { - version.append(".").append(uaClient.userAgent.patch); - if (uaClient.userAgent.build != null) { - version.append(".").append(uaClient.userAgent.build); + if (useECS) { + // Parse the user agent in the ECS (Elastic Common Schema) format + for (Property property : this.properties) { + switch (property) { 
+ case ORIGINAL: + uaDetails.put("original", userAgent); + break; + case NAME: + if (uaClient.userAgent != null && uaClient.userAgent.name != null) { + uaDetails.put("name", uaClient.userAgent.name); + } else { + uaDetails.put("name", "Other"); + } + break; + case VERSION: + StringBuilder version = new StringBuilder(); + if (uaClient.userAgent != null && uaClient.userAgent.major != null) { + version.append(uaClient.userAgent.major); + if (uaClient.userAgent.minor != null) { + version.append(".").append(uaClient.userAgent.minor); + if (uaClient.userAgent.patch != null) { + version.append(".").append(uaClient.userAgent.patch); + if (uaClient.userAgent.build != null) { + version.append(".").append(uaClient.userAgent.build); + } } } + uaDetails.put("version", version.toString()); } - uaDetails.put("version", version.toString()); - } - break; - case OS: - if (uaClient.operatingSystem != null) { - Map osDetails = new HashMap<>(3); - if (uaClient.operatingSystem.name != null) { - osDetails.put("name", uaClient.operatingSystem.name); - StringBuilder sb = new StringBuilder(); - if (uaClient.operatingSystem.major != null) { - sb.append(uaClient.operatingSystem.major); - if (uaClient.operatingSystem.minor != null) { - sb.append(".").append(uaClient.operatingSystem.minor); - if (uaClient.operatingSystem.patch != null) { - sb.append(".").append(uaClient.operatingSystem.patch); - if (uaClient.operatingSystem.build != null) { - sb.append(".").append(uaClient.operatingSystem.build); + break; + case OS: + if (uaClient.operatingSystem != null) { + Map osDetails = new HashMap<>(3); + if (uaClient.operatingSystem.name != null) { + osDetails.put("name", uaClient.operatingSystem.name); + StringBuilder sb = new StringBuilder(); + if (uaClient.operatingSystem.major != null) { + sb.append(uaClient.operatingSystem.major); + if (uaClient.operatingSystem.minor != null) { + sb.append(".").append(uaClient.operatingSystem.minor); + if (uaClient.operatingSystem.patch != null) { + sb.append(".").append(uaClient.operatingSystem.patch); + if (uaClient.operatingSystem.build != null) { + sb.append(".").append(uaClient.operatingSystem.build); + } } } + osDetails.put("version", sb.toString()); + osDetails.put("full", uaClient.operatingSystem.name + " " + sb.toString()); } - osDetails.put("version", sb.toString()); - osDetails.put("full", uaClient.operatingSystem.name + " " + sb.toString()); + uaDetails.put("os", osDetails); } - uaDetails.put("os", osDetails); } - } - break; - case DEVICE: - Map deviceDetails = new HashMap<>(1); - if (uaClient.device != null && uaClient.device.name != null) { - deviceDetails.put("name", uaClient.device.name); - } else { - deviceDetails.put("name", "Other"); - } - uaDetails.put("device", deviceDetails); - break; + break; + case DEVICE: + Map deviceDetails = new HashMap<>(1); + if (uaClient.device != null && uaClient.device.name != null) { + deviceDetails.put("name", uaClient.device.name); + } else { + deviceDetails.put("name", "Other"); + } + uaDetails.put("device", deviceDetails); + break; + } + } + } else { + // Deprecated format, removed in 8.0 + for (Property property : this.properties) { + switch (property) { + case NAME: + if (uaClient.userAgent != null && uaClient.userAgent.name != null) { + uaDetails.put("name", uaClient.userAgent.name); + } else { + uaDetails.put("name", "Other"); + } + break; + case MAJOR: + if (uaClient.userAgent != null && uaClient.userAgent.major != null) { + uaDetails.put("major", uaClient.userAgent.major); + } + break; + case MINOR: + if (uaClient.userAgent != null 
&& uaClient.userAgent.minor != null) { + uaDetails.put("minor", uaClient.userAgent.minor); + } + break; + case PATCH: + if (uaClient.userAgent != null && uaClient.userAgent.patch != null) { + uaDetails.put("patch", uaClient.userAgent.patch); + } + break; + case BUILD: + if (uaClient.userAgent != null && uaClient.userAgent.build != null) { + uaDetails.put("build", uaClient.userAgent.build); + } + break; + case OS: + if (uaClient.operatingSystem != null) { + uaDetails.put("os", buildFullOSName(uaClient.operatingSystem)); + } else { + uaDetails.put("os", "Other"); + } + + break; + case OS_NAME: + if (uaClient.operatingSystem != null && uaClient.operatingSystem.name != null) { + uaDetails.put("os_name", uaClient.operatingSystem.name); + } else { + uaDetails.put("os_name", "Other"); + } + break; + case OS_MAJOR: + if (uaClient.operatingSystem != null && uaClient.operatingSystem.major != null) { + uaDetails.put("os_major", uaClient.operatingSystem.major); + } + break; + case OS_MINOR: + if (uaClient.operatingSystem != null && uaClient.operatingSystem.minor != null) { + uaDetails.put("os_minor", uaClient.operatingSystem.minor); + } + break; + case DEVICE: + if (uaClient.device != null && uaClient.device.name != null) { + uaDetails.put("device", uaClient.device.name); + } else { + uaDetails.put("device", "Other"); + } + break; + } } } @@ -201,6 +272,10 @@ UserAgentParser getUaParser() { return parser; } + public boolean isUseECS() { + return useECS; + } + public static final class Factory implements Processor.Factory { private final Map userAgentParsers; @@ -217,10 +292,7 @@ public UserAgentProcessor create(Map factories, Strin String regexFilename = readStringProperty(TYPE, processorTag, config, "regex_file", IngestUserAgentPlugin.DEFAULT_PARSER_NAME); List propertyNames = readOptionalList(TYPE, processorTag, config, "properties"); boolean ignoreMissing = readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); - Object ecsValue = config.remove("ecs"); - if (ecsValue != null) { - deprecationLogger.deprecated("setting [ecs] is deprecated as ECS format is the default and only option"); - } + boolean useECS = readBooleanProperty(TYPE, processorTag, config, "ecs", true); UserAgentParser parser = userAgentParsers.get(regexFilename); if (parser == null) { @@ -242,22 +314,53 @@ public UserAgentProcessor create(Map factories, Strin properties = EnumSet.allOf(Property.class); } - return new UserAgentProcessor(processorTag, field, targetField, parser, properties, ignoreMissing); + if (useECS == false) { + deprecationLogger.deprecated("setting [ecs] to false for non-common schema " + + "format is deprecated and will be removed in 8.0, set to true or remove to use the non-deprecated format"); + } + + return new UserAgentProcessor(processorTag, field, targetField, parser, properties, ignoreMissing, useECS); } } enum Property { NAME, + // Deprecated in 6.7 (superceded by VERSION), to be removed in 8.0 + @Deprecated MAJOR, + @Deprecated MINOR, + @Deprecated PATCH, OS, + // Deprecated in 6.7 (superceded by just using OS), to be removed in 8.0 + @Deprecated OS_NAME, + @Deprecated OS_MAJOR, + @Deprecated OS_MINOR, DEVICE, + @Deprecated BUILD, // Same deprecated as OS_* above ORIGINAL, VERSION; + private static Set DEPRECATED_PROPERTIES; + + static { + Set deprecated = new HashSet<>(); + for (Field field : Property.class.getFields()) { + if (field.isEnumConstant() && field.isAnnotationPresent(Deprecated.class)) { + deprecated.add(valueOf(field.getName())); + } + } + DEPRECATED_PROPERTIES = 
deprecated; + } + public static Property parseProperty(String propertyName) { try { - return valueOf(propertyName.toUpperCase(Locale.ROOT)); - } catch (IllegalArgumentException e) { + Property value = valueOf(propertyName.toUpperCase(Locale.ROOT)); + if (DEPRECATED_PROPERTIES.contains(value)) { + deprecationLogger.deprecated("the [{}] property is deprecated for the user-agent processor", propertyName); + } + return value; + } + catch (IllegalArgumentException e) { throw new IllegalArgumentException("illegal property value [" + propertyName + "]. valid values are " + Arrays.toString(EnumSet.allOf(Property.class).toArray())); } diff --git a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorFactoryTests.java b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorFactoryTests.java index f723c13f23022..c25200b457985 100644 --- a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorFactoryTests.java +++ b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorFactoryTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.ingest.useragent; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; import org.elasticsearch.test.ESTestCase; import org.junit.BeforeClass; @@ -27,17 +28,21 @@ import java.io.BufferedWriter; import java.io.IOException; import java.io.InputStreamReader; +import java.lang.reflect.Field; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -84,12 +89,12 @@ public void testBuildDefaults() throws Exception { UserAgentProcessor processor = factory.create(null, processorTag, config); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getField(), equalTo("_field")); - assertThat(processor.getTargetField(), equalTo("user_agent")); assertThat(processor.getUaParser().getUaPatterns().size(), greaterThan(0)); assertThat(processor.getUaParser().getOsPatterns().size(), greaterThan(0)); assertThat(processor.getUaParser().getDevicePatterns().size(), greaterThan(0)); assertThat(processor.getProperties(), equalTo(EnumSet.allOf(UserAgentProcessor.Property.class))); assertFalse(processor.isIgnoreMissing()); + assertTrue(processor.isUseECS()); } public void testBuildWithIgnoreMissing() throws Exception { @@ -98,6 +103,7 @@ public void testBuildWithIgnoreMissing() throws Exception { Map config = new HashMap<>(); config.put("field", "_field"); config.put("ignore_missing", true); + config.put("ecs", true); String processorTag = randomAlphaOfLength(10); @@ -118,6 +124,7 @@ public void testBuildTargetField() throws Exception { Map config = new HashMap<>(); config.put("field", "_field"); config.put("target_field", "_target_field"); + config.put("ecs", true); UserAgentProcessor processor = factory.create(null, null, config); assertThat(processor.getField(), equalTo("_field")); @@ -130,6 +137,7 @@ public void testBuildRegexFile() throws Exception { Map config = new HashMap<>(); config.put("field", "_field"); config.put("regex_file", 
regexWithoutDevicesFilename); + config.put("ecs", true); UserAgentProcessor processor = factory.create(null, null, config); assertThat(processor.getField(), equalTo("_field")); @@ -155,8 +163,17 @@ public void testBuildFields() throws Exception { Set properties = EnumSet.noneOf(UserAgentProcessor.Property.class); List fieldNames = new ArrayList<>(); int numFields = scaledRandomIntBetween(1, UserAgentProcessor.Property.values().length); + Set warnings = new HashSet<>(); + Set deprecated = Arrays.stream(UserAgentProcessor.Property.class.getFields()) + .filter(Field::isEnumConstant) + .filter(field -> field.isAnnotationPresent(Deprecated.class)) + .map(field -> UserAgentProcessor.Property.valueOf(field.getName())) + .collect(Collectors.toSet()); for (int i = 0; i < numFields; i++) { UserAgentProcessor.Property property = UserAgentProcessor.Property.values()[i]; + if (deprecated.contains(property)) { + warnings.add("the [" + property.name().toLowerCase(Locale.ROOT) + "] property is deprecated for the user-agent processor"); + } properties.add(property); fieldNames.add(property.name().toLowerCase(Locale.ROOT)); } @@ -164,10 +181,14 @@ public void testBuildFields() throws Exception { Map config = new HashMap<>(); config.put("field", "_field"); config.put("properties", fieldNames); + config.put("ecs", true); UserAgentProcessor processor = factory.create(null, null, config); assertThat(processor.getField(), equalTo("_field")); assertThat(processor.getProperties(), equalTo(properties)); + if (warnings.size() > 0) { + assertWarnings(warnings.toArray(Strings.EMPTY_ARRAY)); + } } public void testInvalidProperty() throws Exception { @@ -178,8 +199,8 @@ public void testInvalidProperty() throws Exception { config.put("properties", Collections.singletonList("invalid")); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, null, config)); - assertThat(e.getMessage(), equalTo("[properties] illegal property value [invalid]. valid values are [NAME, OS, DEVICE, " + - "ORIGINAL, VERSION]")); + assertThat(e.getMessage(), equalTo("[properties] illegal property value [invalid]. 
valid values are [NAME, MAJOR, MINOR, " + + "PATCH, OS, OS_NAME, OS_MAJOR, OS_MINOR, DEVICE, BUILD, ORIGINAL, VERSION]")); } public void testInvalidPropertiesType() throws Exception { diff --git a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java index f043cc5369a26..4cb270e75a94b 100644 --- a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java +++ b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java @@ -48,12 +48,12 @@ public static void setupProcessor() throws IOException { UserAgentParser parser = new UserAgentParser(randomAlphaOfLength(10), regexStream, new UserAgentCache(1000)); processor = new UserAgentProcessor(randomAlphaOfLength(10), "source_field", "target_field", parser, - EnumSet.allOf(UserAgentProcessor.Property.class), false); + EnumSet.allOf(UserAgentProcessor.Property.class), false, true); } public void testNullValueWithIgnoreMissing() throws Exception { UserAgentProcessor processor = new UserAgentProcessor(randomAlphaOfLength(10), "source_field", "target_field", null, - EnumSet.allOf(UserAgentProcessor.Property.class), true); + EnumSet.allOf(UserAgentProcessor.Property.class), true, true); IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("source_field", null)); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); @@ -63,7 +63,7 @@ public void testNullValueWithIgnoreMissing() throws Exception { public void testNonExistentWithIgnoreMissing() throws Exception { UserAgentProcessor processor = new UserAgentProcessor(randomAlphaOfLength(10), "source_field", "target_field", null, - EnumSet.allOf(UserAgentProcessor.Property.class), true); + EnumSet.allOf(UserAgentProcessor.Property.class), true, true); IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); processor.execute(ingestDocument); @@ -72,7 +72,7 @@ public void testNonExistentWithIgnoreMissing() throws Exception { public void testNullWithoutIgnoreMissing() throws Exception { UserAgentProcessor processor = new UserAgentProcessor(randomAlphaOfLength(10), "source_field", "target_field", null, - EnumSet.allOf(UserAgentProcessor.Property.class), false); + EnumSet.allOf(UserAgentProcessor.Property.class), false, true); IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("source_field", null)); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); @@ -82,7 +82,7 @@ public void testNullWithoutIgnoreMissing() throws Exception { public void testNonExistentWithoutIgnoreMissing() throws Exception { UserAgentProcessor processor = new UserAgentProcessor(randomAlphaOfLength(10), "source_field", "target_field", null, - EnumSet.allOf(UserAgentProcessor.Property.class), false); + EnumSet.allOf(UserAgentProcessor.Property.class), false, true); IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); Exception exception = expectThrows(Exception.class, () -> processor.execute(ingestDocument)); @@ -186,6 +186,7 @@ public void testUnknown() 
throws Exception { assertNull(target.get("build")); assertNull(target.get("os")); + Map device = new HashMap<>(); device.put("name", "Other"); assertThat(target.get("device"), is(device)); diff --git a/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/20_useragent_processor.yml b/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/20_useragent_processor.yml index a7fe57c557008..f629761bd90a5 100644 --- a/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/20_useragent_processor.yml +++ b/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/20_useragent_processor.yml @@ -46,6 +46,7 @@ { "user_agent" : { "field" : "field1", + "ecs": true, "target_field": "field2", "properties": ["os"] } @@ -69,5 +70,62 @@ - match: { _source.field2.os.full: "Mac OS X 10.9.2" } - is_false: _source.user_agent - is_false: _source.field2.name + - is_false: _source.field2.os_name + - is_false: _source.field2.os_major + - is_false: _source.field2.os_minor + - is_false: _source.field2.major + - is_false: _source.field2.minor + - is_false: _source.field2.patch + - is_false: _source.field2.device + +--- +"Test user agent processor with non-ECS schema": + - skip: + features: warnings + + - do: + warnings: + - "setting [ecs] to false for non-common schema format is deprecated and will be removed in 8.0, set to true or remove to use the non-deprecated format" + - "the [os_major] property is deprecated for the user-agent processor" + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "user_agent" : { + "field" : "field1", + "ecs": false, + "target_field": "field2", + "properties": ["os", "os_major"] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: {field1: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.149 Safari/537.36"} + + - do: + get: + index: test + type: test + id: 1 + - match: { _source.field1: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.149 Safari/537.36" } + - match: { _source.field2.os: "Mac OS X 10.9.2" } + - match: { _source.field2.os_major: "10" } + - is_false: _source.user_agent + - is_false: _source.field2.name + - is_false: _source.field2.os_name + - is_false: _source.field2.os_minor + - is_false: _source.field2.major + - is_false: _source.field2.minor + - is_false: _source.field2.patch - is_false: _source.field2.device - - is_false: _source.field2.original diff --git a/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/30_custom_regex.yml b/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/30_custom_regex.yml index 763bea0ee4da0..a2cd419dd69b5 100644 --- a/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/30_custom_regex.yml +++ b/modules/ingest-user-agent/src/test/resources/rest-api-spec/test/ingest-useragent/30_custom_regex.yml @@ -10,6 +10,7 @@ { "user_agent" : { "field": "field1", + "ecs": true, "regex_file": "test-regexes.yml" } } diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index fa128372b2467..0000000000000 --- 
a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c84431b751d851f484f2799f6dcb9110113b1958 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..6174d2bce6456 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.0.0.jar.sha1 @@ -0,0 +1 @@ +106b35cf2739f7d2350f3ef5c107d9d066d17cd6 \ No newline at end of file diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index c35127e1c91d5..9beabbffbbc19 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -37,8 +37,8 @@ dependencyLicenses { mapping from: /asm-.*/, to: 'asm' } -unitTest { - jvmArg '-XX:-OmitStackTraceInFastThrow' +test { + jvmArgs '-XX:-OmitStackTraceInFastThrow' } /* Build Javadoc for the Java classes in Painless's public API that are in the diff --git a/modules/lang-painless/spi/build.gradle b/modules/lang-painless/spi/build.gradle index 92fd70411f74c..7e43a242a23a9 100644 --- a/modules/lang-painless/spi/build.gradle +++ b/modules/lang-painless/spi/build.gradle @@ -37,4 +37,4 @@ dependencies { } // no tests...yet? -unitTest.enabled = false +test.enabled = false diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java index 7beddc13ca598..c99ed67e14bb5 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java @@ -169,10 +169,9 @@ public void parse(ParseContext context) throws IOException { buf[offset+2] = (byte) (intValue >> 8); buf[offset+3] = (byte) intValue; offset += INT_BYTES; - dim++; - if (dim >= MAX_DIMS_COUNT) { + if (dim++ >= MAX_DIMS_COUNT) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + - "] has exceeded the maximum allowed number of dimensions of :[" + MAX_DIMS_COUNT + "]"); + "] has exceeded the maximum allowed number of dimensions of [" + MAX_DIMS_COUNT + "]"); } } BinaryDocValuesField field = new BinaryDocValuesField(fieldType().name(), new BytesRef(buf, 0, offset)); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java index f7288d5039390..b1804f472fe70 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java @@ -178,10 +178,9 @@ public void parse(ParseContext context) throws IOException { } dims[dimCount] = dim; values[dimCount] = value; - dimCount ++; - if (dimCount >= MAX_DIMS_COUNT) { + if (dimCount++ >= MAX_DIMS_COUNT) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + - "] has exceeded the maximum allowed number of dimensions of :[" + MAX_DIMS_COUNT + "]"); + "] has exceeded the maximum allowed number of dimensions of [" + MAX_DIMS_COUNT + "]"); } } else { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + diff --git 
a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/DenseVectorFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/DenseVectorFieldMapperTests.java index 2239c99a310f5..cf6fc99657756 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/DenseVectorFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/DenseVectorFieldMapperTests.java @@ -30,18 +30,19 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.hamcrest.Matchers; +import org.junit.Before; +import java.io.IOException; import java.util.Collection; -public class DenseVectorFieldMapperTests extends ESSingleNodeTestCase { +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; - @Override - protected Collection> getPlugins() { - return pluginList(MapperExtrasPlugin.class); - } +public class DenseVectorFieldMapperTests extends ESSingleNodeTestCase { + private DocumentMapper mapper; - public void testDefaults() throws Exception { + @Before + public void setUpMapper() throws Exception { IndexService indexService = createIndex("test-index"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); String mapping = Strings.toString(XContentFactory.jsonBuilder() @@ -53,10 +54,15 @@ public void testDefaults() throws Exception { .endObject() .endObject() .endObject()); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); + } - DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); - assertEquals(mapping, mapper.mappingSource().toString()); + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + public void testDefaults() throws Exception { float[] expectedArray = {-12.1f, 100.7f, -4}; ParsedDocument doc1 = mapper.parse(new SourceToParse("test-index", "_doc", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -66,7 +72,7 @@ public void testDefaults() throws Exception { XContentType.JSON)); IndexableField[] fields = doc1.rootDoc().getFields("my-dense-vector"); assertEquals(1, fields.length); - assertThat(fields[0], Matchers.instanceOf(BinaryDocValuesField.class)); + assertThat(fields[0], instanceOf(BinaryDocValuesField.class)); // assert that after decoding the indexed value is equal to expected BytesRef vectorBR = ((BinaryDocValuesField) fields[0]).binaryValue(); @@ -78,4 +84,22 @@ public void testDefaults() throws Exception { 0.001f ); } + + public void testDimensionLimit() throws IOException { + float[] validVector = new float[DenseVectorFieldMapper.MAX_DIMS_COUNT]; + BytesReference validDoc = BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .array("my-dense-vector", validVector) + .endObject()); + mapper.parse(new SourceToParse("test-index", "_doc", "1", validDoc, XContentType.JSON)); + + float[] invalidVector = new float[DenseVectorFieldMapper.MAX_DIMS_COUNT + 1]; + BytesReference invalidDoc = BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .array("my-dense-vector", invalidVector) + .endObject()); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse( + new SourceToParse("test-index", "_doc", "1", invalidDoc, XContentType.JSON))); + assertThat(e.getDetailedMessage(), containsString("has exceeded the maximum allowed number of dimensions")); + } } diff --git 
a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SparseVectorFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SparseVectorFieldMapperTests.java index 06710e39592cc..754a6f1a31803 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SparseVectorFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SparseVectorFieldMapperTests.java @@ -33,7 +33,12 @@ import org.hamcrest.Matchers; import org.junit.Before; +import java.io.IOException; import java.util.Collection; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.core.IsInstanceOf.instanceOf; @@ -42,7 +47,7 @@ public class SparseVectorFieldMapperTests extends ESSingleNodeTestCase { private DocumentMapper mapper; @Before - public void setup() throws Exception { + public void setUpMapper() throws Exception { IndexService indexService = createIndex("test-index"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); String mapping = Strings.toString(XContentFactory.jsonBuilder() @@ -100,7 +105,7 @@ public void testDefaults() throws Exception { ); } - public void testErrors() { + public void testDimensionNumberValidation() { // 1. test for an error on negative dimension MapperParsingException e = expectThrows(MapperParsingException.class, () -> { mapper.parse(new SourceToParse("test-index", "_doc", "1", BytesReference @@ -161,4 +166,28 @@ public void testErrors() { assertThat(e.getCause().getMessage(), containsString( "takes an object that maps a dimension number to a float, but got unexpected token [START_ARRAY]")); } + + public void testDimensionLimit() throws IOException { + Map validVector = IntStream.range(0, SparseVectorFieldMapper.MAX_DIMS_COUNT) + .boxed() + .collect(Collectors.toMap(String::valueOf, Function.identity())); + + BytesReference validDoc = BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .field("my-sparse-vector", validVector) + .endObject()); + mapper.parse(new SourceToParse("test-index", "_doc", "1", validDoc, XContentType.JSON)); + + Map invalidVector = IntStream.range(0, SparseVectorFieldMapper.MAX_DIMS_COUNT + 1) + .boxed() + .collect(Collectors.toMap(String::valueOf, Function.identity())); + + BytesReference invalidDoc = BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .field("my-sparse-vector", invalidVector) + .endObject()); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse( + new SourceToParse("test-index", "_doc", "1", invalidDoc, XContentType.JSON))); + assertThat(e.getDetailedMessage(), containsString("has exceeded the maximum allowed number of dimensions")); + } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 2cb37cb794dda..44200823b6d66 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -96,6 +96,10 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder { if (getResponse.isExists() == false) { throw new ResourceNotFoundException( - "indexed document [{}/{}/{}] couldn't be found", indexedDocumentIndex, 
indexedDocumentType, indexedDocumentId + "indexed document [{}{}/{}] couldn't be found", indexedDocumentIndex, + indexedDocumentType == null ? "" : "/" + indexedDocumentType, indexedDocumentId ); } if(getResponse.isSourceEmpty()) { throw new IllegalArgumentException( - "indexed document [" + indexedDocumentIndex + "/" + indexedDocumentType + "/" + indexedDocumentId - + "] source disabled" + "indexed document [" + indexedDocumentIndex + (indexedDocumentType == null ? "" : "/" + indexedDocumentType) + + "/" + indexedDocumentId + "] source disabled" ); } documentSupplier.set(getResponse.getSourceAsBytesRef()); @@ -554,7 +563,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { // Call nowInMillis() so that this query becomes un-cacheable since we // can't be sure that it doesn't use now or scripts context.nowInMillis(); - if (indexedDocumentIndex != null || indexedDocumentType != null || indexedDocumentId != null || documentSupplier != null) { + if (indexedDocumentIndex != null || indexedDocumentId != null || documentSupplier != null) { throw new IllegalStateException("query builder must be rewritten first"); } @@ -577,7 +586,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { final MapperService mapperService = context.getMapperService(); String type = mapperService.documentMapper().type(); if (documentType != null) { - deprecationLogger.deprecated("[document_type] parameter has been deprecated because types have been deprecated"); + deprecationLogger.deprecatedAndMaybeLog("percolate_with_document_type", DOCUMENT_TYPE_DEPRECATION_MESSAGE); if (documentType.equals(type) == false) { throw new IllegalArgumentException("specified document_type [" + documentType + "] is not equal to the actual type [" + type + "]"); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index e4731919fa7d0..e697c2f66eed8 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -69,7 +70,6 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase rewriteAndFetch(pqb, createShardContext())); - String expectedString = "indexed document [" + indexedDocumentIndex + "/" + indexedDocumentType + "/" + + String expectedString = "indexed document [" + indexedDocumentIndex + "/" + indexedDocumentId + "] couldn't be found"; assertThat(e.getMessage() , equalTo(expectedString)); } @@ -220,11 +219,6 @@ public void testRequiredParameters() { }); assertThat(e.getMessage(), equalTo("[index] is a required argument")); - e = expectThrows(IllegalArgumentException.class, () -> { - new PercolateQueryBuilder("_field", "_document_type", "_index", null, "_id", null, null, null); - }); - assertThat(e.getMessage(), equalTo("[type] is a required argument")); - e = expectThrows(IllegalArgumentException.class, () -> { new PercolateQueryBuilder("_field", "_document_type", 
"_index", "_type", null, null, null, null); }); @@ -237,6 +231,39 @@ public void testFromJsonNoDocumentType() throws IOException { queryBuilder.toQuery(queryShardContext); } + public void testFromJsonWithDocumentType() throws IOException { + QueryShardContext queryShardContext = createShardContext(); + QueryBuilder queryBuilder = parseQuery("{\"percolate\" : { \"document\": {}, \"document_type\":\"" + docType + "\", \"field\":\"" + + queryField + "\"}}"); + queryBuilder.toQuery(queryShardContext); + assertWarnings(PercolateQueryBuilder.DOCUMENT_TYPE_DEPRECATION_MESSAGE); + } + + public void testFromJsonNoType() throws IOException { + indexedDocumentIndex = randomAlphaOfLength(4); + indexedDocumentId = randomAlphaOfLength(4); + indexedDocumentVersion = Versions.MATCH_ANY; + documentSource = Collections.singletonList(randomSource(new HashSet<>())); + + QueryShardContext queryShardContext = createShardContext(); + QueryBuilder queryBuilder = parseQuery("{\"percolate\" : { \"index\": \"" + indexedDocumentIndex + "\", \"id\": \"" + + indexedDocumentId + "\", \"field\":\"" + queryField + "\"}}"); + rewriteAndFetch(queryBuilder, queryShardContext).toQuery(queryShardContext); + } + + public void testFromJsonWithType() throws IOException { + indexedDocumentIndex = randomAlphaOfLength(4); + indexedDocumentId = randomAlphaOfLength(4); + indexedDocumentVersion = Versions.MATCH_ANY; + documentSource = Collections.singletonList(randomSource(new HashSet<>())); + + QueryShardContext queryShardContext = createShardContext(); + QueryBuilder queryBuilder = parseQuery("{\"percolate\" : { \"index\": \"" + indexedDocumentIndex + + "\", \"type\": \"_doc\", \"id\": \"" + indexedDocumentId + "\", \"field\":\"" + queryField + "\"}}"); + rewriteAndFetch(queryBuilder, queryShardContext).toQuery(queryShardContext); + assertWarnings(PercolateQueryBuilder.TYPE_DEPRECATION_MESSAGE); + } + public void testBothDocumentAndDocumentsSpecified() throws IOException { expectThrows(IllegalArgumentException.class, () -> parseQuery("{\"percolate\" : { \"document\": {}, \"documents\": [{}, {}], \"field\":\"" + queryField + "\"}}")); diff --git a/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yml b/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yml index 3ed2ed64d782c..08d344687adc7 100644 --- a/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yml +++ b/modules/percolator/src/test/resources/rest-api-spec/test/10_basic.yml @@ -1,5 +1,10 @@ --- "Test percolator basics via rest": + + - skip: + version: " - 6.99.99" + reason: types are required in requests before 7.0.0 + - do: indices.create: index: queries_index @@ -11,6 +16,15 @@ foo: type: keyword + - do: + indices.create: + index: documents_index + body: + mappings: + properties: + foo: + type: keyword + - do: index: index: queries_index @@ -19,6 +33,13 @@ query: match_all: {} + - do: + index: + index: documents_index + id: some_id + body: + foo: bar + - do: indices.refresh: {} @@ -44,3 +65,26 @@ document: foo: bar - match: { responses.0.hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + body: + - query: + percolate: + field: query + index: documents_index + id: some_id + - match: { hits.total: 1 } + + - do: + msearch: + rest_total_hits_as_int: true + body: + - index: queries_index + - query: + percolate: + field: query + index: documents_index + id: some_id + - match: { responses.0.hits.total: 1 } diff --git a/modules/percolator/src/test/resources/rest-api-spec/test/11_basic_with_types.yml 
b/modules/percolator/src/test/resources/rest-api-spec/test/11_basic_with_types.yml new file mode 100644 index 0000000000000..896d2d514bcb9 --- /dev/null +++ b/modules/percolator/src/test/resources/rest-api-spec/test/11_basic_with_types.yml @@ -0,0 +1,96 @@ +--- +"Test percolator basics via rest": + + - do: + indices.create: + include_type_name: true + index: queries_index + body: + mappings: + queries_type: + properties: + query: + type: percolator + foo: + type: keyword + + - do: + indices.create: + include_type_name: true + index: documents_index + body: + mappings: + documents_type: + properties: + foo: + type: keyword + + - do: + index: + index: queries_index + type: queries_type + id: test_percolator + body: + query: + match_all: {} + + - do: + index: + index: documents_index + type: documents_type + id: some_id + body: + foo: bar + + - do: + indices.refresh: {} + + - do: + search: + rest_total_hits_as_int: true + body: + - query: + percolate: + field: query + document: + document_type: queries_type + foo: bar + - match: { hits.total: 1 } + + - do: + msearch: + rest_total_hits_as_int: true + body: + - index: queries_index + - query: + percolate: + field: query + document_type: queries_type + document: + foo: bar + - match: { responses.0.hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + body: + - query: + percolate: + field: query + index: documents_index + type: documents_type + id: some_id + - match: { hits.total: 1 } + + - do: + msearch: + rest_total_hits_as_int: true + body: + - index: queries_index + - query: + percolate: + field: query + index: documents_index + type: documents_type + id: some_id + - match: { responses.0.hits.total: 1 } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java index 7d3ec94811c5a..0dbbb9f90f1fa 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java @@ -86,7 +86,7 @@ public RankEvalSpec getRankEvalSpec() { } /** - * Set the the specification of the ranking evaluation. + * Set the specification of the ranking evaluation. */ public void setRankEvalSpec(RankEvalSpec task) { this.rankingEvaluationSpec = task; diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 36f327a5b6c30..782ff8f922cb0 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -45,7 +45,7 @@ run { setting 'reindex.remote.whitelist', '127.0.0.1:*' } -unitTest { +test { /* * We have to disable setting the number of available processors as tests in the * same JVM randomize processors and will step on each other if we allow them to @@ -138,7 +138,7 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) { /* Use a closure on the string to delay evaluation until right before we * run the integration tests so that we can be sure that the file is * ready. 
*/ - systemProperty "es${version}.port", "${ -> fixture.addressAndPort }" + nonInputProperties.systemProperty "es${version}.port", "${ -> fixture.addressAndPort }" } } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java index e4b6b6a07d9ce..4928e4fd01f26 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java @@ -186,7 +186,7 @@ static void validateAgainstAliases(SearchRequest source, IndexRequest destinatio * it. This is the same sort of dance that TransportIndexRequest * uses to decide to autocreate the index. */ - target = indexNameExpressionResolver.concreteIndexNames(clusterState, destination)[0]; + target = indexNameExpressionResolver.concreteWriteIndex(clusterState, destination).getName(); } for (String sourceIndex : indexNameExpressionResolver.concreteIndexNames(clusterState, source)) { if (sourceIndex.equals(target)) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 92b2180f4da6c..bdedc65b7a6d3 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -276,7 +276,7 @@ public void testBulkResponseSetsLotsOfStatus() { versionConflicts++; responses[i] = new BulkItemResponse(i, randomFrom(DocWriteRequest.OpType.values()), new Failure(shardId.getIndexName(), "type", "id" + i, - new VersionConflictEngineException(shardId, "type", "id", "test"))); + new VersionConflictEngineException(shardId, "id", "test"))); continue; } boolean createdResponse; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java index c077c992beb60..917d196b6e9fb 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java @@ -81,7 +81,7 @@ public void testAbortOnVersionConflict() throws Exception { BulkByScrollResponse response = copy.get(); assertThat(response, matcher().batches(1).versionConflicts(1).failures(1).created(99)); for (Failure failure: response.getBulkFailures()) { - assertThat(failure.getMessage(), containsString("VersionConflictEngineException[[_doc][")); + assertThat(failure.getMessage(), containsString("VersionConflictEngineException[[")); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java index 19c5739bbc6ce..8264d4342c993 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; +import 
org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -48,6 +49,9 @@ public class ReindexSourceTargetValidationTests extends ESTestCase { private static final ClusterState STATE = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder() .put(index("target", "target_alias", "target_multi"), true) .put(index("target2", "target_multi"), true) + .put(index("target_with_write_index", true, "target_multi_with_write_index"), true) + .put(index("target2_without_write_index", "target_multi_with_write_index"), true) + .put(index("qux", false, "target_alias_with_write_index_disabled"), true) .put(index("foo"), true) .put(index("bar"), true) .put(index("baz"), true) @@ -78,12 +82,26 @@ public void testAliasesContainTarget() { succeeds("target", "source", "source2", "source_multi"); } - public void testTargetIsAlias() { + public void testTargetIsAliasToMultipleIndicesWithoutWriteAlias() { Exception e = expectThrows(IllegalArgumentException.class, () -> succeeds("target_multi", "foo")); - assertThat(e.getMessage(), containsString("Alias [target_multi] has more than one indices associated with it [[")); - // The index names can come in either order - assertThat(e.getMessage(), containsString("target")); - assertThat(e.getMessage(), containsString("target2")); + assertThat(e.getMessage(), containsString("no write index is defined for alias [target_multi]. The write index may be explicitly " + + "disabled using is_write_index=false or the alias points to multiple indices without one being designated as a " + + "write index")); + } + + public void testTargetIsAliasWithWriteIndexDisabled() { + Exception e = expectThrows(IllegalArgumentException.class, () -> succeeds("target_alias_with_write_index_disabled", "foo")); + assertThat(e.getMessage(), containsString("no write index is defined for alias [target_alias_with_write_index_disabled]. " + + "The write index may be explicitly disabled using is_write_index=false or the alias points to multiple indices without one " + + "being designated as a write index")); + succeeds("qux", "foo"); // writing directly into the index of which this is the alias works though + } + + public void testTargetIsWriteAlias() { + succeeds("target_multi_with_write_index", "foo"); + succeeds("target_multi_with_write_index", "target2_without_write_index"); + fails("target_multi_with_write_index", "target_multi_with_write_index"); + fails("target_multi_with_write_index", "target_with_write_index"); } public void testRemoteInfoSkipsValidation() { @@ -97,7 +115,7 @@ public void testRemoteInfoSkipsValidation() { private void fails(String target, String... sources) { Exception e = expectThrows(ActionRequestValidationException.class, () -> succeeds(target, sources)); - assertThat(e.getMessage(), containsString("reindex cannot write into an index its reading from [target]")); + assertThat(e.getMessage(), containsString("reindex cannot write into an index its reading from")); } private void succeeds(String target, String... sources) { @@ -110,12 +128,16 @@ private void succeeds(RemoteInfo remoteInfo, String target, String... sources) { } private static IndexMetaData index(String name, String... aliases) { + return index(name, null, aliases); + } + + private static IndexMetaData index(String name, @Nullable Boolean writeIndex, String... 
aliases) { IndexMetaData.Builder builder = IndexMetaData.builder(name).settings(Settings.builder() .put("index.version.created", Version.CURRENT.id) .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1)); for (String alias: aliases) { - builder.putAlias(AliasMetaData.builder(alias).build()); + builder.putAlias(AliasMetaData.builder(alias).writeIndex(writeIndex).build()); } return builder.build(); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java index 6572313308b32..58067cd2cdbbf 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; @@ -37,6 +38,7 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; @@ -191,13 +193,15 @@ private ListTasksResponse rethrottleTask(TaskId taskToRethrottle, float newReque assertThat(rethrottleResponse.getTasks(), hasSize(1)); response.set(rethrottleResponse); } catch (ElasticsearchException e) { - if (e.getCause() instanceof IllegalArgumentException) { - // We want to retry in this case so we throw an assertion error - logger.info("caught unprepared task, retrying until prepared"); - throw new AssertionError("Rethrottle request for task [" + taskToRethrottle.getId() + "] failed", e); - } else { + Throwable unwrapped = ExceptionsHelper.unwrap(e, IllegalArgumentException.class); + if (unwrapped == null) { throw e; } + // We want to retry in this case so we throw an assertion error + assertThat(unwrapped.getMessage(), equalTo("task [" + taskToRethrottle.getId() + + "] has not yet been initialized to the point where it knows how to rethrottle itself")); + logger.info("caught unprepared task, retrying until prepared"); + throw new AssertionError("Rethrottle request for task [" + taskToRethrottle.getId() + "] failed", e); } }); diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml index dd29e7701ba1c..d11f160bcf571 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml @@ -129,7 +129,7 @@ - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} # Use a regex so we don't mind if the current version isn't always 1. Sometimes it comes out 2. 
- - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - gte: { took: 0 } @@ -185,7 +185,7 @@ - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} - - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - gte: { took: 0 } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yml index 312a88ace5e92..9ef6c1a90c400 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yml @@ -160,7 +160,7 @@ - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} # Use a regex so we don't mind if the version isn't always 1. Sometimes it comes out 2. - - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.document.already.exists.\\(current.version.\\[\\d+\\]\\)/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.document.already.exists.\\(current.version.\\[\\d+\\]\\)/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: dest} - gte: { took: 0 } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml index 15bc62214ebfb..08c8465c40960 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml @@ -109,7 +109,7 @@ - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} # Use a regex so we don't mind if the current version isn't always 1. Sometimes it comes out 2. 
- - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - gte: { took: 0 } @@ -151,7 +151,7 @@ - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} - - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - gte: { took: 0 } diff --git a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java index 4728e1b0d9eb6..d314ce912ef66 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java @@ -83,7 +83,7 @@ public class URLRepository extends BlobStoreRepository { */ public URLRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry) { - super(metadata, environment.settings(), namedXContentRegistry); + super(metadata, environment.settings(), false, namedXContentRegistry); if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) { throw new RepositoryException(metadata.name(), "missing url"); diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 787a4b6e9ecff..e08bba94d57e6 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -47,7 +47,7 @@ dependencyLicenses { mapping from: /netty-.*/, to: 'netty' } -unitTest { +test { /* * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each * other if we allow them to set the number of available processors as it's set-once in Netty. 
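Note on the transport-netty4 build change above: the unitTest task is renamed to test, and the surrounding comment explains that Netty's available-processors value is set-once, so tests sharing a JVM must not each try to set it. The visible part of the block ends mid-comment; what follows is only a minimal sketch of the intent, assuming the es.set.netty.runtime.available.processors system property that the Elasticsearch build uses for this purpose elsewhere (an assumption, since the property line itself is not shown in this hunk):

test {
  // Tests in the same JVM randomize processor counts and Netty accepts the
  // value only once, so do not let this module's tests set it.
  systemProperty 'es.set.netty.runtime.available.processors', 'false'
}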
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java index 0a9e5f8dfc181..83d4c3419ef64 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java @@ -118,6 +118,7 @@ public Settings onNodeStopped(String nodeName) throws IOException { public void testClearVotingTombstonesNotWaitingForRemoval() throws Exception { internalCluster().setBootstrapMasterNodeIndex(2); List nodes = internalCluster().startNodes(3); + ensureStableCluster(3); RestClient restClient = getRestClient(); Response response = restClient.performRequest(new Request("POST", "/_cluster/voting_config_exclusions/" + nodes.get(2))); assertThat(response.getStatusLine().getStatusCode(), is(200)); @@ -131,6 +132,7 @@ public void testClearVotingTombstonesNotWaitingForRemoval() throws Exception { public void testClearVotingTombstonesWaitingForRemoval() throws Exception { internalCluster().setBootstrapMasterNodeIndex(2); List nodes = internalCluster().startNodes(3); + ensureStableCluster(3); RestClient restClient = getRestClient(); String nodeToWithdraw = nodes.get(randomIntBetween(0, 2)); Response response = restClient.performRequest(new Request("POST", "/_cluster/voting_config_exclusions/" + nodeToWithdraw)); @@ -145,6 +147,7 @@ public void testClearVotingTombstonesWaitingForRemoval() throws Exception { public void testFailsOnUnknownNode() throws Exception { internalCluster().setBootstrapMasterNodeIndex(2); internalCluster().startNodes(3); + ensureStableCluster(3); RestClient restClient = getRestClient(); try { restClient.performRequest(new Request("POST", "/_cluster/voting_config_exclusions/invalid")); @@ -157,4 +160,18 @@ public void testFailsOnUnknownNode() throws Exception { ); } } + + public void testRemoveTwoNodesAtOnce() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(2); + List nodes = internalCluster().startNodes(3); + ensureStableCluster(3); + RestClient restClient = getRestClient(); + Response response = restClient.performRequest(new Request("POST", "/_cluster/voting_config_exclusions/" + + nodes.get(2) + "," + nodes.get(0))); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getEntity().getContentLength(), is(0L)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(0))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(2))); + ensureStableCluster(1); + } } diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index 90132e2c58fcd..19e484c0d2168 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -20,7 +20,7 @@ import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis */ esplugin { - description 'The ICU Analysis plugin integrates Lucene ICU module into elasticsearch, adding ICU relates analysis components.' + description 'The ICU Analysis plugin integrates the Lucene ICU module into Elasticsearch, adding ICU-related analysis components.' 
classname 'org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin' hasClientJar = true } diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 927a881df73f1..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea440f53a9e858c2ed87927c63d57eb70e5af9ee \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..51bd478ce4cf1 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0.jar.sha1 @@ -0,0 +1 @@ +1bc195d1b036b59cdf7704f758df8f43c7e229c4 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 075bd24f3e609..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b016fdfb8f1ac413d902cd4d244b295c9f01e610 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..6c60a7f069a22 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0.jar.sha1 @@ -0,0 +1 @@ +760d9451396c45fdb271750f4e52acc2ff5e7bb2 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index d5046efc95935..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -395c8291751ffa7fbbb96faf578408a33a34ad1d \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..e52dfa93565a1 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0.jar.sha1 @@ -0,0 +1 @@ +5bb5a241d95d9753a5e9fb341476306cb96a34a9 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 19a0a9a198ce0..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6957c71604356a1fbf0e13794595e4ea42126dd7 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..f802e93d17027 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0.jar.sha1 @@ -0,0 +1 @@ +e3384e1b5c1e5f39026d3d6e48e00df84f614911 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 
b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 91597e3941158..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09f42235a8daca4ca8ea604314c2eb9de51b9e98 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..0c427cb38be75 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0.jar.sha1 @@ -0,0 +1 @@ +3e413379d7e61eb43dee64ec5e756cbeb3478a05 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index a868f7d5a053f..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7cf7eeb685a2060e97f50551236cfcaf39990083 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..ae5d443c7d5ad --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0.jar.sha1 @@ -0,0 +1 @@ +50d81559e2604da31ca7961581fda41257ab0600 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 4f5f80aa96b91..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ff163fb06ec3e47d501b290d8f69cca88a0341cc \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..af556321b7cf2 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0.jar.sha1 @@ -0,0 +1 @@ +a0b165cb156178a0a91baa4b8d2f4c37278d92e0 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java index bc6f6b97835eb..9e080af0ed565 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java @@ -48,7 +48,7 @@ public class AzureDiscoveryPlugin extends Plugin implements DiscoveryPlugin { public AzureDiscoveryPlugin(Settings settings) { this.settings = settings; - deprecationLogger.deprecated("azure classic discovery plugin is deprecated. 
Use azure arm discovery plugin instead"); + deprecationLogger.deprecated("azure classic discovery plugin is deprecated."); logger.trace("starting azure classic discovery plugin..."); } diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 31a20f09f0b55..107d1ecdde363 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -33,8 +33,8 @@ dependencies { compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" compile "commons-codec:commons-codec:${versions.commonscodec}" - compile 'com.fasterxml.jackson.core:jackson-databind:2.6.7.1' - compile 'com.fasterxml.jackson.core:jackson-annotations:2.6.0' + compile 'com.fasterxml.jackson.core:jackson-databind:2.8.11.3' + compile "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" } dependencyLicenses { @@ -64,7 +64,7 @@ task writeTestJavaPolicy { } } -unitTest { +test { dependsOn writeTestJavaPolicy // this is needed for insecure plugins, remove if possible! systemProperty 'tests.artifact', project.name diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.6.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.6.0.jar.sha1 deleted file mode 100644 index bc4cae402d631..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.6.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a0990e2e812ac6639b6ce955c91b13228500476e \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.8.11.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.8.11.jar.sha1 new file mode 100644 index 0000000000000..30e7d1a7b1a74 --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.8.11.jar.sha1 @@ -0,0 +1 @@ +391de20b4e29cb3fb07d2454ace64be2c82ac91f \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.6.7.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.6.7.1.jar.sha1 deleted file mode 100644 index 7d82dbddc52d0..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.6.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -306775aeb5164835a1dcbdf3f945587045cfb3b5 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.8.11.3.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.8.11.3.jar.sha1 new file mode 100644 index 0000000000000..253a1361931c3 --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.8.11.3.jar.sha1 @@ -0,0 +1 @@ +844df5aba5a1a56e00905b165b12bb34116ee858 \ No newline at end of file diff --git a/plugins/discovery-ec2/src/main/plugin-metadata/plugin-security.policy b/plugins/discovery-ec2/src/main/plugin-metadata/plugin-security.policy index 8f0278de3923c..f29319b4f770d 100644 --- a/plugins/discovery-ec2/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/discovery-ec2/src/main/plugin-metadata/plugin-security.policy @@ -27,4 +27,6 @@ grant { // ec2 client opens socket connections for discovery permission java.net.SocketPermission "*", "connect"; + + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 11d4a7e25fe46..697cc3780a1fd 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -29,7 +29,7 @@ check { dependsOn 'qa:gce:check' } -unitTest { +test { // this is needed for insecure plugins, remove if possible! 
systemProperty 'tests.artifact', project.name } diff --git a/plugins/examples/custom-suggester/build.gradle b/plugins/examples/custom-suggester/build.gradle index 7c0d694e1b5ff..977e467391d8b 100644 --- a/plugins/examples/custom-suggester/build.gradle +++ b/plugins/examples/custom-suggester/build.gradle @@ -32,4 +32,4 @@ integTestCluster { } // this plugin has no unit tests, only rest tests -tasks.unitTest.enabled = false +tasks.test.enabled = false diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index d68a9ee397645..95928c472ca0d 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -35,4 +35,4 @@ if (System.getProperty('tests.distribution') == null) { integTestCluster.distribution = 'oss' } -unitTest.enabled = false +test.enabled = false diff --git a/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java b/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java index 358d2cb00ab14..67819ce95333f 100644 --- a/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java +++ b/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java @@ -191,7 +191,7 @@ public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext r } data = ((AtomicNumericFieldData) fd).getDoubleValues(); } - if (false == data.advanceExact(topDocs.scoreDocs[i].doc)) { + if (false == data.advanceExact(topDocs.scoreDocs[i].doc - leaf.docBase)) { throw new IllegalArgumentException("document [" + topDocs.scoreDocs[i].doc + "] does not have the field [" + context.factorField.getFieldName() + "]"); } diff --git a/plugins/examples/rest-handler/build.gradle b/plugins/examples/rest-handler/build.gradle index 025e570bedea4..98dd093ac17a3 100644 --- a/plugins/examples/rest-handler/build.gradle +++ b/plugins/examples/rest-handler/build.gradle @@ -27,7 +27,7 @@ esplugin { } // No unit tests in this example -unitTest.enabled = false +test.enabled = false task exampleFixture(type: org.elasticsearch.gradle.test.AntFixture) { dependsOn testClasses @@ -40,7 +40,7 @@ integTestCluster { dependsOn exampleFixture } integTestRunner { - systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }" + nonInputProperties.systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }" } testingConventions.naming { diff --git a/plugins/examples/script-expert-scoring/build.gradle b/plugins/examples/script-expert-scoring/build.gradle index b054ab47a3198..e9da62acdcff4 100644 --- a/plugins/examples/script-expert-scoring/build.gradle +++ b/plugins/examples/script-expert-scoring/build.gradle @@ -26,5 +26,5 @@ esplugin { noticeFile rootProject.file('NOTICE.txt') } -unitTest.enabled = false +test.enabled = false diff --git a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java index 18289d321a513..a616b1743b731 100644 --- a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java +++ b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java @@ -19,11 +19,6 @@ package org.elasticsearch.example.expertscript; -import 
java.io.IOException; -import java.io.UncheckedIOException; -import java.util.Collection; -import java.util.Map; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; @@ -36,6 +31,11 @@ import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.search.lookup.SearchLookup; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.Map; + /** * An example script plugin that adds a {@link ScriptEngine} implementing expert scoring. */ diff --git a/plugins/examples/security-authorization-engine/build.gradle b/plugins/examples/security-authorization-engine/build.gradle index d0d227e221b68..f869e4872ddc3 100644 --- a/plugins/examples/security-authorization-engine/build.gradle +++ b/plugins/examples/security-authorization-engine/build.gradle @@ -5,11 +5,13 @@ esplugin { description 'An example spi extension plugin for security that implements an Authorization Engine' classname 'org.elasticsearch.example.AuthorizationEnginePlugin' extendedPlugins = ['x-pack-security'] + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') } dependencies { - compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" - testCompile "org.elasticsearch.client.x-pack-transport:${version}" + compileOnly "org.elasticsearch.plugin:x-pack-core:${versions.elasticsearch}" + testCompile "org.elasticsearch.client:x-pack-transport:${versions.elasticsearch}" } @@ -19,6 +21,8 @@ integTestRunner { integTestCluster { dependsOn buildZip + distribution = 'default' + setting 'xpack.security.enabled', 'true' setting 'xpack.ilm.enabled', 'false' setting 'xpack.ml.enabled', 'false' diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index f75d7884db5aa..316bc850aec6d 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -25,7 +25,6 @@ esplugin { versions << [ 'tika': '1.19.1', 'pdfbox': '2.0.12', - 'bouncycastle': '1.59', 'poi': '4.0.0', 'mime4j': '0.8.2' ] diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.59.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.59.jar.sha1 deleted file mode 100644 index dde0a237a186b..0000000000000 --- a/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.59.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -db389ade95f48592908a84e7050a691c8834723c \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.61.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.61.jar.sha1 new file mode 100644 index 0000000000000..a41f43da5908b --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.61.jar.sha1 @@ -0,0 +1 @@ +69a1ea4bead1baadd1a1e00526de050c62ea7d82 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.59.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.59.jar.sha1 deleted file mode 100644 index be5e561ee9a76..0000000000000 --- a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.59.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9cef0aab8a4bb849a8476c058ce3ff302aba3fff \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.61.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.61.jar.sha1 new file mode 100644 index 0000000000000..7ae081447a929 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.61.jar.sha1 @@ -0,0 +1 @@ 
+89bb3aa5b98b48e584eee2a7401b7682a46779b4 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.59.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.59.jar.sha1 deleted file mode 100644 index aa42dbb8f6906..0000000000000 --- a/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.59.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2507204241ab450456bdb8e8c0a8f986e418bd99 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.61.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.61.jar.sha1 new file mode 100644 index 0000000000000..0ccfcd61a0e59 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.61.jar.sha1 @@ -0,0 +1 @@ +00df4b474e71be02c1349c3292d98886f888d1f7 \ No newline at end of file diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index 2aadfd2218590..482e12e72d1b6 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -54,6 +54,7 @@ import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText.AnnotationToken; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.search.fetch.FetchSubPhase.HitContext; import java.io.IOException; import java.io.Reader; @@ -315,47 +316,13 @@ public AnnotationToken getAnnotation(int index) { // When asked to tokenize plain-text versions by the highlighter it tokenizes the // original markup form in order to inject annotations. 
public static final class AnnotatedHighlighterAnalyzer extends AnalyzerWrapper { - private Analyzer delegate; - private AnnotatedText[] annotations; - public AnnotatedHighlighterAnalyzer(Analyzer delegate){ + private final Analyzer delegate; + private final HitContext hitContext; + public AnnotatedHighlighterAnalyzer(Analyzer delegate, HitContext hitContext){ super(delegate.getReuseStrategy()); this.delegate = delegate; + this.hitContext = hitContext; } - - public void init(String[] markedUpFieldValues) { - this.annotations = new AnnotatedText[markedUpFieldValues.length]; - for (int i = 0; i < markedUpFieldValues.length; i++) { - annotations[i] = AnnotatedText.parse(markedUpFieldValues[i]); - } - } - - public String [] getPlainTextValuesForHighlighter(){ - String [] result = new String[annotations.length]; - for (int i = 0; i < annotations.length; i++) { - result[i] = annotations[i].textMinusMarkup; - } - return result; - } - - public AnnotationToken[] getIntersectingAnnotations(int start, int end) { - List intersectingAnnotations = new ArrayList<>(); - int fieldValueOffset =0; - for (AnnotatedText fieldValueAnnotations : this.annotations) { - //This is called from a highlighter where all of the field values are concatenated - // so each annotation offset will need to be adjusted so that it takes into account - // the previous values AND the MULTIVAL delimiter - for (AnnotationToken token : fieldValueAnnotations.annotations) { - if(token.intersects(start - fieldValueOffset , end - fieldValueOffset)) { - intersectingAnnotations.add(new AnnotationToken(token.offset + fieldValueOffset, - token.endOffset + fieldValueOffset, token.value)); - } - } - //add 1 for the fieldvalue separator character - fieldValueOffset +=fieldValueAnnotations.textMinusMarkup.length() +1; - } - return intersectingAnnotations.toArray(new AnnotationToken[intersectingAnnotations.size()]); - } - @Override public Analyzer getWrappedAnalyzer(String fieldName) { return delegate; @@ -364,10 +331,11 @@ public Analyzer getWrappedAnalyzer(String fieldName) { @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { AnnotationsInjector injector = new AnnotationsInjector(components.getTokenStream()); + AnnotatedText[] annotations = (AnnotatedText[]) hitContext.cache().get(AnnotatedText.class.getName()); AtomicInteger readerNum = new AtomicInteger(0); return new TokenStreamComponents(r -> { String plainText = readToString(r); - AnnotatedText at = this.annotations[readerNum.getAndIncrement()]; + AnnotatedText at = annotations[readerNum.getAndIncrement()]; assert at.textMinusMarkup.equals(plainText); injector.setAnnotations(at); components.getSource().accept(new StringReader(at.textMinusMarkup)); diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java index ad1acc85031dd..7d360dd0b9bac 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java @@ -23,7 +23,7 @@ import org.apache.lucene.search.uhighlight.Passage; import org.apache.lucene.search.uhighlight.PassageFormatter; import org.apache.lucene.search.uhighlight.Snippet; -import 
org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer; +import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText; import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText.AnnotationToken; import java.io.UnsupportedEncodingException; @@ -42,11 +42,11 @@ public class AnnotatedPassageFormatter extends PassageFormatter { public static final String SEARCH_HIT_TYPE = "_hit_term"; private final Encoder encoder; - private AnnotatedHighlighterAnalyzer annotatedHighlighterAnalyzer; + AnnotatedText[] annotations; - public AnnotatedPassageFormatter(AnnotatedHighlighterAnalyzer annotatedHighlighterAnalyzer, Encoder encoder) { - this.annotatedHighlighterAnalyzer = annotatedHighlighterAnalyzer; + public AnnotatedPassageFormatter(AnnotatedText[] annotations, Encoder encoder) { this.encoder = encoder; + this.annotations = annotations; } static class MarkupPassage { @@ -158,7 +158,7 @@ public Snippet[] format(Passage[] passages, String content) { int pos; int j = 0; for (Passage passage : passages) { - AnnotationToken [] annotations = annotatedHighlighterAnalyzer.getIntersectingAnnotations(passage.getStartOffset(), + AnnotationToken [] annotations = getIntersectingAnnotations(passage.getStartOffset(), passage.getEndOffset()); MarkupPassage mergedMarkup = mergeAnnotations(annotations, passage); @@ -194,6 +194,27 @@ public Snippet[] format(Passage[] passages, String content) { } return snippets; } + + public AnnotationToken[] getIntersectingAnnotations(int start, int end) { + List intersectingAnnotations = new ArrayList<>(); + int fieldValueOffset =0; + for (AnnotatedText fieldValueAnnotations : this.annotations) { + //This is called from a highlighter where all of the field values are concatenated + // so each annotation offset will need to be adjusted so that it takes into account + // the previous values AND the MULTIVAL delimiter + for (int i = 0; i < fieldValueAnnotations.numAnnotations(); i++) { + AnnotationToken token = fieldValueAnnotations.getAnnotation(i); + if (token.intersects(start - fieldValueOffset, end - fieldValueOffset)) { + intersectingAnnotations + .add(new AnnotationToken(token.offset + fieldValueOffset, token.endOffset + + fieldValueOffset, token.value)); + } + } + //add 1 for the fieldvalue separator character + fieldValueOffset +=fieldValueAnnotations.textMinusMarkup.length() +1; + } + return intersectingAnnotations.toArray(new AnnotationToken[intersectingAnnotations.size()]); + } private void append(StringBuilder dest, String content, int start, int end) { dest.append(encoder.encodeText(content.substring(start, end))); diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java index d93316c78921a..2ba7838b90950 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java @@ -25,24 +25,22 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer; +import 
org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText; import org.elasticsearch.search.fetch.FetchSubPhase.HitContext; import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight.Field; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -import java.util.Arrays; +import java.util.ArrayList; import java.util.List; public class AnnotatedTextHighlighter extends UnifiedHighlighter { public static final String NAME = "annotated"; - - AnnotatedHighlighterAnalyzer annotatedHighlighterAnalyzer = null; @Override - protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type) { - annotatedHighlighterAnalyzer = new AnnotatedHighlighterAnalyzer(super.getAnalyzer(docMapper, type)); - return annotatedHighlighterAnalyzer; + protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type, HitContext hitContext) { + return new AnnotatedHighlighterAnalyzer(super.getAnalyzer(docMapper, type, hitContext), hitContext); } // Convert the marked-up values held on-disk to plain-text versions for highlighting @@ -51,14 +49,26 @@ protected List loadFieldValues(MappedFieldType fieldType, Field field, S throws IOException { List fieldValues = super.loadFieldValues(fieldType, field, context, hitContext); String[] fieldValuesAsString = fieldValues.toArray(new String[fieldValues.size()]); - annotatedHighlighterAnalyzer.init(fieldValuesAsString); - return Arrays.asList((Object[]) annotatedHighlighterAnalyzer.getPlainTextValuesForHighlighter()); + + AnnotatedText[] annotations = new AnnotatedText[fieldValuesAsString.length]; + for (int i = 0; i < fieldValuesAsString.length; i++) { + annotations[i] = AnnotatedText.parse(fieldValuesAsString[i]); + } + // Store the annotations in the hitContext + hitContext.cache().put(AnnotatedText.class.getName(), annotations); + + ArrayList result = new ArrayList<>(annotations.length); + for (int i = 0; i < annotations.length; i++) { + result.add(annotations[i].textMinusMarkup); + } + return result; } @Override - protected PassageFormatter getPassageFormatter(SearchContextHighlight.Field field, Encoder encoder) { - return new AnnotatedPassageFormatter(annotatedHighlighterAnalyzer, encoder); - + protected PassageFormatter getPassageFormatter(HitContext hitContext,SearchContextHighlight.Field field, Encoder encoder) { + // Retrieve the annotations from the hitContext + AnnotatedText[] annotations = (AnnotatedText[]) hitContext.cache().get(AnnotatedText.class.getName()); + return new AnnotatedPassageFormatter(annotations, encoder); } } diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java index ca29521802fe2..8630dc870f8c7 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java @@ -40,18 +40,20 @@ import org.apache.lucene.search.highlight.DefaultEncoder; import org.apache.lucene.search.uhighlight.CustomSeparatorBreakIterator; import org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter; -import org.apache.lucene.search.uhighlight.PassageFormatter; import org.apache.lucene.search.uhighlight.Snippet; import org.apache.lucene.search.uhighlight.SplittingBreakIterator; import 
org.apache.lucene.store.Directory; import org.elasticsearch.common.Strings; import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer; +import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText; import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotationAnalyzerWrapper; +import org.elasticsearch.search.fetch.FetchSubPhase.HitContext; import org.elasticsearch.search.fetch.subphase.highlight.AnnotatedPassageFormatter; import org.elasticsearch.test.ESTestCase; import java.net.URLEncoder; import java.text.BreakIterator; +import java.util.ArrayList; import java.util.Locale; import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR; @@ -63,13 +65,24 @@ private void assertHighlightOneDoc(String fieldName, String []markedUpInputs, Query query, Locale locale, BreakIterator breakIterator, int noMatchSize, String[] expectedPassages) throws Exception { + // Annotated fields wrap the usual analyzer with one that injects extra tokens Analyzer wrapperAnalyzer = new AnnotationAnalyzerWrapper(new StandardAnalyzer()); - AnnotatedHighlighterAnalyzer hiliteAnalyzer = new AnnotatedHighlighterAnalyzer(wrapperAnalyzer); - hiliteAnalyzer.init(markedUpInputs); - PassageFormatter passageFormatter = new AnnotatedPassageFormatter(hiliteAnalyzer,new DefaultEncoder()); - String []plainTextForHighlighter = hiliteAnalyzer.getPlainTextValuesForHighlighter(); + HitContext mockHitContext = new HitContext(); + AnnotatedHighlighterAnalyzer hiliteAnalyzer = new AnnotatedHighlighterAnalyzer(wrapperAnalyzer, mockHitContext); + + AnnotatedText[] annotations = new AnnotatedText[markedUpInputs.length]; + for (int i = 0; i < markedUpInputs.length; i++) { + annotations[i] = AnnotatedText.parse(markedUpInputs[i]); + } + mockHitContext.cache().put(AnnotatedText.class.getName(), annotations); + AnnotatedPassageFormatter passageFormatter = new AnnotatedPassageFormatter(annotations,new DefaultEncoder()); + + ArrayList plainTextForHighlighter = new ArrayList<>(annotations.length); + for (int i = 0; i < annotations.length; i++) { + plainTextForHighlighter.add(annotations[i].textMinusMarkup); + } Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(wrapperAnalyzer); @@ -94,7 +107,7 @@ private void assertHighlightOneDoc(String fieldName, String []markedUpInputs, iw.close(); TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 1, Sort.INDEXORDER); assertThat(topDocs.totalHits.value, equalTo(1L)); - String rawValue = Strings.arrayToDelimitedString(plainTextForHighlighter, String.valueOf(MULTIVAL_SEP_CHAR)); + String rawValue = Strings.collectionToDelimitedString(plainTextForHighlighter, String.valueOf(MULTIVAL_SEP_CHAR)); CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, hiliteAnalyzer, null, passageFormatter, locale, diff --git a/plugins/mapper-annotated-text/src/test/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml b/plugins/mapper-annotated-text/src/test/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml index a27e6c44a7fa2..63516516252a9 100644 --- a/plugins/mapper-annotated-text/src/test/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml +++ b/plugins/mapper-annotated-text/src/test/resources/rest-api-spec/test/mapper_annotatedtext/10_basic.yml @@ -42,3 +42,77 @@ body: { "query" : {"term" : { "text" : "quick" } }, "highlight" : { "type" : "annotated", "require_field_match": false, "fields" : { "text" : {} 
} } } - match: {hits.hits.0.highlight.text.0: "The [quick](_hit_term=quick) brown fox is brown."} + +--- +"issue 39395 thread safety issue -requires multiple calls to reveal": + - skip: + version: " - 6.4.99" + reason: Annotated text type introduced in 6.5.0 + + - do: + indices.create: + index: annotated + body: + settings: + number_of_shards: "5" + number_of_replicas: "0" + mappings: + properties: + my_field: + type: annotated_text + + - do: + index: + index: annotated + id: 1 + body: + "my_field" : "[A](~MARK0&~MARK0) [B](~MARK1)" + - do: + index: + index: annotated + id: 2 + body: + "my_field" : "[A](~MARK0) [C](~MARK2)" + refresh: true + - do: + search: + request_cache: false + body: { "query" : {"match_phrase" : { "my_field" : {"query": "~MARK0", "analyzer": "whitespace"} } }, "highlight" : { "type" : "annotated", "fields" : { "my_field" : {} } } } + - match: {_shards.failed: 0} + + - do: + search: + request_cache: false + body: { "query" : {"match_phrase" : { "my_field" : {"query": "~MARK0", "analyzer": "whitespace"} } }, "highlight" : { "type" : "annotated", "fields" : { "my_field" : {} } } } + - match: {_shards.failed: 0} + + - do: + search: + request_cache: false + body: { "query" : {"match_phrase" : { "my_field" : {"query": "~MARK0", "analyzer": "whitespace"} } }, "highlight" : { "type" : "annotated", "fields" : { "my_field" : {} } } } + - match: {_shards.failed: 0} + + - do: + search: + request_cache: false + body: { "query" : {"match_phrase" : { "my_field" : {"query": "~MARK0", "analyzer": "whitespace"} } }, "highlight" : { "type" : "annotated", "fields" : { "my_field" : {} } } } + - match: {_shards.failed: 0} + + - do: + search: + request_cache: false + body: { "query" : {"match_phrase" : { "my_field" : {"query": "~MARK0", "analyzer": "whitespace"} } }, "highlight" : { "type" : "annotated", "fields" : { "my_field" : {} } } } + - match: {_shards.failed: 0} + + - do: + search: + request_cache: false + body: { "query" : {"match_phrase" : { "my_field" : {"query": "~MARK0", "analyzer": "whitespace"} } }, "highlight" : { "type" : "annotated", "fields" : { "my_field" : {} } } } + - match: {_shards.failed: 0} + + - do: + search: + request_cache: false + body: { "query" : {"match_phrase" : { "my_field" : {"query": "~MARK0", "analyzer": "whitespace"} } }, "highlight" : { "type" : "annotated", "fields" : { "my_field" : {} } } } + - match: {_shards.failed: 0} + diff --git a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/resources/rest-api-spec/test/repository_azure/10_repository.yml b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/resources/rest-api-spec/test/repository_azure/10_repository.yml index df3e1b3216ab9..92866190959e6 100644 --- a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/resources/rest-api-spec/test/repository_azure/10_repository.yml +++ b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/resources/rest-api-spec/test/repository_azure/10_repository.yml @@ -23,7 +23,7 @@ setup: - match: { repository.settings.container: ${container} } - match: { repository.settings.client : "integration_test" } - - match: { repository.settings.base_path : ${base_path} } + - match: { repository.settings.base_path : "${base_path}" } # Index documents - do: diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 28af721b1fdd6..078e0e698aa51 100644 --- 
a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -21,7 +21,6 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -82,16 +81,14 @@ public static final class Repository { private final BlobPath basePath; private final ByteSizeValue chunkSize; - private final boolean compress; private final Environment environment; private final AzureStorageService storageService; private final boolean readonly; public AzureRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, AzureStorageService storageService) { - super(metadata, environment.settings(), namedXContentRegistry); + super(metadata, environment.settings(), Repository.COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); - this.compress = Repository.COMPRESS_SETTING.get(metadata.settings()); this.environment = environment; this.storageService = storageService; @@ -132,7 +129,7 @@ protected AzureBlobStore createBlobStore() throws URISyntaxException, StorageExc logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", - blobStore, chunkSize, compress, basePath)); + blobStore, chunkSize, isCompress(), basePath)); return blobStore; } @@ -141,14 +138,6 @@ protected BlobPath basePath() { return basePath; } - /** - * {@inheritDoc} - */ - @Override - protected boolean isCompress() { - return compress; - } - /** * {@inheritDoc} */ diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index e7c4498633145..e5af9081ca189 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -23,8 +23,8 @@ esplugin { } dependencies { - compile 'com.google.cloud:google-cloud-storage:1.55.0' - compile 'com.google.cloud:google-cloud-core:1.55.0' + compile 'com.google.cloud:google-cloud-storage:1.59.0' + compile 'com.google.cloud:google-cloud-core:1.59.0' compile 'com.google.guava:guava:20.0' compile "joda-time:joda-time:${versions.joda}" compile 'com.google.http-client:google-http-client:1.24.1' @@ -40,7 +40,7 @@ dependencies { compile 'com.google.code.gson:gson:2.7' compile 'com.google.api.grpc:proto-google-common-protos:1.12.0' compile 'com.google.api.grpc:proto-google-iam-v1:0.12.0' - compile 'com.google.cloud:google-cloud-core-http:1.55.0' + compile 'com.google.cloud:google-cloud-core-http:1.59.0' compile 'com.google.auth:google-auth-library-credentials:0.10.0' compile 'com.google.auth:google-auth-library-oauth2-http:0.10.0' compile 'com.google.oauth-client:google-oauth-client:1.24.1' diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.55.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.55.0.jar.sha1 deleted file mode 100644 index 7c00bf52c4190..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-1.55.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9e50a2a559128b7938cfd6598753d4c7383472dc \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.59.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.59.0.jar.sha1 new file mode 
100644 index 0000000000000..20e3b0c782dfd --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-1.59.0.jar.sha1 @@ -0,0 +1 @@ +f2d0c00917660b244da514f82cba96f7697f2c82 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.55.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.55.0.jar.sha1 deleted file mode 100644 index f4179201bcca2..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-http-1.55.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f26862445efffd8cb3a7f4b1f2a91b7c5143ee1f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.59.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.59.0.jar.sha1 new file mode 100644 index 0000000000000..ab4c7b7dca9c5 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-http-1.59.0.jar.sha1 @@ -0,0 +1 @@ +e2a094ec3e8acb15b99f2d4bd42ac9bbc7d9f33e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.55.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.55.0.jar.sha1 deleted file mode 100644 index abe0065ab5c81..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-storage-1.55.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ca19f55eeb96609243bf3a15fdafd497432f6673 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.59.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.59.0.jar.sha1 new file mode 100644 index 0000000000000..0f5a8633bd028 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-storage-1.59.0.jar.sha1 @@ -0,0 +1 @@ +23dc0edf739ff1fb5a91fbddd7bd1f2cbfe0f827 \ No newline at end of file diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 8e9e5656f25b4..3192691d84389 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -62,7 +62,6 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { private final Settings settings; private final GoogleCloudStorageService storageService; private final BlobPath basePath; - private final boolean compress; private final ByteSizeValue chunkSize; private final String bucket; private final String clientName; @@ -70,7 +69,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, GoogleCloudStorageService storageService) { - super(metadata, environment.settings(), namedXContentRegistry); + super(metadata, environment.settings(), getSetting(COMPRESS, metadata), namedXContentRegistry); this.settings = environment.settings(); this.storageService = storageService; @@ -85,11 +84,10 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { this.basePath = BlobPath.cleanPath(); } - this.compress = getSetting(COMPRESS, metadata); this.chunkSize = getSetting(CHUNK_SIZE, metadata); this.bucket = getSetting(BUCKET, metadata); this.clientName = CLIENT_NAME.get(metadata.settings()); - logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, compress); + 
logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, isCompress()); } @Override @@ -102,11 +100,6 @@ protected BlobPath basePath() { return basePath; } - @Override - protected boolean isCompress() { - return compress; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index db166a228b576..0e3ecde69c4f0 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; -import org.junit.AfterClass; +import org.junit.After; import java.util.Collection; import java.util.Collections; @@ -67,8 +67,8 @@ protected void afterCreationCheck(Repository repository) { assertThat(repository, instanceOf(GoogleCloudStorageRepository.class)); } - @AfterClass - public static void wipeRepository() { + @After + public void wipeRepository() { blobs.clear(); } diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 34323fb930fce..24b03621eba9a 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -24,18 +24,20 @@ import org.elasticsearch.gradle.test.RestIntegTestTask import java.nio.file.Files import java.nio.file.Path import java.nio.file.Paths - + +apply plugin: 'elasticsearch.test.fixtures' + esplugin { description 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.' 
classname 'org.elasticsearch.repositories.hdfs.HdfsPlugin' } -apply plugin: 'elasticsearch.vagrantsupport' - versions << [ 'hadoop2': '2.8.1' ] +testFixtures.useFixture ":test:fixtures:krb5kdc-fixture" + configurations { hdfsFixture } @@ -68,67 +70,28 @@ dependencyLicenses { mapping from: /hadoop-.*/, to: 'hadoop' } -// MIT Kerberos Vagrant Testing Fixture -String box = "krb5kdc" -Map vagrantEnvVars = [ - 'VAGRANT_CWD' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}", - 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', - 'VAGRANT_PROJECT_DIR' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}" -] - -task krb5kdcUpdate(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'box' - subcommand 'update' - boxName box - environmentVars vagrantEnvVars - dependsOn "vagrantCheckVersion", "virtualboxCheckVersion" -} -task krb5kdcFixture(type: org.elasticsearch.gradle.test.VagrantFixture) { - command 'up' - args '--provision', '--provider', 'virtualbox' - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcUpdate -} - -task krb5AddPrincipals { - dependsOn krb5kdcFixture -} - -List principals = [ "elasticsearch", "hdfs/hdfs.build.elastic.co" ] String realm = "BUILD.ELASTIC.CO" -for (String principal : principals) { - Task create = project.tasks.create("addPrincipal#${principal}".replace('/', '_'), org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $principal" - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcFixture - } - krb5AddPrincipals.dependsOn(create) -} // Create HDFS File System Testing Fixtures for HA/Secure combinations for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', 'secureHaHdfsFixture']) { project.tasks.create(fixtureName, org.elasticsearch.gradle.test.AntFixture) { - dependsOn project.configurations.hdfsFixture + dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture executable = new File(project.runtimeJavaHome, 'bin/java') env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }" + onlyIf { project(':test:fixtures:krb5kdc-fixture').buildFixture.enabled } waitCondition = { fixture, ant -> // the hdfs.MiniHDFS fixture writes the ports file when // it's ready, so we can just wait for the file to exist return fixture.portsFile.exists() - } + } final List miniHDFSArgs = [] // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options if (fixtureName.equals('secureHdfsFixture') || fixtureName.equals('secureHaHdfsFixture')) { - dependsOn krb5kdcFixture, krb5AddPrincipals - Path krb5Config = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf") - miniHDFSArgs.add("-Djava.security.krb5.conf=${krb5Config}"); + miniHDFSArgs.add("-Djava.security.krb5.conf=${project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs")}"); if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { miniHDFSArgs.add('--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED') } @@ -145,9 +108,11 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', // If it's a secure fixture, then set the principal name and keytab locations to use for auth. 
if (fixtureName.equals('secureHdfsFixture') || fixtureName.equals('secureHaHdfsFixture')) { - Path keytabPath = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("hdfs_hdfs.build.elastic.co.keytab") miniHDFSArgs.add("hdfs/hdfs.build.elastic.co@${realm}") - miniHDFSArgs.add("${keytabPath}") + miniHDFSArgs.add( + project(':test:fixtures:krb5kdc-fixture') + .ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab") + ) } args miniHDFSArgs.toArray() @@ -170,10 +135,11 @@ project.afterEvaluate { // If it's a secure cluster, add the keytab as an extra config, and set the krb5 conf in the JVM options. if (integTestTaskName.equals('integTestSecure') || integTestTaskName.equals('integTestSecureHa')) { - Path elasticsearchKT = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("elasticsearch.keytab").toAbsolutePath() - Path krb5conf = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf").toAbsolutePath() - - restIntegTestTask.clusterConfig.extraConfigFile("repository-hdfs/krb5.keytab", "${elasticsearchKT}") + String krb5conf = project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs") + restIntegTestTask.clusterConfig.extraConfigFile( + "repository-hdfs/krb5.keytab", + "${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}" + ) jvmArgs = jvmArgs + " " + "-Djava.security.krb5.conf=${krb5conf}" if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { jvmArgs = jvmArgs + " " + '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED' @@ -185,13 +151,14 @@ project.afterEvaluate { Task restIntegTestTaskRunner = project.tasks.getByName("${integTestTaskName}Runner") restIntegTestTaskRunner.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}" restIntegTestTaskRunner.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}" - restIntegTestTaskRunner.jvmArg "-Djava.security.krb5.conf=${krb5conf}" + restIntegTestTaskRunner.jvmArgs "-Djava.security.krb5.conf=${krb5conf}" if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { - restIntegTestTaskRunner.jvmArg '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED' + restIntegTestTaskRunner.jvmArgs '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED' } - - Path hdfsKT = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("hdfs_hdfs.build.elastic.co.keytab").toAbsolutePath() - restIntegTestTaskRunner.systemProperty "test.krb5.keytab.hdfs", "${hdfsKT}" + restIntegTestTaskRunner.systemProperty ( + "test.krb5.keytab.hdfs", + project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab") + ) } } @@ -255,7 +222,7 @@ if (fixtureSupported) { integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository' // Only include the HA integration tests for the HA test task - integTestHaRunner.patternSet.setIncludes(['**/Ha*TestSuiteIT.class']) + integTestHaRunner.setIncludes(['**/Ha*TestSuiteIT.class']) } else { if (legalPath) { logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH") @@ -269,41 +236,25 @@ if (fixtureSupported) { integTestHa.setEnabled(false) } -// Secure HDFS testing relies on the Vagrant based Kerberos fixture. 
-boolean secureFixtureSupported = false -if (fixtureSupported) { - secureFixtureSupported = project.rootProject.vagrantSupported -} - -if (secureFixtureSupported) { - project.check.dependsOn(integTestSecure) - project.check.dependsOn(integTestSecureHa) +check.dependsOn(integTestSecure, integTestSecureHa) - // Fixture dependencies - integTestSecureCluster.dependsOn secureHdfsFixture, krb5kdcFixture - integTestSecureHaCluster.dependsOn secureHaHdfsFixture, krb5kdcFixture +// Fixture dependencies +integTestSecureCluster.dependsOn secureHdfsFixture +integTestSecureHaCluster.dependsOn secureHaHdfsFixture - // Set the keytab files in the classpath so that we can access them from test code without the security manager - // freaking out. - Path hdfsKeytabPath = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs") - project.dependencies { - testRuntime fileTree(dir: hdfsKeytabPath.toString(), include: ['*.keytab']) - } - - // Run just the secure hdfs rest test suite. - integTestSecureRunner.systemProperty 'tests.rest.suite', 'secure_hdfs_repository' - // Ignore HA integration Tests. They are included below as part of integTestSecureHa test runner. - integTestSecureRunner.exclude('**/Ha*TestSuiteIT.class') - - // Only include the HA integration tests for the HA test task - integTestSecureHaRunner.patternSet.setIncludes(['**/Ha*TestSuiteIT.class']) -} else { - // Security tests unsupported. Don't run these tests. - integTestSecure.enabled = false - integTestSecureHa.enabled = false - testingConventions.enabled = false +// Set the keytab files in the classpath so that we can access them from test code without the security manager +// freaking out. +project.dependencies { + testRuntime fileTree(dir: project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab").parent, include: ['*.keytab']) } +// Run just the secure hdfs rest test suite. +integTestSecureRunner.systemProperty 'tests.rest.suite', 'secure_hdfs_repository' +// Ignore HA integration Tests. They are included below as part of integTestSecureHa test runner. 
+integTestSecureRunner.exclude('**/Ha*TestSuiteIT.class') +// Only include the HA integration tests for the HA test task +integTestSecureHaRunner.setIncludes(['**/Ha*TestSuiteIT.class']) + thirdPartyAudit { ignoreMissingClasses() ignoreViolations ( diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index b588f0d002ccc..bba1b0031c85a 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -58,7 +58,6 @@ public final class HdfsRepository extends BlobStoreRepository { private final Environment environment; private final ByteSizeValue chunkSize; - private final boolean compress; private final BlobPath basePath = BlobPath.cleanPath(); private final URI uri; private final String pathSetting; @@ -69,11 +68,10 @@ public final class HdfsRepository extends BlobStoreRepository { public HdfsRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry) { - super(metadata, environment.settings(), namedXContentRegistry); + super(metadata, environment.settings(), metadata.settings().getAsBoolean("compress", false), namedXContentRegistry); this.environment = environment; this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null); - this.compress = metadata.settings().getAsBoolean("compress", false); String uriSetting = getMetadata().settings().get("uri"); if (Strings.hasText(uriSetting) == false) { @@ -239,11 +237,6 @@ protected BlobPath basePath() { return basePath; } - @Override - protected boolean isCompress() { - return compress; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 779274cfd5e17..0b14ec4d20054 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -3,7 +3,6 @@ import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.AntFixture import org.elasticsearch.gradle.test.ClusterConfiguration import org.elasticsearch.gradle.test.RestIntegTestTask -import com.carrotsearch.gradle.junit4.RandomizedTestingTask /* * Licensed to Elasticsearch under one or more contributor @@ -43,8 +42,8 @@ dependencies { compile "commons-logging:commons-logging:${versions.commonslogging}" compile "commons-codec:commons-codec:${versions.commonscodec}" compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - compile 'com.fasterxml.jackson.core:jackson-databind:2.6.7.1' - compile 'com.fasterxml.jackson.core:jackson-annotations:2.6.0' + compile 'com.fasterxml.jackson.core:jackson-databind:2.8.11.3' + compile "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" compile "joda-time:joda-time:${versions.joda}" @@ -66,14 +65,14 @@ bundlePlugin { } } -task testRepositoryCreds(type: RandomizedTestingTask) { +task testRepositoryCreds(type: Test) { include '**/RepositoryCredentialsTests.class' include '**/S3BlobStoreRepositoryTests.class' systemProperty 'es.allow_insecure_settings', 'true' } project.check.dependsOn(testRepositoryCreds) -unitTest { +test { // these are tested explicitly in separate test tasks exclude '**/*CredentialsTests.class' exclude 
'**/S3BlobStoreRepositoryTests.class' diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.6.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.6.0.jar.sha1 deleted file mode 100644 index bc4cae402d631..0000000000000 --- a/plugins/repository-s3/licenses/jackson-annotations-2.6.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a0990e2e812ac6639b6ce955c91b13228500476e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.8.11.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.8.11.jar.sha1 new file mode 100644 index 0000000000000..30e7d1a7b1a74 --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-annotations-2.8.11.jar.sha1 @@ -0,0 +1 @@ +391de20b4e29cb3fb07d2454ace64be2c82ac91f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.6.7.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.6.7.1.jar.sha1 deleted file mode 100644 index 7d82dbddc52d0..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.6.7.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -306775aeb5164835a1dcbdf3f945587045cfb3b5 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.8.11.3.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.8.11.3.jar.sha1 new file mode 100644 index 0000000000000..253a1361931c3 --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.8.11.3.jar.sha1 @@ -0,0 +1 @@ +844df5aba5a1a56e00905b165b12bb34116ee858 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index b1d29d89a59c0..72ce6f8bf1f3e 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -19,8 +19,8 @@ package org.elasticsearch.repositories.s3; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; @@ -155,8 +155,6 @@ class S3Repository extends BlobStoreRepository { private final ByteSizeValue chunkSize; - private final boolean compress; - private final BlobPath basePath; private final boolean serverSideEncryption; @@ -174,7 +172,7 @@ class S3Repository extends BlobStoreRepository { final Settings settings, final NamedXContentRegistry namedXContentRegistry, final S3Service service) { - super(metadata, settings, namedXContentRegistry); + super(metadata, settings, COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry); this.service = service; this.repositoryMetaData = metadata; @@ -187,7 +185,6 @@ class S3Repository extends BlobStoreRepository { this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); - this.compress = COMPRESS_SETTING.get(metadata.settings()); // We make sure that chunkSize is bigger or equal than/to bufferSize if (this.chunkSize.getBytes() < bufferSize.getBytes()) { @@ -245,11 +242,6 @@ protected BlobPath basePath() { return basePath; } - @Override - protected boolean isCompress() { - return compress; - } - @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git 
a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 739452dc178c4..45693795f21cb 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -37,7 +37,7 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.elasticsearch.test.rest.FakeRestRequest; -import org.junit.AfterClass; +import org.junit.After; import org.junit.BeforeClass; import java.util.Collection; @@ -77,8 +77,8 @@ public static void setUpRepositorySettings() { } } - @AfterClass - public static void wipeRepository() { + @After + public void wipeRepository() { blobs.clear(); } diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/SmbDirectoryWrapper.java b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/SmbDirectoryWrapper.java index 9bf6daa13fb9d..c2a3af360935c 100644 --- a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/SmbDirectoryWrapper.java +++ b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/SmbDirectoryWrapper.java @@ -19,17 +19,19 @@ package org.elasticsearch.index.store; -import java.io.FilterOutputStream; -import java.io.IOException; -import java.nio.channels.Channels; -import java.nio.file.Files; -import java.nio.file.StandardOpenOption; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.OutputStreamIndexOutput; +import java.io.FilterOutputStream; +import java.io.IOException; +import java.nio.channels.Channels; +import java.nio.file.Files; +import java.nio.file.StandardOpenOption; +import java.util.Set; + /** * This class is used to wrap an existing {@link org.apache.lucene.store.FSDirectory} so that * the new shard segment files will be opened for Read and Write access. @@ -78,4 +80,10 @@ public void write(byte[] b, int offset, int length) throws IOException { CHUNK_SIZE); } } + + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } } diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 1bb0ca841bf6b..1cc57c3833a6a 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -16,10 +16,12 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: "nebula.maven-scm" esplugin { description 'The nio transport.' 
classname 'org.elasticsearch.transport.nio.NioTransportPlugin' + hasClientJar = true } compileJava.options.compilerArgs << "-Xlint:-try" diff --git a/qa/build.gradle b/qa/build.gradle index cbcb1d4580704..f1727f115155d 100644 --- a/qa/build.gradle +++ b/qa/build.gradle @@ -12,9 +12,11 @@ subprojects { Project subproj -> */ repositories { maven { + name "elastic" url "https://artifacts.elastic.co/maven" } maven { + name "elastic-snapshots" url "https://snapshots.elastic.co/maven" } } diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index e280b1d2d1a05..2bf0eae138135 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -127,7 +127,7 @@ private static MockTransportService startTransport( builder.add(node); } ClusterState build = ClusterState.builder(clusterName).nodes(builder.build()).build(); - channel.sendResponse(new ClusterStateResponse(clusterName, build, 0L, false)); + channel.sendResponse(new ClusterStateResponse(clusterName, build, false)); }); newService.start(); newService.acceptIncomingRequests(); diff --git a/qa/die-with-dignity/build.gradle b/qa/die-with-dignity/build.gradle index 3b2e21fd557e7..140df6e283ab8 100644 --- a/qa/die-with-dignity/build.gradle +++ b/qa/die-with-dignity/build.gradle @@ -27,11 +27,11 @@ esplugin { integTestRunner { systemProperty 'tests.security.manager', 'false' systemProperty 'tests.system_call_filter', 'false' - systemProperty 'pidfile', "${-> integTest.getNodes().get(0).pidFile}" - systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}_server.json" + nonInputProperties.systemProperty 'pidfile', "${-> integTest.getNodes().get(0).pidFile}" + nonInputProperties.systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}_server.json" systemProperty 'runtime.java.home', "${project.runtimeJavaHome}" } -unitTest.enabled = false +test.enabled = false check.dependsOn integTest diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 16398b380cfe1..737106ee92024 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -21,14 +21,10 @@ import org.apache.http.ConnectionClosedException; import org.apache.lucene.util.Constants; -import org.elasticsearch.cli.Terminal; import org.elasticsearch.client.Request; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.logging.JsonLogLine; -import org.elasticsearch.common.logging.JsonLogsStream; import org.elasticsearch.test.rest.ESRestTestCase; import org.hamcrest.Matcher; -import org.hamcrest.Matchers; import java.io.BufferedReader; import java.io.IOException; @@ -38,19 +34,17 @@ import java.nio.file.Path; import java.util.Iterator; import java.util.List; -import java.util.stream.Stream; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static 
org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; public class DieWithDignityIT extends ESRestTestCase { - + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/43413") public void testDieWithDignity() throws Exception { // deleting the PID file prevents stopping the cluster from failing since it occurs if and only if the PID file exists final Path pidFile = PathUtils.get(System.getProperty("pidfile")); @@ -91,29 +85,29 @@ public void testDieWithDignity() throws Exception { } }); - try { - // parse the logs and ensure that Elasticsearch died with the expected cause - Path path = PathUtils.get(System.getProperty("log")); - try (Stream stream = JsonLogsStream.from(path)) { - final Iterator it = stream.iterator(); + // parse the logs and ensure that Elasticsearch died with the expected cause + final List lines = Files.readAllLines(PathUtils.get(System.getProperty("log"))); - boolean fatalError = false; - boolean fatalErrorInThreadExiting = false; + final Iterator it = lines.iterator(); - while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { - final JsonLogLine line = it.next(); - if (isFatalError(line)) { - fatalError = true; - } else if (isFatalErrorInThreadExiting(line) || isWarnExceptionReceived(line)) { - fatalErrorInThreadExiting = true; - assertThat(line.stacktrace(), - hasItem(Matchers.containsString("java.lang.OutOfMemoryError: die with dignity"))); - } + boolean fatalError = false; + boolean fatalErrorInThreadExiting = false; + try { + while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { + final String line = it.next(); + if (line.matches(".*ERROR.*o\\.e\\.ExceptionsHelper.*node-0.*fatal error.*")) { + fatalError = true; + } else if (line.matches(".*ERROR.*o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler.*node-0.*" + + "fatal error in thread \\[Thread-\\d+\\], exiting.*")) { + fatalErrorInThreadExiting = true; + assertTrue(it.hasNext()); + assertThat(it.next(), containsString("java.lang.OutOfMemoryError: die with dignity")); } - - assertTrue(fatalError); - assertTrue(fatalErrorInThreadExiting); } + + assertTrue(fatalError); + assertTrue(fatalErrorInThreadExiting); + } catch (AssertionError ae) { Path path = PathUtils.get(System.getProperty("log")); debugLogs(path); @@ -121,34 +115,12 @@ public void testDieWithDignity() throws Exception { } } - private boolean isWarnExceptionReceived(JsonLogLine line) { - return line.level().equals("WARN") - && line.component().equals("o.e.h.AbstractHttpServerTransport") - && line.nodeName().equals("node-0") - && line.message().contains("caught exception while handling client http traffic"); - } - private void debugLogs(Path path) throws IOException { try (BufferedReader reader = Files.newBufferedReader(path)) { - Terminal terminal = Terminal.DEFAULT; - reader.lines().forEach(line -> terminal.println(line)); + reader.lines().forEach(line -> logger.info(line)); } } - private boolean isFatalErrorInThreadExiting(JsonLogLine line) { - return line.level().equals("ERROR") - && line.component().equals("o.e.b.ElasticsearchUncaughtExceptionHandler") - && line.nodeName().equals("node-0") - && line.message().matches("fatal error in thread \\[Thread-\\d+\\], exiting$"); - } - - private boolean isFatalError(JsonLogLine line) { - return line.level().equals("ERROR") - && 
line.component().equals("o.e.ExceptionsHelper") - && line.nodeName().equals("node-0") - && line.message().contains("fatal error"); - } - @Override protected boolean preserveClusterUponCompletion() { // as the cluster is dead its state can not be wiped successfully so we have to bypass wiping the cluster diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index 62614ca36cda6..2f9239e5c2f22 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -31,7 +31,7 @@ dependencies { // TODO: give each evil test its own fresh JVM for more isolation. -unitTest { +test { systemProperty 'tests.security.manager', 'false' } diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index cd5e836757c35..15c44f38f7cf3 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -92,10 +92,10 @@ for (Version version : bwcVersions.indexCompatible) { } } -unitTest.enabled = false // no unit tests for rolling upgrades, only the rest integration test +test.enabled = false // no unit tests for rolling upgrades, only the rest integration test // basic integ tests includes testing bwc against the most recent version -task integTest { +task bwcTestSnapshots { if (project.bwc_tests_enabled) { for (final def version : bwcVersions.unreleasedIndexCompatible) { dependsOn "v${version}#bwcTest" @@ -103,5 +103,17 @@ task integTest { } } -check.dependsOn(integTest) +check.dependsOn(bwcTestSnapshots) +configurations { + testArtifacts.extendsFrom testRuntime +} + +task testJar(type: Jar) { + appendix 'test' + from sourceSets.test.output +} + +artifacts { + testArtifacts testJar +} \ No newline at end of file diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 9740ff4222d7e..97f87d10230a1 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -22,12 +22,10 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.WarningFailureException; -import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.CheckedFunction; @@ -38,6 +36,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.rest.action.document.RestGetAction; +import org.elasticsearch.rest.action.document.RestIndexAction; import org.elasticsearch.rest.action.document.RestUpdateAction; import org.elasticsearch.rest.action.search.RestExplainAction; import org.elasticsearch.test.NotEqualMessageBuilder; @@ -63,7 +62,6 @@ import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static 
org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -80,15 +78,20 @@ */ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { private final boolean supportsLenientBooleans = getOldClusterVersion().before(Version.V_6_0_0_alpha1); - private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0"); private String index; + private String type; @Before public void setIndex() throws IOException { index = getTestName().toLowerCase(Locale.ROOT); } + @Before + public void setType() { + type = getOldClusterVersion().before(Version.V_6_7_0) ? "doc" : "_doc"; + } + public void testSearch() throws Exception { int count; if (isRunningAgainstOldCluster()) { @@ -102,7 +105,9 @@ public void testSearch() throws Exception { } { mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("doc"); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.startObject(type); + } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("string"); @@ -121,31 +126,35 @@ public void testSearch() throws Exception { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.endObject(); + } mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); + Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - RequestOptions.Builder options = createIndex.getOptions().toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - createIndex.setOptions(options); + createIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createIndex); count = randomIntBetween(2000, 3000); byte[] randomByteArray = new byte[16]; random().nextBytes(randomByteArray); - indexRandomDocuments(count, true, true, i -> { - return JsonXContent.contentBuilder().startObject() - .field("string", randomAlphaOfLength(10)) - .field("int", randomInt(100)) - .field("float", randomFloat()) - // be sure to create a "proper" boolean (True, False) for the first document so that automapping is correct - .field("bool", i > 0 && supportsLenientBooleans ? randomLenientBoolean() : randomBoolean()) - .field("field.with.dots", randomAlphaOfLength(10)) - .field("binary", Base64.getEncoder().encodeToString(randomByteArray)) - .endObject(); - }); + indexRandomDocuments( + count, + true, + true, + i -> JsonXContent.contentBuilder().startObject() + .field("string", randomAlphaOfLength(10)) + .field("int", randomInt(100)) + .field("float", randomFloat()) + // be sure to create a "proper" boolean (True, False) for the first document so that automapping is correct + .field("bool", i > 0 && supportsLenientBooleans ? 
randomLenientBoolean() : randomBoolean()) + .field("field.with.dots", randomAlphaOfLength(10)) + .field("binary", Base64.getEncoder().encodeToString(randomByteArray)) + .endObject() + ); refresh(); } else { count = countOfIndexedRandomDocuments(); @@ -155,7 +164,7 @@ public void testSearch() throws Exception { assertBasicSearchWorks(count); assertAllSearchWorks(count); assertBasicAggregationWorks(); - assertRealtimeGetWorks(); + assertRealtimeGetWorks(type); assertStoredBinaryFields(count); } @@ -171,7 +180,9 @@ public void testNewReplicasWork() throws Exception { } { mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("doc"); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.startObject(type); + } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("field"); @@ -179,23 +190,21 @@ public void testNewReplicasWork() throws Exception { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.endObject(); + } mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); + Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - RequestOptions.Builder options = createIndex.getOptions().toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - createIndex.setOptions(options); + createIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createIndex); int numDocs = randomIntBetween(2000, 3000); - indexRandomDocuments(numDocs, true, false, i -> { - return JsonXContent.contentBuilder().startObject() - .field("field", "value") - .endObject(); - }); + indexRandomDocuments( + numDocs, true, false, i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject()); logger.info("Refreshing [{}]", index); client().performRequest(new Request("POST", "/" + index + "/_refresh")); } else { @@ -204,7 +213,7 @@ public void testNewReplicasWork() throws Exception { logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, index); Request setNumberOfReplicas = new Request("PUT", "/" + index + "/_settings"); setNumberOfReplicas.setJsonEntity("{ \"index\": { \"number_of_replicas\" : " + numReplicas + " }}"); - Response response = client().performRequest(setNumberOfReplicas); + client().performRequest(setNumberOfReplicas); ensureGreenLongWait(index); @@ -225,76 +234,6 @@ public void testNewReplicasWork() throws Exception { } } - /** - * Search on an alias that contains illegal characters that would prevent it from being created after 5.1.0. It should still be - * search-able though. 
- */ - public void testAliasWithBadName() throws Exception { - assumeTrue("Can only test bad alias name if old cluster is on 5.1.0 or before", - getOldClusterVersion().before(VERSION_5_1_0_UNRELEASED)); - - int count; - if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { - mappingsAndSettings.startObject("settings"); - mappingsAndSettings.field("number_of_shards", 1); - mappingsAndSettings.field("number_of_replicas", 0); - mappingsAndSettings.endObject(); - } - { - mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("doc"); - mappingsAndSettings.startObject("properties"); - { - mappingsAndSettings.startObject("key"); - mappingsAndSettings.field("type", "keyword"); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); - - String aliasName = "%23" + index; // %23 == # - client().performRequest(new Request("PUT", "/" + index + "/_alias/" + aliasName)); - Response response = client().performRequest(new Request("HEAD", "/" + index + "/_alias/" + aliasName)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - count = randomIntBetween(32, 128); - indexRandomDocuments(count, true, true, i -> { - return JsonXContent.contentBuilder().startObject() - .field("key", "value") - .endObject(); - }); - refresh(); - } else { - count = countOfIndexedRandomDocuments(); - } - - Request request = new Request("GET", "/_cluster/state"); - request.addParameter("metric", "metadata"); - logger.error("clusterState=" + entityAsMap(client().performRequest(request))); - // We can read from the alias just like we can read from the index. - String aliasName = "%23" + index; // %23 == # - Map searchRsp = entityAsMap(client().performRequest(new Request("GET", "/" + aliasName + "/_search"))); - int totalHits = extractTotalHits(searchRsp); - assertEquals(count, totalHits); - if (isRunningAgainstOldCluster() == false) { - // We can remove the alias. 
- Response response = client().performRequest(new Request("DELETE", "/" + index + "/_alias/" + aliasName)); - assertEquals(200, response.getStatusLine().getStatusCode()); - // and check that it is gone: - response = client().performRequest(new Request("HEAD", "/" + index + "/_alias/" + aliasName)); - assertEquals(404, response.getStatusLine().getStatusCode()); - } - } - public void testClusterState() throws Exception { if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); @@ -348,31 +287,43 @@ public void testShrink() throws IOException { mappingsAndSettings.startObject(); { mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("doc"); - mappingsAndSettings.startObject("properties"); { - mappingsAndSettings.startObject("field"); - mappingsAndSettings.field("type", "text"); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.startObject(type); + } + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("field"); + { + mappingsAndSettings.field("type", "text"); + } + mappingsAndSettings.endObject(); + } mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.endObject(); + } } mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster() == false) { + // the default number of shards is now one so we have to set the number of shards to be more than one explicitly + mappingsAndSettings.startObject("settings"); + { + mappingsAndSettings.field("index.number_of_shards", 5); + } + mappingsAndSettings.endObject(); + } } mappingsAndSettings.endObject(); + Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - RequestOptions.Builder options = createIndex.getOptions().toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - createIndex.setOptions(options); + createIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); - indexRandomDocuments(numDocs, true, true, i -> { - return JsonXContent.contentBuilder().startObject() - .field("field", "value") - .endObject(); - }); + indexRandomDocuments( + numDocs, true, true, i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject()); ensureGreen(index); // wait for source index to be available on both nodes before starting shrink @@ -381,7 +332,7 @@ public void testShrink() throws IOException { client().performRequest(updateSettingsRequest); Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); - if (getOldClusterVersion().onOrAfter(Version.V_6_4_0)) { + if (getOldClusterVersion().onOrAfter(Version.V_6_4_0) && getOldClusterVersion().before(Version.V_7_0_0)) { shrinkIndexRequest.addParameter("copy_settings", "true"); } shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); @@ -419,31 +370,45 @@ public void testShrinkAfterUpgrade() throws IOException { mappingsAndSettings.startObject(); { mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("doc"); - mappingsAndSettings.startObject("properties"); { - mappingsAndSettings.startObject("field"); - mappingsAndSettings.field("type", "text"); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.startObject(type); + } + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("field"); + { + 
mappingsAndSettings.field("type", "text"); + } + mappingsAndSettings.endObject(); + } mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.endObject(); + } } mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster() == false) { + // the default number of shards is now one so we have to set the number of shards to be more than one explicitly + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("index.number_of_shards", 5); + mappingsAndSettings.endObject(); + } } mappingsAndSettings.endObject(); + Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - RequestOptions.Builder options = createIndex.getOptions().toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - createIndex.setOptions(options); + createIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); - indexRandomDocuments(numDocs, true, true, i -> { - return JsonXContent.contentBuilder().startObject() - .field("field", "value") - .endObject(); - }); + indexRandomDocuments( + numDocs, + true, + true, + i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject() + ); } else { ensureGreen(index); // wait for source index to be available on both nodes before starting shrink @@ -510,7 +475,7 @@ public void testRollover() throws IOException { bulk.append("{\"index\":{}}\n"); bulk.append("{\"test\":\"test\"}\n"); } - Request bulkRequest = new Request("POST", "/" + index + "_write/doc/_bulk"); + Request bulkRequest = new Request("POST", "/" + index + "_write/" + type + "/_bulk"); bulkRequest.setJsonEntity(bulk.toString()); bulkRequest.addParameter("refresh", ""); bulkRequest.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); @@ -518,7 +483,7 @@ public void testRollover() throws IOException { if (isRunningAgainstOldCluster()) { Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover"); - rolloverRequest.setOptions(allowTypeRemovalWarnings()); + rolloverRequest.setOptions(allowTypesRemovalWarnings()); rolloverRequest.setJsonEntity("{" + " \"conditions\": {" + " \"max_docs\": 5" @@ -633,7 +598,7 @@ void assertBasicAggregationWorks() throws IOException { assertTotalHits(termsCount, boolTerms); } - void assertRealtimeGetWorks() throws IOException { + void assertRealtimeGetWorks(final String typeName) throws IOException { Request disableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); disableAutoRefresh.setJsonEntity("{ \"index\": { \"refresh_interval\" : -1 }}"); client().performRequest(disableAutoRefresh); @@ -644,13 +609,15 @@ void assertRealtimeGetWorks() throws IOException { Map hit = (Map) ((List)(XContentMapValues.extractValue("hits.hits", searchResponse))).get(0); String docId = (String) hit.get("_id"); - Request updateRequest = new Request("POST", "/" + index + "/doc/" + docId + "/_update"); + Request updateRequest = new Request("POST", "/" + index + "/" + typeName + "/" + docId + "/_update"); updateRequest.setOptions(expectWarnings(RestUpdateAction.TYPES_DEPRECATION_MESSAGE)); updateRequest.setJsonEntity("{ \"doc\" : { \"foo\": \"bar\"}}"); client().performRequest(updateRequest); - Request getRequest = new Request("GET", "/" + index + "/doc/" + docId); - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + Request getRequest = new 
Request("GET", "/" + index + "/" + typeName + "/" + docId); + if (getOldClusterVersion().before(Version.V_6_7_0)) { + getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + } Map getRsp = entityAsMap(client().performRequest(getRequest)); Map source = (Map) getRsp.get("_source"); assertTrue("doc does not contain 'foo' key: " + source, source.containsKey("foo")); @@ -689,7 +656,7 @@ static void assertNoFailures(Map response) { void assertTotalHits(int expectedTotalHits, Map response) { int actualTotalHits = extractTotalHits(response); - assertEquals(expectedTotalHits, actualTotalHits); + assertEquals(response.toString(), expectedTotalHits, actualTotalHits); } int extractTotalHits(Map response) { @@ -704,7 +671,7 @@ int extractTotalHits(Map response) { * Tests that a single document survives. Super basic smoke test. */ public void testSingleDoc() throws IOException { - String docLocation = "/" + index + "/doc/1"; + String docLocation = "/" + index + "/" + type + "/1"; String doc = "{\"test\": \"test\"}"; if (isRunningAgainstOldCluster()) { @@ -715,7 +682,9 @@ public void testSingleDoc() throws IOException { Request request = new Request("GET", docLocation); - request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + if (getOldClusterVersion().before(Version.V_6_7_0)) { + request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + } assertThat(toStr(client().performRequest(request)), containsString(doc)); } @@ -779,8 +748,12 @@ public void testRecovery() throws Exception { } if (shouldHaveTranslog) { // Update a few documents so we are sure to have a translog - indexRandomDocuments(count / 10, false /* Flushing here would invalidate the whole thing....*/, false, - i -> jsonBuilder().startObject().field("field", "value").endObject()); + indexRandomDocuments( + count / 10, + false, // flushing here would invalidate the whole thing + false, + i -> jsonBuilder().startObject().field("field", "value").endObject() + ); } saveInfoDocument("should_have_translog", Boolean.toString(shouldHaveTranslog)); } else { @@ -791,6 +764,7 @@ public void testRecovery() throws Exception { // Count the documents in the index to make sure we have as many as we put there Request countRequest = new Request("GET", "/" + index + "/_search"); countRequest.addParameter("size", "0"); + refresh(); Map countResponse = entityAsMap(client().performRequest(countRequest)); assertTotalHits(count, countResponse); @@ -894,13 +868,19 @@ public void testSnapshotRestore() throws IOException { } templateBuilder.endObject(); templateBuilder.startObject("mappings"); { - templateBuilder.startObject("doc"); { - templateBuilder.startObject("_source"); { + if (isRunningAgainstAncientCluster()) { + templateBuilder.startObject(type); + } + { + templateBuilder.startObject("_source"); + { templateBuilder.field("enabled", true); } templateBuilder.endObject(); } - templateBuilder.endObject(); + if (isRunningAgainstAncientCluster()) { + templateBuilder.endObject(); + } } templateBuilder.endObject(); templateBuilder.startObject("aliases"); { @@ -919,13 +899,7 @@ public void testSnapshotRestore() throws IOException { templateBuilder.endObject().endObject(); Request createTemplateRequest = new Request("PUT", "/_template/test_template"); createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); - - // In 7.0, type names are no longer expected by default in put index template requests. - // We therefore use the deprecated typed APIs when running against the current version. 
- if (isRunningAgainstOldCluster() == false) { - createTemplateRequest.addParameter(INCLUDE_TYPE_NAME_PARAMETER, "true"); - } - createTemplateRequest.setOptions(allowTypeRemovalWarnings()); + createTemplateRequest.setOptions(allowTypesRemovalWarnings()); client().performRequest(createTemplateRequest); @@ -1016,12 +990,13 @@ public void testSoftDeletes() throws Exception { int numDocs = between(10, 100); for (int i = 0; i < numDocs; i++) { String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject()); - Request request = new Request("POST", "/" + index + "/doc/" + i); + Request request = new Request("POST", "/" + index + "/" + type + "/" + i); + if (isRunningAgainstAncientCluster() == false) { + request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + } request.setJsonEntity(doc); client().performRequest(request); - if (rarely()) { - refresh(); - } + refresh(); } client().performRequest(new Request("POST", "/" + index + "/_flush")); int liveDocs = numDocs; @@ -1029,11 +1004,11 @@ public void testSoftDeletes() throws Exception { for (int i = 0; i < numDocs; i++) { if (randomBoolean()) { String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v2").endObject()); - Request request = new Request("POST", "/" + index + "/doc/" + i); + Request request = new Request("POST", "/" + index + "/" + type + "/" + i); request.setJsonEntity(doc); client().performRequest(request); } else if (randomBoolean()) { - client().performRequest(new Request("DELETE", "/" + index + "/doc/" + i)); + client().performRequest(new Request("DELETE", "/" + index + "/" + type + "/" + i)); liveDocs--; } } @@ -1046,7 +1021,7 @@ public void testSoftDeletes() throws Exception { } } - private void checkSnapshot(String snapshotName, int count, Version tookOnVersion) throws IOException { + private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException { // Check the snapshot metadata, especially the version Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName); Map listSnapshotResponse = entityAsMap(client().performRequest(listSnapshotRequest)); @@ -1103,7 +1078,7 @@ && getOldClusterVersion().onOrAfter(Version.V_6_1_0) && getOldClusterVersion().b bulk.append("{\"index\":{\"_id\":\"").append(count + i).append("\"}}\n"); bulk.append("{\"test\":\"test\"}\n"); } - Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/doc/_bulk"); + Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/" + type + "/_bulk"); writeToRestoredRequest.addParameter("refresh", "true"); writeToRestoredRequest.setJsonEntity(bulk.toString()); writeToRestoredRequest.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); @@ -1129,13 +1104,7 @@ && getOldClusterVersion().onOrAfter(Version.V_6_1_0) && getOldClusterVersion().b // Check that the template was restored successfully Request getTemplateRequest = new Request("GET", "/_template/test_template"); - - // In 7.0, type names are no longer returned by default in get index template requests. - // We therefore use the deprecated typed APIs when running against the current version. 
- if (isRunningAgainstOldCluster() == false) { - getTemplateRequest.addParameter(INCLUDE_TYPE_NAME_PARAMETER, "true"); - } - getTemplateRequest.setOptions(allowTypeRemovalWarnings()); + getTemplateRequest.setOptions(allowTypesRemovalWarnings()); Map getTemplateResponse = entityAsMap(client().performRequest(getTemplateRequest)); Map expectedTemplate = new HashMap<>(); @@ -1145,7 +1114,14 @@ && getOldClusterVersion().onOrAfter(Version.V_6_1_0) && getOldClusterVersion().b expectedTemplate.put("index_patterns", singletonList("evil_*")); } expectedTemplate.put("settings", singletonMap("index", singletonMap("number_of_shards", "1"))); - expectedTemplate.put("mappings", singletonMap("doc", singletonMap("_source", singletonMap("enabled", true)))); + // We don't have the type in the response starting with 7.0, but we won't have it on old cluster after upgrade + // either so look at the response to figure out the correct assertions + if (isTypeInTemplateResponse(getTemplateResponse)) { + expectedTemplate.put("mappings", singletonMap(type, singletonMap("_source", singletonMap("enabled", true)))); + } else { + expectedTemplate.put("mappings", singletonMap("_source", singletonMap("enabled", true))); + } + expectedTemplate.put("order", 0); Map aliases = new HashMap<>(); aliases.put("alias1", emptyMap()); @@ -1155,18 +1131,33 @@ && getOldClusterVersion().onOrAfter(Version.V_6_1_0) && getOldClusterVersion().b if (false == expectedTemplate.equals(getTemplateResponse)) { NotEqualMessageBuilder builder = new NotEqualMessageBuilder(); builder.compareMaps(getTemplateResponse, expectedTemplate); + logger.info("expected: {}\nactual:{}", expectedTemplate, getTemplateResponse); fail("template doesn't match:\n" + builder.toString()); } } + @SuppressWarnings("unchecked") + private boolean isTypeInTemplateResponse(Map getTemplateResponse) { + return ( (Map) ( + (Map) getTemplateResponse.getOrDefault("test_template", emptyMap()) + ).get("mappings")).get("_source") == null; + } + // TODO tests for upgrades after shrink. We've had trouble with shrink in the past. 
- private void indexRandomDocuments(int count, boolean flushAllowed, boolean saveInfo, - CheckedFunction docSupplier) throws IOException { + private void indexRandomDocuments( + final int count, + final boolean flushAllowed, + final boolean saveInfo, + final CheckedFunction docSupplier) + throws IOException { logger.info("Indexing {} random documents", count); for (int i = 0; i < count; i++) { logger.debug("Indexing document [{}]", i); - Request createDocument = new Request("POST", "/" + index + "/doc/" + i); + Request createDocument = new Request("POST", "/" + index + "/" + type + "/" + i); + if (isRunningAgainstAncientCluster() == false) { + createDocument.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); + } createDocument.setJsonEntity(Strings.toString(docSupplier.apply(i))); client().performRequest(createDocument); if (rarely()) { @@ -1191,16 +1182,21 @@ private void saveInfoDocument(String type, String value) throws IOException { infoDoc.field("value", value); infoDoc.endObject(); // Only create the first version so we know how many documents are created when the index is first created - Request request = new Request("PUT", "/info/doc/" + index + "_" + type); + Request request = new Request("PUT", "/info/" + this.type + "/" + index + "_" + type); request.addParameter("op_type", "create"); request.setJsonEntity(Strings.toString(infoDoc)); + if (isRunningAgainstAncientCluster() == false) { + request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + } client().performRequest(request); } private String loadInfoDocument(String type) throws IOException { - Request request = new Request("GET", "/info/doc/" + index + "_" + type); + Request request = new Request("GET", "/info/" + this.type + "/" + index + "_" + type); request.addParameter("filter_path", "_source"); - request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + if (getOldClusterVersion().before(Version.V_6_7_0)) { + request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + } String doc = toStr(client().performRequest(request)); Matcher m = Pattern.compile("\"value\":\"(.+)\"").matcher(doc); assertTrue(doc, m.find()); diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index f22b1b44c0763..fb4e33863cacf 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -20,10 +20,9 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; +import org.elasticsearch.Version; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; -import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; @@ -145,6 +144,7 @@ private static void addCandidate(String querySource, QueryBuilder expectedQb) { } public void testQueryBuilderBWC() throws Exception { + final String type = getOldClusterVersion().before(Version.V_7_0_0) ? 
"doc" : "_doc"; String index = "queries"; if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); @@ -157,7 +157,9 @@ public void testQueryBuilderBWC() throws Exception { } { mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("doc"); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.startObject(type); + } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("query"); @@ -176,19 +178,19 @@ public void testQueryBuilderBWC() throws Exception { } mappingsAndSettings.endObject(); mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.endObject(); + } } mappingsAndSettings.endObject(); Request request = new Request("PUT", "/" + index); - RequestOptions.Builder options = request.getOptions().toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - request.setOptions(options); + request.setOptions(allowTypesRemovalWarnings()); request.setJsonEntity(Strings.toString(mappingsAndSettings)); Response rsp = client().performRequest(request); assertEquals(200, rsp.getStatusLine().getStatusCode()); for (int i = 0; i < CANDIDATES.size(); i++) { - request = new Request("PUT", "/" + index + "/doc/" + Integer.toString(i)); + request = new Request("PUT", "/" + index + "/" + type + "/" + Integer.toString(i)); request.setJsonEntity((String) CANDIDATES.get(i)[0]); rsp = client().performRequest(request); assertEquals(201, rsp.getStatusLine().getStatusCode()); diff --git a/qa/logging-config/build.gradle b/qa/logging-config/build.gradle index 0abdc1247514a..e7f63f4c26a38 100644 --- a/qa/logging-config/build.gradle +++ b/qa/logging-config/build.gradle @@ -33,10 +33,10 @@ integTestCluster { } integTestRunner { - systemProperty 'tests.logfile', + nonInputProperties.systemProperty 'tests.logfile', "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.log" } -unitTest { +test { systemProperty 'tests.security.manager', 'false' } diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index b484ba90a4da3..bbb20737c4708 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -46,6 +46,7 @@ * It has to be in a org.elasticsearch.common.logging package to use PrefixLogger */ public class JsonLoggerTests extends ESTestCase { + private static final String LINE_SEPARATOR = System.lineSeparator(); @BeforeClass public static void initNodeName() { @@ -109,15 +110,15 @@ public void testPrefixLoggerInJson() throws IOException { public void testJsonInMessage() throws IOException { final Logger testLogger = LogManager.getLogger("test"); - String json = "{\n" + - " \"terms\" : {\n" + - " \"user\" : [\n" + - " \"u1\",\n" + - " \"u2\",\n" + - " \"u3\"\n" + - " ],\n" + - " \"boost\" : 1.0\n" + - " }\n" + + String json = "{" + LINE_SEPARATOR + + " \"terms\" : {" + LINE_SEPARATOR + + " \"user\" : [" + LINE_SEPARATOR + + " \"u1\"," + LINE_SEPARATOR + + " \"u2\"," + LINE_SEPARATOR + + " \"u3\"" + LINE_SEPARATOR + + " ]," + LINE_SEPARATOR + + " \"boost\" : 1.0" + LINE_SEPARATOR + + " }" + LINE_SEPARATOR + "}"; testLogger.info(json); @@ -151,15 +152,15 @@ public void testStacktrace() throws IOException { public void testJsonInStacktraceMessageIsSplitted() 
throws IOException { final Logger testLogger = LogManager.getLogger("test"); - String json = "{\n" + - " \"terms\" : {\n" + - " \"user\" : [\n" + - " \"u1\",\n" + - " \"u2\",\n" + - " \"u3\"\n" + - " ],\n" + - " \"boost\" : 1.0\n" + - " }\n" + + String json = "{" + LINE_SEPARATOR + + " \"terms\" : {" + LINE_SEPARATOR + + " \"user\" : [" + LINE_SEPARATOR + + " \"u1\"," + LINE_SEPARATOR + + " \"u2\"," + LINE_SEPARATOR + + " \"u3\"" + LINE_SEPARATOR + + " ]," + LINE_SEPARATOR + + " \"boost\" : 1.0" + LINE_SEPARATOR + + " }" + LINE_SEPARATOR + "}"; testLogger.error("error message " + json, new Exception(json)); @@ -173,7 +174,7 @@ public void testJsonInStacktraceMessageIsSplitted() throws IOException { logLine("file", Level.ERROR, "sample-name", "test", "error message " + json), //stacktrace field will have each json line will in a separate array element - stacktraceWith(("java.lang.Exception: " + json).split("\n")) + stacktraceWith(("java.lang.Exception: " + json).split(LINE_SEPARATOR)) ) )); } diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 3de7d4132bbe3..91480c4e3d244 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -60,10 +60,10 @@ for (Version version : bwcVersions.wireCompatible) { } } -unitTest.enabled = false // no unit tests for rolling upgrades, only the rest integration test +test.enabled = false // no unit tests for rolling upgrades, only the rest integration test // basic integ tests includes testing bwc against the most recent version -task integTest { +task bwcTestSnapshots { if (project.bwc_tests_enabled) { for (final def version : bwcVersions.unreleasedWireCompatible) { dependsOn "v${version}#bwcTest" @@ -71,4 +71,4 @@ task integTest { } } -check.dependsOn(integTest) +check.dependsOn(bwcTestSnapshots) diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 6942331c97c25..787e1362e8daa 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -53,6 +53,6 @@ task integTest { dependsOn = [mixedClusterTest] } -unitTest.enabled = false // no unit tests for multi-cluster-search, only the rest integration test +test.enabled = false // no unit tests for multi-cluster-search, only integration tests check.dependsOn(integTest) diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 160edea6a7898..7f910da2ee5e8 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -70,6 +70,7 @@ for (Version version : bwcVersions.wireCompatible) { Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") oldClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'old_cluster' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') } Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure getOtherUnicastHostAddresses -> @@ -95,6 +96,7 @@ for (Version version : bwcVersions.wireCompatible) { Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") oneThirdUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.upgrade_from_version', version.toString() systemProperty 'tests.first_round', 'true' finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" } @@ -108,6 +110,7 @@ for (Version version : bwcVersions.wireCompatible) { Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner") 
twoThirdsUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.upgrade_from_version', version.toString() systemProperty 'tests.first_round', 'false' finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } @@ -121,6 +124,7 @@ for (Version version : bwcVersions.wireCompatible) { Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'upgraded_cluster' + systemProperty 'tests.upgrade_from_version', version.toString() /* * Force stopping all the upgraded nodes after the test runner * so they are alive during the test. @@ -139,10 +143,10 @@ for (Version version : bwcVersions.wireCompatible) { } } -unitTest.enabled = false // no unit tests for rolling upgrades, only the rest integration test +test.enabled = false // no unit tests for rolling upgrades, only the rest integration test // basic integ tests includes testing bwc against the most recent version -task integTest { +task bwcTestSnapshots { if (project.bwc_tests_enabled) { for (final def version : bwcVersions.unreleasedWireCompatible) { dependsOn "v${version}#bwcTest" @@ -150,4 +154,4 @@ task integTest { } } -check.dependsOn(integTest) +check.dependsOn(bwcTestSnapshots) diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java index 1c57be7abbaa1..398627baeddd1 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.upgrades; +import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.rest.ESRestTestCase; @@ -42,6 +43,7 @@ public static ClusterType parse(String value) { } protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite")); + protected static final Version UPGRADE_FROM_VERSION = Version.fromString(System.getProperty("tests.upgrade_from_version")); @Override protected final boolean preserveIndicesUponCompletion() { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MappingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MappingIT.java new file mode 100644 index 0000000000000..b61b94e67edc4 --- /dev/null +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MappingIT.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.xcontent.support.XContentMapValues; + +public class MappingIT extends AbstractRollingTestCase { + /** + * Create a mapping that explicitly disables the _all field (possible in 6x, see #37429) + * and check that it can be upgraded to 7x. + */ + public void testAllFieldDisable6x() throws Exception { + assumeTrue("_all", UPGRADE_FROM_VERSION.before(Version.V_7_0_0)); + switch (CLUSTER_TYPE) { + case OLD: + Request createTestIndex = new Request("PUT", "all-index"); + createTestIndex.addParameter("include_type_name", "false"); + createTestIndex.setJsonEntity( + "{ \"settings\": { \"index.number_of_shards\": 1 }, " + + "\"mappings\": {\"_all\": { \"enabled\": false }, \"properties\": { \"field\": { \"type\": \"text\" }}}}" + ); + createTestIndex.setOptions(expectWarnings("[_all] is deprecated in 6.0+ and will be removed in 7.0. As a replacement," + + " " + "you can use [copy_to] on mapping fields to create your own catch all field.")); + Response resp = client().performRequest(createTestIndex); + assertEquals(200, resp.getStatusLine().getStatusCode()); + break; + + default: + final Request request = new Request("GET", "all-index"); + Response response = client().performRequest(request); + assertEquals(200, response.getStatusLine().getStatusCode()); + Object enabled = XContentMapValues.extractValue("all-index.mappings._all.enabled", entityAsMap(response)); + assertNotNull(enabled); + assertEquals(false, enabled); + break; + } + } +} diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 3666a64896ae2..128a7ef6934f1 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.upgrades; +import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Request; @@ -26,8 +27,10 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.action.document.RestIndexAction; +import org.elasticsearch.rest.action.document.RestUpdateAction; import org.elasticsearch.test.rest.yaml.ObjectPath; import java.io.IOException; @@ -35,6 +38,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import java.util.function.Predicate; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; @@ -43,6 +47,7 @@ import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.notNullValue; /** @@ -50,7 +55,6 @@ */ public class RecoveryIT extends AbstractRollingTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31291") public void 
testHistoryUUIDIsGenerated() throws Exception { final String index = "index_history_uuid"; if (CLUSTER_TYPE == ClusterType.OLD) { @@ -164,14 +168,27 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { } private void assertCount(final String index, final String preference, final int expectedCount) throws IOException { - final Request request = new Request("GET", index + "/_count"); - request.addParameter("preference", preference); - final Response response = client().performRequest(request); - final int actualCount = Integer.parseInt(ObjectPath.createFromResponse(response).evaluate("count").toString()); - assertThat("preference [" + preference + "]", actualCount, equalTo(expectedCount)); + final int actualDocs; + try { + final Request request = new Request("GET", index + "/_count"); + if (preference != null) { + request.addParameter("preference", preference); + } + final Response response = client().performRequest(request); + actualDocs = Integer.parseInt(ObjectPath.createFromResponse(response).evaluate("count").toString()); + } catch (ResponseException e) { + try { + final Response recoveryStateResponse = client().performRequest(new Request("GET", index + "/_recovery")); + fail("failed to get doc count for index [" + index + "] with preference [" + preference + "]" + " response [" + e + "]" + + " recovery [" + EntityUtils.toString(recoveryStateResponse.getEntity()) + "]"); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw e; + } + assertThat("preference [" + preference + "]", actualDocs, equalTo(expectedCount)); } - private String getNodeId(Predicate versionPredicate) throws IOException { Response response = client().performRequest(new Request("GET", "_nodes")); ObjectPath objectPath = ObjectPath.createFromResponse(response); @@ -185,7 +202,6 @@ private String getNodeId(Predicate versionPredicate) throws IOException return null; } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34950") public void testRelocationWithConcurrentIndexing() throws Exception { final String index = "relocation_with_concurrent_indexing"; switch (CLUSTER_TYPE) { @@ -219,6 +235,15 @@ public void testRelocationWithConcurrentIndexing() throws Exception { ensureNoInitializingShards(); // wait for all other shard activity to finish updateIndexSettings(index, Settings.builder().put("index.routing.allocation.include._id", newNode)); asyncIndexDocs(index, 10, 50).get(); + // ensure the relocation from old node to new node has occurred; otherwise ensureGreen can + // return true even though shards haven't moved to the new node yet (allocation was throttled). + assertBusy(() -> { + Map state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); + String xpath = "routing_table.indices." 
+ index + ".shards.0.node"; + @SuppressWarnings("unchecked") List assignedNodes = (List) XContentMapValues.extractValue(xpath, state); + assertNotNull(state.toString(), assignedNodes); + assertThat(state.toString(), newNode, isIn(assignedNodes)); + }, 60, TimeUnit.SECONDS); ensureGreen(index); client().performRequest(new Request("POST", index + "/_refresh")); assertCount(index, "_only_nodes:" + newNode, 60); @@ -300,10 +325,33 @@ public void testRecoveryWithSoftDeletes() throws Exception { if (randomBoolean()) { indexDocs(index, i, 1); // update } else if (randomBoolean()) { - client().performRequest(new Request("DELETE", index + "/test/" + i)); + if (getNodeId(v -> v.onOrAfter(Version.V_7_0_0)) == null) { + client().performRequest(new Request("DELETE", index + "/test/" + i)); + } else { + client().performRequest(new Request("DELETE", index + "/_doc/" + i)); + } } } } ensureGreen(index); } + + /** Ensure that we can always execute update requests regardless of the version of cluster */ + public void testUpdateDoc() throws Exception { + final String index = "test_update_doc"; + if (CLUSTER_TYPE == ClusterType.OLD) { + Settings.Builder settings = Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); + createIndex(index, settings.build()); + } + ensureGreen(index); + indexDocs(index, 0, 10); + for (int i = 0; i < 10; i++) { + Request update = new Request("POST", index + "/test/" + i + "/_update/"); + update.setOptions(expectWarnings(RestUpdateAction.TYPES_DEPRECATION_MESSAGE)); + update.setJsonEntity("{\"doc\": {\"f\": " + randomNonNegativeLong() + "}}"); + client().performRequest(update); + } + } } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml index bd07ee8a58469..375ba12a35621 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml @@ -67,8 +67,3 @@ - match: { hits.total: 1 } - match: { hits.hits.0._id: q3 } ---- -"Index with _all is available": - - do: - indices.get: - index: all-index diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml index a26a3f8274d99..2672cee7cc78a 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml @@ -200,19 +200,3 @@ wait_for_completion: true task_id: $task ---- -"Create an index with _all explicitly disabled": - - skip: - features: warnings - - do: - warnings: - - "[_all] is deprecated in 6.0+ and will be removed in 7.0. As a replacement, you can use [copy_to] on mapping fields to create your own catch all field." 
- indices.create: - index: all-index - body: - mappings: - _all: - enabled: false - properties: - field: - type: text diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 63e67652127e9..78a3205ae13b0 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -125,17 +125,3 @@ task_id: $task_id - match: { task.headers.X-Opaque-Id: "Reindexing Again" } ---- -"Index with _all is available": - - do: - indices.get: - index: all-index - - - do: - indices.get_mapping: - include_type_name: false - index: all-index - - - is_true: all-index.mappings._all - - match: { all-index.mappings._all.enabled: false} - diff --git a/qa/unconfigured-node-name/build.gradle b/qa/unconfigured-node-name/build.gradle index 5aba0562e03f6..3b0faa10a7e33 100644 --- a/qa/unconfigured-node-name/build.gradle +++ b/qa/unconfigured-node-name/build.gradle @@ -29,6 +29,6 @@ integTestCluster { } integTestRunner { - systemProperty 'tests.logfile', + nonInputProperties.systemProperty 'tests.logfile', "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json" } diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index bd5f3e7a2ac1c..ac0bfe78aadd3 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -71,7 +71,7 @@ forbiddenApisMain { } // we don't have additional tests for the tests themselves -tasks.unitTest.enabled = false +tasks.test.enabled = false // Tests are destructive and meant to run in a VM, they don't adhere to general conventions testingConventions.enabled = false diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java index a2b11eaff5571..06c978b823a61 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java @@ -21,28 +21,36 @@ import org.elasticsearch.packaging.test.DefaultDebBasicTests; import org.elasticsearch.packaging.test.DefaultDebPreservationTests; +import org.elasticsearch.packaging.test.DefaultLinuxTarTests; +import org.elasticsearch.packaging.test.DefaultNoJdkDebBasicTests; +import org.elasticsearch.packaging.test.DefaultNoJdkLinuxTarTests; +import org.elasticsearch.packaging.test.DefaultNoJdkRpmBasicTests; +import org.elasticsearch.packaging.test.DefaultNoJdkWindowsZipTests; import org.elasticsearch.packaging.test.DefaultRpmBasicTests; import org.elasticsearch.packaging.test.DefaultRpmPreservationTests; -import org.elasticsearch.packaging.test.DefaultTarTests; import org.elasticsearch.packaging.test.DefaultWindowsServiceTests; -import org.elasticsearch.packaging.test.DefaultZipTests; +import org.elasticsearch.packaging.test.DefaultWindowsZipTests; import org.elasticsearch.packaging.test.OssDebBasicTests; import org.elasticsearch.packaging.test.OssDebPreservationTests; +import org.elasticsearch.packaging.test.OssLinuxTarTests; +import org.elasticsearch.packaging.test.OssNoJdkDebBasicTests; +import org.elasticsearch.packaging.test.OssNoJdkLinuxTarTests; +import org.elasticsearch.packaging.test.OssNoJdkRpmBasicTests; +import org.elasticsearch.packaging.test.OssNoJdkWindowsZipTests; import org.elasticsearch.packaging.test.OssRpmBasicTests; import 
org.elasticsearch.packaging.test.OssRpmPreservationTests; -import org.elasticsearch.packaging.test.OssTarTests; import org.elasticsearch.packaging.test.OssWindowsServiceTests; -import org.elasticsearch.packaging.test.OssZipTests; +import org.elasticsearch.packaging.test.OssWindowsZipTests; import org.junit.runner.RunWith; import org.junit.runners.Suite; import org.junit.runners.Suite.SuiteClasses; @RunWith(Suite.class) @SuiteClasses({ - DefaultTarTests.class, - OssTarTests.class, - DefaultZipTests.class, - OssZipTests.class, + DefaultLinuxTarTests.class, + OssLinuxTarTests.class, + DefaultWindowsZipTests.class, + OssWindowsZipTests.class, DefaultRpmBasicTests.class, OssRpmBasicTests.class, DefaultDebBasicTests.class, @@ -52,6 +60,14 @@ DefaultRpmPreservationTests.class, OssRpmPreservationTests.class, DefaultWindowsServiceTests.class, - OssWindowsServiceTests.class + OssWindowsServiceTests.class, + DefaultNoJdkLinuxTarTests.class, + OssNoJdkLinuxTarTests.class, + DefaultNoJdkWindowsZipTests.class, + OssNoJdkWindowsZipTests.class, + DefaultNoJdkRpmBasicTests.class, + OssNoJdkRpmBasicTests.class, + DefaultNoJdkDebBasicTests.class, + OssNoJdkDebBasicTests.class }) public class PackagingTests {} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java index a6aad032baa5d..d427017d0b041 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java @@ -20,22 +20,24 @@ package org.elasticsearch.packaging.test; import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; +import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.http.client.fluent.Request; import org.elasticsearch.packaging.util.Archives; import org.elasticsearch.packaging.util.Distribution; +import org.elasticsearch.packaging.util.FileUtils; import org.elasticsearch.packaging.util.Installation; import org.elasticsearch.packaging.util.Platforms; import org.elasticsearch.packaging.util.ServerUtils; import org.elasticsearch.packaging.util.Shell; import org.elasticsearch.packaging.util.Shell.Result; -import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; -import java.util.Arrays; +import java.nio.file.Paths; import java.util.stream.Stream; -import static java.util.stream.Collectors.joining; +import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.elasticsearch.packaging.util.Archives.ARCHIVE_OWNER; import static org.elasticsearch.packaging.util.Archives.installArchive; import static org.elasticsearch.packaging.util.Archives.verifyArchiveInstallation; @@ -46,9 +48,11 @@ import static org.elasticsearch.packaging.util.FileUtils.cp; import static org.elasticsearch.packaging.util.FileUtils.getTempDir; import static org.elasticsearch.packaging.util.FileUtils.mkdir; +import static org.elasticsearch.packaging.util.FileUtils.mv; import static org.elasticsearch.packaging.util.FileUtils.rm; import static org.elasticsearch.packaging.util.ServerUtils.makeRequest; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.notNullValue; @@ -62,68 +66,50 @@ 
@TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class) public abstract class ArchiveTestCase extends PackagingTestCase { - public void test10Install() { + public void test10Install() throws Exception { installation = installArchive(distribution()); verifyArchiveInstallation(installation, distribution()); } - public void test20PluginsListWithNoPlugins() { + public void test20PluginsListWithNoPlugins() throws Exception { assumeThat(installation, is(notNullValue())); final Installation.Executables bin = installation.executables(); - final Shell sh = new Shell(); + final Shell sh = newShell(); final Result r = sh.run(bin.elasticsearchPlugin + " list"); assertThat(r.stdout, isEmptyString()); } - public void test30AbortWhenJavaMissing() { + public void test30NoJava() throws Exception { assumeThat(installation, is(notNullValue())); final Installation.Executables bin = installation.executables(); - final Shell sh = new Shell(); + final Shell sh = newShell(); + sh.getEnv().remove("JAVA_HOME"); - Platforms.onWindows(() -> { - // on windows, removing java from PATH and removing JAVA_HOME is less involved than changing the permissions of the java - // executable. we also don't check permissions in the windows scripts anyway - final String originalPath = sh.run("$Env:PATH").stdout.trim(); - final String newPath = Arrays.stream(originalPath.split(";")) - .filter(path -> path.contains("Java") == false) - .collect(joining(";")); - - // note the lack of a $ when clearing the JAVA_HOME env variable - with a $ it deletes the java home directory - // https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/providers/environment-provider?view=powershell-6 - // - // this won't persist to another session so we don't have to reset anything - final Result runResult = sh.runIgnoreExitCode( - "$Env:PATH = '" + newPath + "'; " + - "Remove-Item Env:JAVA_HOME; " + - bin.elasticsearch - ); + final Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); + try { + if (distribution().hasJdk) { + mv(installation.bundledJdk, relocatedJdk); + } + // ask for elasticsearch version to quickly exit if java is actually found (ie test failure) + final Result runResult = sh.runIgnoreExitCode(bin.elasticsearch.toString() + " -v"); assertThat(runResult.exitCode, is(1)); - assertThat(runResult.stderr, containsString("could not find java; set JAVA_HOME or ensure java is in PATH")); - }); - - Platforms.onLinux(() -> { - final String javaPath = sh.run("command -v java").stdout.trim(); - - try { - sh.run("chmod -x '" + javaPath + "'"); - final Result runResult = sh.runIgnoreExitCode(bin.elasticsearch.toString()); - assertThat(runResult.exitCode, is(1)); - assertThat(runResult.stderr, containsString("could not find java; set JAVA_HOME or ensure java is in PATH")); - } finally { - sh.run("chmod +x '" + javaPath + "'"); + assertThat(runResult.stderr, containsString("could not find java in JAVA_HOME or bundled")); + } finally { + if (distribution().hasJdk) { + mv(relocatedJdk, installation.bundledJdk); } - }); + } } - public void test40CreateKeystoreManually() { + public void test40CreateKeystoreManually() throws Exception { assumeThat(installation, is(notNullValue())); final Installation.Executables bin = installation.executables(); - final Shell sh = new Shell(); + final Shell sh = newShell(); Platforms.onLinux(() -> sh.run("sudo -u " + ARCHIVE_OWNER + " " + bin.elasticsearchKeystore + " create")); @@ -152,30 +138,127 @@ public void test40CreateKeystoreManually() { }); } - public void 
test50StartAndStop() throws IOException { + public void test50StartAndStop() throws Exception { assumeThat(installation, is(notNullValue())); // cleanup from previous test rm(installation.config("elasticsearch.keystore")); - Archives.runElasticsearch(installation); + Archives.runElasticsearch(installation, newShell()); - final String gcLogName = Platforms.LINUX + final String gcLogName = Platforms.LINUX && distribution().hasJdk == false ? "gc.log.0.current" : "gc.log"; + assertTrue("gc logs exist", Files.exists(installation.logs.resolve(gcLogName))); ServerUtils.runElasticsearchTests(); Archives.stopElasticsearch(installation); } - public void test60AutoCreateKeystore() { + public void assertRunsWithJavaHome() throws Exception { + Shell sh = newShell(); + + Platforms.onLinux(() -> { + String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + sh.getEnv().put("JAVA_HOME", systemJavaHome); + }); + Platforms.onWindows(() -> { + final String systemJavaHome = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); + sh.getEnv().put("JAVA_HOME", systemJavaHome); + }); + + Archives.runElasticsearch(installation, sh); + ServerUtils.runElasticsearchTests(); + Archives.stopElasticsearch(installation); + + String systemJavaHome = sh.getEnv().get("JAVA_HOME"); + Path log = installation.logs.resolve("elasticsearch.log"); + assertThat(new String(Files.readAllBytes(log), StandardCharsets.UTF_8), containsString(systemJavaHome)); + } + + public void test51JavaHomeOverride() throws Exception { + assumeThat(installation, is(notNullValue())); + + assertRunsWithJavaHome(); + } + + public void test52BundledJdkRemoved() throws Exception { + assumeThat(installation, is(notNullValue())); + assumeThat(distribution().hasJdk, is(true)); + + Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); + try { + mv(installation.bundledJdk, relocatedJdk); + assertRunsWithJavaHome(); + } finally { + mv(relocatedJdk, installation.bundledJdk); + } + } + + public void test53JavaHomeWithSpecialCharacters() throws Exception { + assumeThat(installation, is(notNullValue())); + + Platforms.onWindows(() -> { + final Shell sh = new Shell(); + try { + // once windows 2012 is no longer supported and powershell 5.0 is always available we can change this command + sh.run("cmd /c mklink /D 'C:\\Program Files (x86)\\java' $Env:SYSTEM_JAVA_HOME"); + + sh.getEnv().put("JAVA_HOME", "C:\\Program Files (x86)\\java"); + + //verify ES can start, stop and run plugin list + Archives.runElasticsearch(installation, sh); + + Archives.stopElasticsearch(installation); + + String pluginListCommand = installation.bin + "/elasticsearch-plugin list"; + Result result = sh.run(pluginListCommand); + assertThat(result.exitCode, equalTo(0)); + + } finally { + //clean up sym link + sh.run("cmd /c rmdir 'C:\\Program Files (x86)\\java' "); + } + }); + + Platforms.onLinux(() -> { + final Shell sh = new Shell(); + // Create temporary directory with a space and link to java binary. 
+ // Use it as java_home + String nameWithSpace = RandomStrings.randomAsciiAlphanumOfLength(getRandom(), 10) + "java home"; + String test_java_home = FileUtils.mkdir(Paths.get("/home",ARCHIVE_OWNER, nameWithSpace)).toAbsolutePath().toString(); + try { + final String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + final String java = systemJavaHome + "/bin/java"; + + sh.run("mkdir -p \"" + test_java_home + "/bin\""); + sh.run("ln -s \"" + java + "\" \"" + test_java_home + "/bin/java\""); + sh.run("chown -R " + ARCHIVE_OWNER + ":" + ARCHIVE_OWNER + " \"" + test_java_home + "\""); + + sh.getEnv().put("JAVA_HOME", test_java_home); + + //verify ES can start, stop and run plugin list + Archives.runElasticsearch(installation, sh); + + Archives.stopElasticsearch(installation); + + String pluginListCommand = installation.bin + "/elasticsearch-plugin list"; + Result result = sh.run(pluginListCommand); + assertThat(result.exitCode, equalTo(0)); + } finally { + FileUtils.rm(Paths.get("\"" + test_java_home + "\"")); + } + }); + } + + public void test60AutoCreateKeystore() throws Exception { assumeThat(installation, is(notNullValue())); assertThat(installation.config("elasticsearch.keystore"), file(File, ARCHIVE_OWNER, ARCHIVE_OWNER, p660)); final Installation.Executables bin = installation.executables(); - final Shell sh = new Shell(); + final Shell sh = newShell(); Platforms.onLinux(() -> { final Result result = sh.run("sudo -u " + ARCHIVE_OWNER + " " + bin.elasticsearchKeystore + " list"); @@ -188,7 +271,7 @@ public void test60AutoCreateKeystore() { }); } - public void test70CustomPathConfAndJvmOptions() throws IOException { + public void test70CustomPathConfAndJvmOptions() throws Exception { assumeThat(installation, is(notNullValue())); final Path tempConf = getTempDir().resolve("esconf-alternate"); @@ -207,7 +290,7 @@ public void test70CustomPathConfAndJvmOptions() throws IOException { "-Dlog4j2.disable.jmx=true\n"; append(tempConf.resolve("jvm.options"), jvmOptions); - final Shell sh = new Shell(); + final Shell sh = newShell(); Platforms.onLinux(() -> sh.run("chown -R elasticsearch:elasticsearch " + tempConf)); Platforms.onWindows(() -> sh.run( "$account = New-Object System.Security.Principal.NTAccount 'vagrant'; " + @@ -220,7 +303,7 @@ public void test70CustomPathConfAndJvmOptions() throws IOException { "}" )); - final Shell serverShell = new Shell(); + final Shell serverShell = newShell(); serverShell.getEnv().put("ES_PATH_CONF", tempConf.toString()); serverShell.getEnv().put("ES_JAVA_OPTS", "-XX:-UseCompressedOops"); @@ -237,7 +320,7 @@ public void test70CustomPathConfAndJvmOptions() throws IOException { } } - public void test80RelativePathConf() throws IOException { + public void test80RelativePathConf() throws Exception { assumeThat(installation, is(notNullValue())); final Path temp = getTempDir().resolve("esconf-alternate"); @@ -253,7 +336,7 @@ public void test80RelativePathConf() throws IOException { append(tempConf.resolve("elasticsearch.yml"), "node.name: relative"); - final Shell sh = new Shell(); + final Shell sh = newShell(); Platforms.onLinux(() -> sh.run("chown -R elasticsearch:elasticsearch " + temp)); Platforms.onWindows(() -> sh.run( "$account = New-Object System.Security.Principal.NTAccount 'vagrant'; " + @@ -266,7 +349,8 @@ public void test80RelativePathConf() throws IOException { "}" )); - final Shell serverShell = new Shell(temp); + final Shell serverShell = newShell(); + serverShell.setWorkingDirectory(temp); serverShell.getEnv().put("ES_PATH_CONF", 
"config"); Archives.runElasticsearch(installation, serverShell); @@ -280,11 +364,11 @@ public void test80RelativePathConf() throws IOException { } } - public void test90SecurityCliPackaging() { + public void test90SecurityCliPackaging() throws Exception { assumeThat(installation, is(notNullValue())); final Installation.Executables bin = installation.executables(); - final Shell sh = new Shell(); + final Shell sh = newShell(); if (distribution().equals(Distribution.DEFAULT_LINUX) || distribution().equals(Distribution.DEFAULT_WINDOWS)) { assertTrue(Files.exists(installation.lib.resolve("tools").resolve("security-cli"))); @@ -304,14 +388,14 @@ public void test90SecurityCliPackaging() { } } - public void test100ElasticsearchShardCliPackaging() { + public void test91ElasticsearchShardCliPackaging() throws Exception { assumeThat(installation, is(notNullValue())); final Installation.Executables bin = installation.executables(); - final Shell sh = new Shell(); + final Shell sh = newShell(); Platforms.PlatformAction action = () -> { - final Result result = sh.run(bin.elasticsearchShard + " help"); + final Result result = sh.run(bin.elasticsearchShard + " -h"); assertThat(result.stdout, containsString("A CLI tool to remove corrupted parts of unrecoverable shards")); }; @@ -321,16 +405,61 @@ public void test100ElasticsearchShardCliPackaging() { } } - public void test110ElasticsearchNodeCliPackaging() { + public void test92ElasticsearchNodeCliPackaging() throws Exception { assumeThat(installation, is(notNullValue())); final Installation.Executables bin = installation.executables(); - final Shell sh = new Shell(); + final Shell sh = newShell(); Platforms.PlatformAction action = () -> { final Result result = sh.run(bin.elasticsearchNode + " -h"); assertThat(result.stdout, - containsString("A CLI tool to unsafely recover a cluster after the permanent loss of too many master-eligible nodes")); + containsString("A CLI tool to do unsafe cluster and index manipulations on current node")); + }; + + if (distribution().equals(Distribution.DEFAULT_LINUX) || distribution().equals(Distribution.DEFAULT_WINDOWS)) { + Platforms.onLinux(action); + Platforms.onWindows(action); + } + } + + public void test93ElasticsearchNodeCustomDataPathAndNotEsHomeWorkDir() throws Exception { + assumeThat(installation, is(notNullValue())); + + Path relativeDataPath = installation.data.relativize(installation.home); + append(installation.config("elasticsearch.yml"), "path.data: " + relativeDataPath); + + final Shell sh = newShell(); + sh.setWorkingDirectory(getTempDir()); + + Archives.runElasticsearch(installation, sh); + Archives.stopElasticsearch(installation); + + Result result = sh.run("echo y | " + installation.executables().elasticsearchNode + " unsafe-bootstrap"); + assertThat(result.stdout, containsString("Master node was successfully bootstrapped")); + } + + public void test94ElasticsearchNodeExecuteCliNotEsHomeWorkDir() throws Exception { + assumeThat(installation, is(notNullValue())); + + final Installation.Executables bin = installation.executables(); + final Shell sh = newShell(); + // Run the cli tools from the tmp dir + sh.setWorkingDirectory(getTempDir()); + + Platforms.PlatformAction action = () -> { + Result result = sh.run(bin.elasticsearchCertutil+ " -h"); + assertThat(result.stdout, + containsString("Simplifies certificate creation for use with the Elastic Stack")); + result = sh.run(bin.elasticsearchSyskeygen+ " -h"); + assertThat(result.stdout, + containsString("system key tool")); + result = 
sh.run(bin.elasticsearchSetupPasswords+ " -h"); + assertThat(result.stdout, + containsString("Sets the passwords for reserved users")); + result = sh.run(bin.elasticsearchUsers+ " -h"); + assertThat(result.stdout, + containsString("Manages elasticsearch file users")); }; if (distribution().equals(Distribution.DEFAULT_LINUX) || distribution().equals(Distribution.DEFAULT_WINDOWS)) { diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DebPreservationTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DebPreservationTestCase.java index 522c038fc2bf3..12597ae8b4de2 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DebPreservationTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DebPreservationTestCase.java @@ -36,14 +36,12 @@ import static org.elasticsearch.packaging.util.Packages.assertInstalled; import static org.elasticsearch.packaging.util.Packages.assertRemoved; import static org.elasticsearch.packaging.util.Packages.install; -import static org.elasticsearch.packaging.util.Packages.remove; import static org.elasticsearch.packaging.util.Packages.packageStatus; +import static org.elasticsearch.packaging.util.Packages.remove; import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation; import static org.elasticsearch.packaging.util.Platforms.isDPKG; import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeThat; import static org.junit.Assume.assumeTrue; @@ -55,7 +53,7 @@ public abstract class DebPreservationTestCase extends PackagingTestCase { protected abstract Distribution distribution(); @BeforeClass - public static void cleanup() { + public static void cleanup() throws Exception { installation = null; cleanEverything(); } @@ -66,14 +64,14 @@ public void onlyCompatibleDistributions() { assumeTrue("only compatible distributions", distribution().packaging.compatible); } - public void test10Install() { + public void test10Install() throws Exception { assertRemoved(distribution()); installation = install(distribution()); assertInstalled(distribution()); - verifyPackageInstallation(installation, distribution()); + verifyPackageInstallation(installation, distribution(), newShell()); } - public void test20Remove() { + public void test20Remove() throws Exception { assumeThat(installation, is(notNullValue())); remove(distribution()); @@ -118,7 +116,7 @@ public void test20Remove() { assertTrue(Files.exists(installation.envFile)); } - public void test30Purge() { + public void test30Purge() throws Exception { assumeThat(installation, is(notNullValue())); final Shell sh = new Shell(); diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultTarTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultLinuxTarTests.java similarity index 94% rename from qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultTarTests.java rename to qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultLinuxTarTests.java index b6c633c0e72a2..bcca1a7e9e049 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultTarTests.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultLinuxTarTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.packaging.util.Distribution; -public class DefaultTarTests extends ArchiveTestCase { +public class 
DefaultLinuxTarTests extends ArchiveTestCase { @Override protected Distribution distribution() { diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultNoJdkDebBasicTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultNoJdkDebBasicTests.java new file mode 100644 index 0000000000000..23c87b6e936c4 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultNoJdkDebBasicTests.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; + +public class DefaultNoJdkDebBasicTests extends PackageTestCase { + + @Override + protected Distribution distribution() { + return Distribution.DEFAULT_NO_JDK_DEB; + } + +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultNoJdkLinuxTarTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultNoJdkLinuxTarTests.java new file mode 100644 index 0000000000000..fce7c55671817 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultNoJdkLinuxTarTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; + +public class DefaultNoJdkLinuxTarTests extends ArchiveTestCase { + + @Override + protected Distribution distribution() { + return Distribution.DEFAULT_NO_JDK_LINUX; + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultNoJdkRpmBasicTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultNoJdkRpmBasicTests.java new file mode 100644 index 0000000000000..3bb5aa8eae898 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultNoJdkRpmBasicTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; + +public class DefaultNoJdkRpmBasicTests extends PackageTestCase { + + @Override + protected Distribution distribution() { + return Distribution.DEFAULT_NO_JDK_RPM; + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultNoJdkWindowsZipTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultNoJdkWindowsZipTests.java new file mode 100644 index 0000000000000..d797bdaa9f312 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultNoJdkWindowsZipTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; + +public class DefaultNoJdkWindowsZipTests extends ArchiveTestCase { + + @Override + protected Distribution distribution() { + return Distribution.DEFAULT_NO_JDK_WINDOWS; + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultZipTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultWindowsZipTests.java similarity index 94% rename from qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultZipTests.java rename to qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultWindowsZipTests.java index 852535188cf9b..a7491d5b0ac5d 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultZipTests.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultWindowsZipTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.packaging.util.Distribution; -public class DefaultZipTests extends ArchiveTestCase { +public class DefaultWindowsZipTests extends ArchiveTestCase { @Override protected Distribution distribution() { diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssTarTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssLinuxTarTests.java similarity index 94% rename from qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssTarTests.java rename to qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssLinuxTarTests.java index 3e72f1da5cbb0..bf4305aab53ed 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssTarTests.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssLinuxTarTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.packaging.util.Distribution; -public class OssTarTests extends ArchiveTestCase { +public class OssLinuxTarTests extends ArchiveTestCase { @Override protected Distribution distribution() { diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssNoJdkDebBasicTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssNoJdkDebBasicTests.java new file mode 100644 index 0000000000000..47d2f662f4d31 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssNoJdkDebBasicTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; + +public class OssNoJdkDebBasicTests extends PackageTestCase { + + @Override + protected Distribution distribution() { + return Distribution.OSS_NO_JDK_DEB; + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssNoJdkLinuxTarTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssNoJdkLinuxTarTests.java new file mode 100644 index 0000000000000..dae5068f36238 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssNoJdkLinuxTarTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; + +public class OssNoJdkLinuxTarTests extends ArchiveTestCase { + + @Override + protected Distribution distribution() { + return Distribution.OSS_NO_JDK_LINUX; + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssNoJdkRpmBasicTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssNoJdkRpmBasicTests.java new file mode 100644 index 0000000000000..1ebf704303982 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssNoJdkRpmBasicTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; + +public class OssNoJdkRpmBasicTests extends PackageTestCase { + + @Override + protected Distribution distribution() { + return Distribution.OSS_NO_JDK_RPM; + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssNoJdkWindowsZipTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssNoJdkWindowsZipTests.java new file mode 100644 index 0000000000000..639137e887985 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssNoJdkWindowsZipTests.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.packaging.util.Distribution; + +public class OssNoJdkWindowsZipTests extends ArchiveTestCase { + + @Override + protected Distribution distribution() { + return Distribution.OSS_NO_JDK_WINDOWS; + } +} diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssZipTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssWindowsZipTests.java similarity index 94% rename from qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssZipTests.java rename to qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssWindowsZipTests.java index 418ba6d344650..2a0df6cab96cf 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssZipTests.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssWindowsZipTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.packaging.util.Distribution; -public class OssZipTests extends ArchiveTestCase { +public class OssWindowsZipTests extends ArchiveTestCase { @Override protected Distribution distribution() { diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java index cc2ae29653c42..c664e28931087 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java @@ -20,30 +20,46 @@ package org.elasticsearch.packaging.test; import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.apache.http.client.fluent.Request; +import org.elasticsearch.packaging.util.FileUtils; import org.elasticsearch.packaging.util.Shell; import org.elasticsearch.packaging.util.Shell.Result; +import org.hamcrest.CoreMatchers; import org.junit.Before; -import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import 
java.nio.file.StandardOpenOption; import java.util.regex.Matcher; import java.util.regex.Pattern; +import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; +import static org.elasticsearch.packaging.util.FileUtils.append; import static org.elasticsearch.packaging.util.FileUtils.assertPathsDontExist; +import static org.elasticsearch.packaging.util.FileUtils.assertPathsExist; +import static org.elasticsearch.packaging.util.FileUtils.cp; +import static org.elasticsearch.packaging.util.FileUtils.fileWithGlobExist; +import static org.elasticsearch.packaging.util.FileUtils.mkdir; import static org.elasticsearch.packaging.util.FileUtils.mv; +import static org.elasticsearch.packaging.util.FileUtils.rm; +import static org.elasticsearch.packaging.util.FileUtils.slurp; import static org.elasticsearch.packaging.util.Packages.SYSTEMD_SERVICE; import static org.elasticsearch.packaging.util.Packages.assertInstalled; import static org.elasticsearch.packaging.util.Packages.assertRemoved; import static org.elasticsearch.packaging.util.Packages.install; import static org.elasticsearch.packaging.util.Packages.remove; -import static org.elasticsearch.packaging.util.Packages.runInstallCommand; +import static org.elasticsearch.packaging.util.Packages.restartElasticsearch; import static org.elasticsearch.packaging.util.Packages.startElasticsearch; +import static org.elasticsearch.packaging.util.Packages.stopElasticsearch; import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation; import static org.elasticsearch.packaging.util.Platforms.getOsRelease; import static org.elasticsearch.packaging.util.Platforms.isSystemd; +import static org.elasticsearch.packaging.util.ServerUtils.makeRequest; import static org.elasticsearch.packaging.util.ServerUtils.runElasticsearchTests; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.containsString; @@ -54,64 +70,102 @@ @TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class) public abstract class PackageTestCase extends PackagingTestCase { + private Shell sh; @Before - public void onlyCompatibleDistributions() { + public void onlyCompatibleDistributions() throws Exception { assumeTrue("only compatible distributions", distribution().packaging.compatible); + sh = newShell(); } - public void test05InstallFailsWhenJavaMissing() { - final Shell sh = new Shell(); - final Result java = sh.run("command -v java"); - - final Path originalJavaPath = Paths.get(java.stdout.trim()); - final Path relocatedJavaPath = originalJavaPath.getParent().resolve("java.relocated"); - try { - mv(originalJavaPath, relocatedJavaPath); - final Result installResult = runInstallCommand(distribution()); - assertThat(installResult.exitCode, is(1)); - assertThat(installResult.stderr, containsString("could not find java; set JAVA_HOME or ensure java is in PATH")); - } finally { - mv(relocatedJavaPath, originalJavaPath); - } - } - - public void test10InstallPackage() { + public void test10InstallPackage() throws Exception { assertRemoved(distribution()); installation = install(distribution()); assertInstalled(distribution()); - verifyPackageInstallation(installation, distribution()); + verifyPackageInstallation(installation, distribution(), sh); } - public void test20PluginsCommandWhenNoPlugins() { + public void test20PluginsCommandWhenNoPlugins() throws Exception { assumeThat(installation, is(notNullValue())); - final Shell sh = new 
Shell(); assertThat(sh.run(installation.bin("elasticsearch-plugin") + " list").stdout, isEmptyString()); } - public void test30InstallDoesNotStartServer() { + public void test30DaemonIsNotEnabledOnRestart() { + if (isSystemd()) { + sh.run("systemctl daemon-reload"); + String isEnabledOutput = sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").stdout.trim(); + assertThat(isEnabledOutput, equalTo("disabled")); + } + } + + public void test31InstallDoesNotStartServer() { assumeThat(installation, is(notNullValue())); - final Shell sh = new Shell(); assertThat(sh.run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); } - public void test40StartServer() throws IOException { + public void assertRunsWithJavaHome() throws Exception { + String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + byte[] originalEnvFile = Files.readAllBytes(installation.envFile); + try { + Files.write(installation.envFile, ("JAVA_HOME=" + systemJavaHome + "\n").getBytes(StandardCharsets.UTF_8), + StandardOpenOption.APPEND); + startElasticsearch(sh); + runElasticsearchTests(); + stopElasticsearch(sh); + } finally { + Files.write(installation.envFile, originalEnvFile); + } + + Path log = installation.logs.resolve("elasticsearch.log"); + assertThat(new String(Files.readAllBytes(log), StandardCharsets.UTF_8), containsString(systemJavaHome)); + } + + public void test32JavaHomeOverride() throws Exception { assumeThat(installation, is(notNullValue())); + // we always run with java home when no bundled jdk is included, so this test would be repetitive + assumeThat(distribution().hasJdk, is(true)); + + assertRunsWithJavaHome(); + } + + public void test42BundledJdkRemoved() throws Exception { + assumeThat(installation, is(notNullValue())); + assumeThat(distribution().hasJdk, is(true)); + + Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); + try { + mv(installation.bundledJdk, relocatedJdk); + assertRunsWithJavaHome(); + } finally { + mv(relocatedJdk, installation.bundledJdk); + } + } + + public void test40StartServer() throws Exception { + String start = sh.runIgnoreExitCode("date ").stdout.trim(); + assumeThat(installation, is(notNullValue())); + + startElasticsearch(sh); + + String journalEntries = sh.runIgnoreExitCode("journalctl _SYSTEMD_UNIT=elasticsearch.service " + + "--since \"" + start + "\" --output cat | wc -l").stdout.trim(); + assertThat(journalEntries, equalTo("0")); + + assertPathsExist(installation.pidDir.resolve("elasticsearch.pid")); + assertPathsExist(installation.logs.resolve("elasticsearch_server.json")); - startElasticsearch(); runElasticsearchTests(); - verifyPackageInstallation(installation, distribution()); // check startup script didn't change permissions + verifyPackageInstallation(installation, distribution(), sh); // check startup script didn't change permissions } - public void test50Remove() { + public void test50Remove() throws Exception { assumeThat(installation, is(notNullValue())); remove(distribution()); // removing must stop the service - final Shell sh = new Shell(); assertThat(sh.run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); if (isSystemd()) { @@ -156,14 +210,166 @@ public void test50Remove() { assertFalse(Files.exists(SYSTEMD_SERVICE)); } - public void test60Reinstall() { + public void test60Reinstall() throws Exception { assumeThat(installation, is(notNullValue())); installation = install(distribution()); assertInstalled(distribution()); - 
verifyPackageInstallation(installation, distribution()); + verifyPackageInstallation(installation, distribution(), sh); remove(distribution()); assertRemoved(distribution()); } + + public void test70RestartServer() throws Exception { + try { + installation = install(distribution()); + assertInstalled(distribution()); + + startElasticsearch(sh); + restartElasticsearch(sh); + runElasticsearchTests(); + stopElasticsearch(sh); + } finally { + cleanup(); + } + } + + + public void test72TestRuntimeDirectory() throws Exception { + try { + installation = install(distribution()); + FileUtils.rm(installation.pidDir); + startElasticsearch(sh); + assertPathsExist(installation.pidDir); + stopElasticsearch(sh); + } finally { + cleanup(); + } + } + + public void test73gcLogsExist() throws Exception { + installation = install(distribution()); + startElasticsearch(sh); + // it can be gc.log or gc.log.0.current + assertThat(installation.logs, fileWithGlobExist("gc.log*")); + stopElasticsearch(sh); + } + + // TEST CASES FOR SYSTEMD ONLY + + + /** + * # Simulates the behavior of a system restart: + * # the PID directory is deleted by the operating system + * # but it should not block ES from starting + * # see https://github.com/elastic/elasticsearch/issues/11594 + */ + public void test80DeletePID_DIRandRestart() throws Exception { + assumeTrue(isSystemd()); + + rm(installation.pidDir); + + sh.run("systemd-tmpfiles --create"); + + startElasticsearch(sh); + + final Path pidFile = installation.pidDir.resolve("elasticsearch.pid"); + + assertTrue(Files.exists(pidFile)); + + stopElasticsearch(sh); + } + + public void test81CustomPathConfAndJvmOptions() throws Exception { + assumeTrue(isSystemd()); + + assumeThat(installation, is(notNullValue())); + assertPathsExist(installation.envFile); + + stopElasticsearch(sh); + + // The custom config directory is not under /tmp or /var/tmp because + // systemd's private temp directory functionally means different + // processes can have different views of what's in these directories + String randomName = RandomStrings.randomAsciiAlphanumOfLength(getRandom(), 10); + sh.run("mkdir /etc/"+randomName); + final Path tempConf = Paths.get("/etc/"+randomName); + + try { + mkdir(tempConf); + cp(installation.config("elasticsearch.yml"), tempConf.resolve("elasticsearch.yml")); + cp(installation.config("log4j2.properties"), tempConf.resolve("log4j2.properties")); + + // we have to disable Log4j from using JMX lest it will hit a security + // manager exception before we have configured logging; this will fail + // startup since we detect usages of logging before it is configured + final String jvmOptions = + "-Xms512m\n" + + "-Xmx512m\n" + + "-Dlog4j2.disable.jmx=true\n"; + append(tempConf.resolve("jvm.options"), jvmOptions); + + sh.runIgnoreExitCode("chown -R elasticsearch:elasticsearch " + tempConf); + + final Shell serverShell = newShell(); + cp(installation.envFile, tempConf.resolve("elasticsearch.bk"));//backup + append(installation.envFile, "ES_PATH_CONF=" + tempConf + "\n"); + append(installation.envFile, "ES_JAVA_OPTS=-XX:-UseCompressedOops"); + + startElasticsearch(serverShell); + + final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes")); + assertThat(nodesResponse, CoreMatchers.containsString("\"heap_init_in_bytes\":536870912")); + assertThat(nodesResponse, CoreMatchers.containsString("\"using_compressed_ordinary_object_pointers\":\"false\"")); + + stopElasticsearch(serverShell); + + } finally { + rm(installation.envFile); + 
cp(tempConf.resolve("elasticsearch.bk"), installation.envFile); + rm(tempConf); + cleanup(); + } + } + + public void test82SystemdMask() throws Exception { + try { + assumeTrue(isSystemd()); + + sh.run("systemctl mask systemd-sysctl.service"); + + installation = install(distribution()); + + sh.run("systemctl unmask systemd-sysctl.service"); + } finally { + cleanup(); + } + } + + public void test83serviceFileSetsLimits() throws Exception { + // Limits are changed on systemd platforms only + assumeTrue(isSystemd()); + + installation = install(distribution()); + + startElasticsearch(sh); + + final Path pidFile = installation.pidDir.resolve("elasticsearch.pid"); + assertTrue(Files.exists(pidFile)); + String pid = slurp(pidFile).trim(); + String maxFileSize = sh.run("cat /proc/%s/limits | grep \"Max file size\" | awk '{ print $4 }'", pid).stdout.trim(); + assertThat(maxFileSize, equalTo("unlimited")); + + String maxProcesses = sh.run("cat /proc/%s/limits | grep \"Max processes\" | awk '{ print $3 }'", pid).stdout.trim(); + assertThat(maxProcesses, equalTo("4096")); + + String maxOpenFiles = sh.run("cat /proc/%s/limits | grep \"Max open files\" | awk '{ print $4 }'", pid).stdout.trim(); + assertThat(maxOpenFiles, equalTo("65535")); + + String maxAddressSpace = sh.run("cat /proc/%s/limits | grep \"Max address space\" | awk '{ print $4 }'", pid).stdout.trim(); + assertThat(maxAddressSpace, equalTo("unlimited")); + + stopElasticsearch(sh); + } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackagingTestCase.java index 81d4a90b2a0ec..bd7738aeac4ac 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -27,6 +27,8 @@ import org.apache.commons.logging.LogFactory; import org.elasticsearch.packaging.util.Distribution; import org.elasticsearch.packaging.util.Installation; +import org.elasticsearch.packaging.util.Platforms; +import org.elasticsearch.packaging.util.Shell; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; @@ -62,7 +64,7 @@ public void setup() { protected static Installation installation; @BeforeClass - public static void cleanup() { + public static void cleanup() throws Exception { installation = null; cleanEverything(); } @@ -70,5 +72,19 @@ public static void cleanup() { /** The {@link Distribution} that should be tested in this case */ protected abstract Distribution distribution(); + protected Shell newShell() throws Exception { + Shell sh = new Shell(); + if (distribution().hasJdk == false) { + Platforms.onLinux(() -> { + String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + sh.getEnv().put("JAVA_HOME", systemJavaHome); + }); + Platforms.onWindows(() -> { + final String systemJavaHome = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); + sh.getEnv().put("JAVA_HOME", systemJavaHome); + }); + } + return sh; + } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/RpmPreservationTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/RpmPreservationTestCase.java index 36558ea2429cc..7b6ac039fc55c 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/RpmPreservationTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/RpmPreservationTestCase.java @@ -55,7 +55,7 @@ public abstract class RpmPreservationTestCase extends 
PackagingTestCase { protected abstract Distribution distribution(); @BeforeClass - public static void cleanup() { + public static void cleanup() throws Exception { installation = null; cleanEverything(); } @@ -66,14 +66,14 @@ public void onlyCompatibleDistributions() { assumeTrue("only compatible distributions", distribution().packaging.compatible); } - public void test10Install() { + public void test10Install() throws Exception { assertRemoved(distribution()); installation = install(distribution()); assertInstalled(distribution()); - verifyPackageInstallation(installation, distribution()); + verifyPackageInstallation(installation, distribution(), newShell()); } - public void test20Remove() { + public void test20Remove() throws Exception { assumeThat(installation, is(notNullValue())); remove(distribution()); @@ -88,12 +88,12 @@ public void test20Remove() { assertFalse(Files.exists(installation.envFile)); } - public void test30PreserveConfig() { + public void test30PreserveConfig() throws Exception { final Shell sh = new Shell(); installation = install(distribution()); assertInstalled(distribution()); - verifyPackageInstallation(installation, distribution()); + verifyPackageInstallation(installation, distribution(), newShell()); sh.run("echo foobar | " + installation.executables().elasticsearchKeystore + " add --stdin foo.bar"); Stream.of( diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java index e9c514ea6bcb2..13ada93e6bef7 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java @@ -35,9 +35,9 @@ import java.util.Arrays; import static com.carrotsearch.randomizedtesting.RandomizedTest.assumeTrue; -import static java.util.stream.Collectors.joining; import static org.elasticsearch.packaging.util.Archives.installArchive; import static org.elasticsearch.packaging.util.Archives.verifyArchiveInstallation; +import static org.elasticsearch.packaging.util.FileUtils.mv; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -65,22 +65,15 @@ public void uninstallService() { } private Result runWithoutJava(String script) { - // on windows, removing java from PATH and removing JAVA_HOME is less involved than changing the permissions of the java - // executable. 
we also don't check permissions in the windows scripts anyway - final String originalPath = sh.run("$Env:PATH").stdout.trim(); - final String newPath = Arrays.stream(originalPath.split(";")) - .filter(path -> path.contains("Java") == false) - .collect(joining(";")); - - // note the lack of a $ when clearing the JAVA_HOME env variable - with a $ it deletes the java home directory - // https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/providers/environment-provider?view=powershell-6 - // - // this won't persist to another session so we don't have to reset anything - return sh.runIgnoreExitCode( - "$Env:PATH = '" + newPath + "'; " + - "Remove-Item Env:JAVA_HOME; " + - script - ); + final Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); + + try { + mv(installation.bundledJdk, relocatedJdk); + // ask for elasticsearch version to quickly exit if java is actually found (ie test failure) + return sh.runIgnoreExitCode(script); + } finally { + mv(relocatedJdk, installation.bundledJdk); + } } private void assertService(String id, String status, String displayName) { @@ -110,7 +103,7 @@ private void assertCommand(String script) { } } - public void test10InstallArchive() { + public void test10InstallArchive() throws Exception { installation = installArchive(distribution()); verifyArchiveInstallation(installation, distribution()); serviceScript = installation.bin("elasticsearch-service.bat").toString(); @@ -135,7 +128,7 @@ public void test12InstallService() { public void test13InstallMissingJava() throws IOException { Result result = runWithoutJava(serviceScript + " install"); assertThat(result.exitCode, equalTo(1)); - assertThat(result.stderr, containsString("could not find java; set JAVA_HOME or ensure java is in PATH")); + assertThat(result.stderr, containsString("could not find java in JAVA_HOME or bundled")); } public void test14RemoveNotInstalled() { diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java index ed579c35baf56..e557b47fb8912 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java @@ -19,7 +19,6 @@ package org.elasticsearch.packaging.util; -import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; @@ -37,15 +36,14 @@ import static org.elasticsearch.packaging.util.FileUtils.getDefaultArchiveInstallPath; import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile; import static org.elasticsearch.packaging.util.FileUtils.lsGlob; - import static org.elasticsearch.packaging.util.FileUtils.mv; import static org.elasticsearch.packaging.util.FileUtils.slurp; import static org.elasticsearch.packaging.util.Platforms.isDPKG; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.isEmptyOrNullString; -import static org.hamcrest.core.Is.is; -import static org.hamcrest.collection.IsEmptyCollection.empty; import static org.hamcrest.collection.IsCollectionWithSize.hasSize; +import static org.hamcrest.collection.IsEmptyCollection.empty; +import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNot.not; import static org.junit.Assert.assertTrue; @@ -59,11 +57,11 @@ public class Archives { ? 
"vagrant" : "elasticsearch"; - public static Installation installArchive(Distribution distribution) { + public static Installation installArchive(Distribution distribution) throws Exception { return installArchive(distribution, getDefaultArchiveInstallPath(), getCurrentVersion()); } - public static Installation installArchive(Distribution distribution, Path fullInstallPath, String version) { + public static Installation installArchive(Distribution distribution, Path fullInstallPath, String version) throws Exception { final Shell sh = new Shell(); final Path distributionFile = getDistributionFile(distribution); @@ -255,11 +253,7 @@ private static void verifyDefaultInstallation(Installation es, Distribution dist ).forEach(configFile -> assertThat(es.config(configFile), file(File, owner, owner, p660))); } - public static void runElasticsearch(Installation installation) throws IOException { - runElasticsearch(installation, new Shell()); - } - - public static void runElasticsearch(Installation installation, Shell sh) throws IOException { + public static void runElasticsearch(Installation installation, Shell sh) throws Exception { final Path pidFile = installation.home.resolve("elasticsearch.pid"); final Installation.Executables bin = installation.executables(); @@ -309,7 +303,7 @@ public static void runElasticsearch(Installation installation, Shell sh) throws Platforms.onWindows(() -> sh.run("Get-Process -Id " + pid)); } - public static void stopElasticsearch(Installation installation) { + public static void stopElasticsearch(Installation installation) throws Exception { Path pidFile = installation.home.resolve("elasticsearch.pid"); assertTrue(Files.exists(pidFile)); String pid = slurp(pidFile).trim(); diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java index fda61e9fb36e5..f9b98d58ccacc 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Cleanup.java @@ -50,7 +50,7 @@ public class Cleanup { // todo private static final List ELASTICSEARCH_FILES_WINDOWS = Collections.emptyList(); - public static void cleanEverything() { + public static void cleanEverything() throws Exception { final Shell sh = new Shell(); // kill elasticsearch processes diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Distribution.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Distribution.java index 5ed1bcb5a3d1f..b73438bc4c952 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Distribution.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Distribution.java @@ -23,42 +23,59 @@ public enum Distribution { - OSS_LINUX(Packaging.TAR, Platform.LINUX, Flavor.OSS), - OSS_WINDOWS(Packaging.ZIP, Platform.WINDOWS, Flavor.OSS), - OSS_DARWIN(Packaging.TAR, Platform.DARWIN, Flavor.OSS), - OSS_DEB(Packaging.DEB, Platform.LINUX, Flavor.OSS), - OSS_RPM(Packaging.RPM, Platform.LINUX, Flavor.OSS), - - DEFAULT_LINUX(Packaging.TAR, Platform.LINUX, Flavor.DEFAULT), - DEFAULT_WINDOWS(Packaging.ZIP, Platform.WINDOWS, Flavor.DEFAULT), - DEFAULT_DARWIN(Packaging.TAR, Platform.DARWIN, Flavor.DEFAULT), - DEFAULT_DEB(Packaging.DEB, Platform.LINUX, Flavor.DEFAULT), - DEFAULT_RPM(Packaging.RPM, Platform.LINUX, Flavor.DEFAULT); + OSS_LINUX(Packaging.TAR, Platform.LINUX, Flavor.OSS, true), + OSS_WINDOWS(Packaging.ZIP, Platform.WINDOWS, Flavor.OSS, true), + OSS_DARWIN(Packaging.TAR, 
Platform.DARWIN, Flavor.OSS, true), + OSS_DEB(Packaging.DEB, Platform.LINUX, Flavor.OSS, true), + OSS_RPM(Packaging.RPM, Platform.LINUX, Flavor.OSS, true), + + DEFAULT_LINUX(Packaging.TAR, Platform.LINUX, Flavor.DEFAULT, true), + DEFAULT_WINDOWS(Packaging.ZIP, Platform.WINDOWS, Flavor.DEFAULT, true), + DEFAULT_DARWIN(Packaging.TAR, Platform.DARWIN, Flavor.DEFAULT, true), + DEFAULT_DEB(Packaging.DEB, Platform.LINUX, Flavor.DEFAULT, true), + DEFAULT_RPM(Packaging.RPM, Platform.LINUX, Flavor.DEFAULT, true), + + OSS_NO_JDK_LINUX(Packaging.TAR, Platform.LINUX, Flavor.OSS, false), + OSS_NO_JDK_WINDOWS(Packaging.ZIP, Platform.WINDOWS, Flavor.OSS, false), + OSS_NO_JDK_DARWIN(Packaging.TAR, Platform.DARWIN, Flavor.OSS, false), + OSS_NO_JDK_DEB(Packaging.DEB, Platform.LINUX, Flavor.OSS, false), + OSS_NO_JDK_RPM(Packaging.RPM, Platform.LINUX, Flavor.OSS, false), + + DEFAULT_NO_JDK_LINUX(Packaging.TAR, Platform.LINUX, Flavor.DEFAULT, false), + DEFAULT_NO_JDK_WINDOWS(Packaging.ZIP, Platform.WINDOWS, Flavor.DEFAULT, false), + DEFAULT_NO_JDK_DARWIN(Packaging.TAR, Platform.DARWIN, Flavor.DEFAULT, false), + DEFAULT_NO_JDK_DEB(Packaging.DEB, Platform.LINUX, Flavor.DEFAULT, false), + DEFAULT_NO_JDK_RPM(Packaging.RPM, Platform.LINUX, Flavor.DEFAULT, false); public final Packaging packaging; public final Platform platform; public final Flavor flavor; + public final boolean hasJdk; - Distribution(Packaging packaging, Platform platform, Flavor flavor) { + Distribution(Packaging packaging, Platform platform, Flavor flavor, boolean hasJdk) { this.packaging = packaging; this.platform = platform; this.flavor = flavor; + this.hasJdk = hasJdk; } public String filename(String version) { - String architecture = ""; + String classifier = ""; if (version.startsWith("6.") == false) { + if (hasJdk == false) { + classifier += "-no-jdk"; + } if (packaging == Packaging.DEB) { - architecture = "-amd64"; + classifier += "-amd64"; } else { if (packaging != Packaging.RPM) { - architecture = "-" + platform.toString(); + classifier += "-" + platform.toString(); } - architecture += "-x86_64"; + classifier += "-x86_64"; } } - return flavor.name + "-" + version + architecture + packaging.extension; + return flavor.name + "-" + version + classifier + packaging.extension; } public boolean isDefault() { diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java index 10d1b3ee6b6de..efbf0bd74a354 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java @@ -20,6 +20,8 @@ package org.elasticsearch.packaging.util; import org.elasticsearch.core.internal.io.IOUtils; +import org.hamcrest.FeatureMatcher; +import org.hamcrest.Matcher; import java.io.BufferedWriter; import java.io.IOException; @@ -34,9 +36,11 @@ import java.nio.file.attribute.PosixFileAttributes; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.core.IsNot.not; import static org.hamcrest.text.IsEmptyString.isEmptyOrNullString; import static org.junit.Assert.assertFalse; @@ -69,6 +73,15 @@ public static void rm(Path... 
paths) { } } + public static Path mktempDir(Path path) { + try { + return Files.createTempDirectory(path,"tmp"); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static Path mkdir(Path path) { try { return Files.createDirectories(path); @@ -176,6 +189,20 @@ public static void assertPathsExist(Path... paths) { Arrays.stream(paths).forEach(path -> assertTrue(path + " should exist", Files.exists(path))); } + public static Matcher fileWithGlobExist(String glob) throws IOException { + return new FeatureMatcher>(not(emptyIterable()),"File with pattern exist", "file with pattern"){ + + @Override + protected Iterable featureValueOf(Path actual) { + try { + return Files.newDirectoryStream(actual,glob); + } catch (IOException e) { + return Collections.emptyList(); + } + } + }; + } + public static void assertPathsDontExist(Path... paths) { Arrays.stream(paths).forEach(path -> assertFalse(path + " should not exist", Files.exists(path))); } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java index 41b4fb9755654..9e3ba5b52e284 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Installation.java @@ -30,6 +30,7 @@ public class Installation { public final Path home; public final Path bin; // this isn't a first-class installation feature but we include it for convenience public final Path lib; // same + public final Path bundledJdk; public final Path config; public final Path data; public final Path logs; @@ -42,7 +43,7 @@ public Installation(Path home, Path config, Path data, Path logs, Path plugins, this.home = home; this.bin = home.resolve("bin"); this.lib = home.resolve("lib"); - + this.bundledJdk = home.resolve("jdk"); this.config = config; this.data = data; this.logs = logs; @@ -103,6 +104,9 @@ public class Executables { public final Path elasticsearchCertutil = platformExecutable("elasticsearch-certutil"); public final Path elasticsearchShard = platformExecutable("elasticsearch-shard"); public final Path elasticsearchNode = platformExecutable("elasticsearch-node"); + public final Path elasticsearchSetupPasswords = platformExecutable("elasticsearch-setup-passwords"); + public final Path elasticsearchSyskeygen = platformExecutable("elasticsearch-syskeygen"); + public final Path elasticsearchUsers = platformExecutable("elasticsearch-users"); private Path platformExecutable(String name) { final String platformExecutableName = Platforms.WINDOWS diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java index f2226bfb0c4e3..c5dcc34af882f 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java @@ -22,9 +22,11 @@ import org.elasticsearch.packaging.util.Shell.Result; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; import java.util.regex.Pattern; import java.util.stream.Stream; @@ -52,14 +54,14 @@ public class Packages { public static final Path SYSVINIT_SCRIPT = Paths.get("/etc/init.d/elasticsearch"); public static final Path SYSTEMD_SERVICE = Paths.get("/usr/lib/systemd/system/elasticsearch.service"); - public static 
void assertInstalled(Distribution distribution) { + public static void assertInstalled(Distribution distribution) throws Exception { final Result status = packageStatus(distribution); assertThat(status.exitCode, is(0)); Platforms.onDPKG(() -> assertFalse(Pattern.compile("(?m)^Status:.+deinstall ok").matcher(status.stdout).find())); } - public static void assertRemoved(Distribution distribution) { + public static void assertRemoved(Distribution distribution) throws Exception { final Result status = packageStatus(distribution); Platforms.onRPM(() -> assertThat(status.exitCode, is(1))); @@ -88,25 +90,31 @@ public static Result packageStatus(Distribution distribution) { return result; } - public static Installation install(Distribution distribution) { + public static Installation install(Distribution distribution) throws IOException { return install(distribution, getCurrentVersion()); } - public static Installation install(Distribution distribution, String version) { - final Result result = runInstallCommand(distribution, version); + public static Installation install(Distribution distribution, String version) throws IOException { + Shell sh = new Shell(); + String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + if (distribution.hasJdk == false) { + sh.getEnv().put("JAVA_HOME", systemJavaHome); + } + final Result result = runInstallCommand(distribution, version, sh); if (result.exitCode != 0) { throw new RuntimeException("Installing distribution " + distribution + " version " + version + " failed: " + result); } - return Installation.ofPackage(distribution.packaging); - } + Installation installation = Installation.ofPackage(distribution.packaging); - public static Result runInstallCommand(Distribution distribution) { - return runInstallCommand(distribution, getCurrentVersion()); + if (distribution.hasJdk == false) { + Files.write(installation.envFile, ("JAVA_HOME=" + systemJavaHome + "\n").getBytes(StandardCharsets.UTF_8), + StandardOpenOption.APPEND); + } + return installation; } - public static Result runInstallCommand(Distribution distribution, String version) { - final Shell sh = new Shell(); + public static Result runInstallCommand(Distribution distribution, String version, Shell sh) { final Path distributionFile = getDistributionFile(distribution, version); if (Platforms.isRPM()) { @@ -125,7 +133,7 @@ public static Result runInstallCommand(Distribution distribution, String version } } - public static void remove(Distribution distribution) { + public static void remove(Distribution distribution) throws Exception { final Shell sh = new Shell(); Platforms.onRPM(() -> { @@ -142,16 +150,15 @@ public static void remove(Distribution distribution) { }); } - public static void verifyPackageInstallation(Installation installation, Distribution distribution) { - verifyOssInstallation(installation, distribution); + public static void verifyPackageInstallation(Installation installation, Distribution distribution, Shell sh) { + verifyOssInstallation(installation, distribution, sh); if (distribution.flavor == Distribution.Flavor.DEFAULT) { verifyDefaultInstallation(installation); } } - private static void verifyOssInstallation(Installation es, Distribution distribution) { - final Shell sh = new Shell(); + private static void verifyOssInstallation(Installation es, Distribution distribution, Shell sh) { sh.run("id elasticsearch"); sh.run("getent group elasticsearch"); @@ -263,8 +270,7 @@ private static void verifyDefaultInstallation(Installation es) { ).forEach(configFile -> 
assertThat(es.config(configFile), file(File, "root", "elasticsearch", p660))); } - public static void startElasticsearch() throws IOException { - final Shell sh = new Shell(); + public static void startElasticsearch(Shell sh) throws IOException { if (isSystemd()) { sh.run("systemctl daemon-reload"); sh.run("systemctl enable elasticsearch.service"); @@ -274,6 +280,10 @@ public static void startElasticsearch() throws IOException { sh.run("service elasticsearch start"); } + assertElasticsearchStarted(sh); + } + + public static void assertElasticsearchStarted(Shell sh) throws IOException { waitForElasticsearch(); if (isSystemd()) { @@ -283,4 +293,22 @@ public static void startElasticsearch() throws IOException { sh.run("service elasticsearch status"); } } + + public static void stopElasticsearch(Shell sh) throws IOException { + if (isSystemd()) { + sh.run("systemctl stop elasticsearch.service"); + } else { + sh.run("service elasticsearch stop"); + } + } + + public static void restartElasticsearch(Shell sh) throws IOException { + if (isSystemd()) { + sh.run("systemctl restart elasticsearch.service"); + } else { + sh.run("service elasticsearch restart"); + } + + waitForElasticsearch(); + } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java index dbac9c88d26c9..6258c1336b2fc 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java @@ -65,25 +65,25 @@ public static boolean isSysVInit() { return new Shell().runIgnoreExitCode("which service").isSuccess(); } - public static void onWindows(PlatformAction action) { + public static void onWindows(PlatformAction action) throws Exception { if (WINDOWS) { action.run(); } } - public static void onLinux(PlatformAction action) { + public static void onLinux(PlatformAction action) throws Exception { if (LINUX) { action.run(); } } - public static void onRPM(PlatformAction action) { + public static void onRPM(PlatformAction action) throws Exception { if (isRPM()) { action.run(); } } - public static void onDPKG(PlatformAction action) { + public static void onDPKG(PlatformAction action) throws Exception { if (isDPKG()) { action.run(); } @@ -94,6 +94,6 @@ public static void onDPKG(PlatformAction action) { */ @FunctionalInterface public interface PlatformAction { - void run(); + void run() throws Exception; } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java index 5853bc2daa148..dc490de05b9c8 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java @@ -27,41 +27,32 @@ import java.io.InputStreamReader; import java.nio.file.Path; import java.util.HashMap; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.stream.Stream; -import static java.util.Collections.emptyMap; - /** * Wrapper to run shell commands and collect their outputs in a less verbose way */ public class Shell { final Map env; - final Path workingDirectory; + Path workingDirectory; public Shell() { - this(emptyMap(), null); - } - - public Shell(Map env) { - this(env, null); + this.env = new HashMap<>(); + this.workingDirectory = null; } - public Shell(Path workingDirectory) { - this(emptyMap(), workingDirectory); + public Map getEnv() { + return env; 
} - public Shell(Map env, Path workingDirectory) { - this.env = new HashMap<>(env); + public void setWorkingDirectory(Path workingDirectory) { this.workingDirectory = workingDirectory; } - public Map getEnv() { - return env; - } - /** * Run the provided string as a shell script. On Linux the {@code bash -c [script]} syntax will be used, and on Windows * the {@code powershell.exe -Command [script]} syntax will be used. Throws an exception if the exit code of the script is nonzero @@ -77,6 +68,10 @@ public Result runIgnoreExitCode(String script) { return runScriptIgnoreExitCode(getScriptCommand(script)); } + public Result run( String command, Object... args) { + String formattedCommand = String.format(Locale.ROOT, command, args); + return run(formattedCommand); + } private String[] getScriptCommand(String script) { if (Platforms.WINDOWS) { return powershellCommand(script); @@ -105,6 +100,7 @@ private Result runScriptIgnoreExitCode(String[] command) { ProcessBuilder builder = new ProcessBuilder(); builder.command(command); + if (workingDirectory != null) { setWorkingDirectory(builder, workingDirectory); } diff --git a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats deleted file mode 100644 index 8baa75f38f5bc..0000000000000 --- a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats +++ /dev/null @@ -1,257 +0,0 @@ -#!/usr/bin/env bats - -# This file is used to test the elasticsearch Systemd setup. - -# WARNING: This testing file must be executed as root and can -# dramatically change your system. It should only be executed -# in a throw-away VM like those made by the Vagrantfile at -# the root of the Elasticsearch source code. This should -# cause the script to fail if it is executed any other way: -[ -f /etc/is_vagrant_vm ] || { - >&2 echo "must be run on a vagrant VM" - exit 1 -} - -# The test case can be executed with the Bash Automated -# Testing System tool available at https://github.com/sstephenson/bats -# Thanks to Sam Stephenson! - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Load test utilities -load $BATS_UTILS/utils.bash -load $BATS_UTILS/packages.bash -load $BATS_UTILS/plugins.bash - -# Cleans everything for the 1st execution -setup() { - skip_not_systemd - skip_not_dpkg_or_rpm - export_elasticsearch_paths -} - -@test "[SYSTEMD] install elasticsearch" { - clean_before_test - install_package -} - -@test "[SYSTEMD] daemon reload after install" { - systemctl daemon-reload -} - -@test "[SYSTEMD] daemon isn't enabled on restart" { - # Rather than restart the VM we just ask systemd if it plans on starting - # elasticsearch on restart. Not as strong as a restart but much much - # faster. 
- run systemctl is-enabled elasticsearch.service - [ "$output" = "disabled" ] -} - -@test "[SYSTEMD] enable" { - systemctl enable elasticsearch.service - - systemctl is-enabled elasticsearch.service -} - -@test "[SYSTEMD] start" { - # Capture the current epoch in millis - run date +%s - epoch="$output" - - # The OpenJDK packaged for CentOS and OEL both override the default value (false) for the JVM option "AssumeMP". - # - # Because it is forced to "true" by default for these packages, the following warning message is printed to the - # standard output when the Vagrant box has only 1 CPU: - # OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure - # the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N - # - # This message will then fail the next test where we check if no entries have been added to the journal. - # - # This message appears since with java-1.8.0-openjdk-1.8.0.111-1.b15.el7_2.x86_64 because of the commit: - # 2016-10-10 - Andrew Hughes - 1:1.8.0.111-1.b15 - Turn debug builds on for all JIT architectures. - # Always AssumeMP on RHEL. - # - Resolves: rhbz#1381990 - # - if [ -x "$(command -v lsb_release)" ]; then - # Here we set the "-XX:-AssumeMP" option to false again: - lsb_release=$(lsb_release -i) - if [[ "$lsb_release" =~ "CentOS" ]] || [[ "$lsb_release" =~ "OracleServer" ]]; then - echo "-XX:-AssumeMP" >> $ESCONFIG/jvm.options - fi - fi - - systemctl start elasticsearch.service - wait_for_elasticsearch_status - assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - assert_file_exist "/var/log/elasticsearch/elasticsearch_server.json" - - # Converts the epoch back in a human readable format - run date --date=@$epoch "+%Y-%m-%d %H:%M:%S" - since="$output" - - # Verifies that no new entries in journald have been added - # since the last start - result="$(journalctl _SYSTEMD_UNIT=elasticsearch.service --since "$since" --output cat | wc -l)" - [ "$result" -eq "0" ] || { - echo "Expected no entries in journalctl for the Elasticsearch service but found:" - journalctl _SYSTEMD_UNIT=elasticsearch.service --since "$since" - false - } -} - -@test "[SYSTEMD] start (running)" { - systemctl start elasticsearch.service -} - -@test "[SYSTEMD] is active (running)" { - run systemctl is-active elasticsearch.service - [ "$status" -eq 0 ] - [ "$output" = "active" ] -} - -@test "[SYSTEMD] status (running)" { - systemctl status elasticsearch.service -} - -################################## -# Check that Elasticsearch is working -################################## -@test "[SYSTEMD] test elasticsearch" { - run_elasticsearch_tests -} - -@test "[SYSTEMD] restart" { - systemctl restart elasticsearch.service - - wait_for_elasticsearch_status - - service elasticsearch status -} - -@test "[SYSTEMD] stop (running)" { - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] status (stopping)" { - run systemctl status elasticsearch.service - # I'm not sure why suse exits 0 here, but it does - if [ ! -e /etc/SuSE-release ]; then - [ "$status" -eq 3 ] || "Expected exit code 3 meaning stopped but got $status" - fi - echo "$output" | grep "Active:" | grep "inactive" -} - -@test "[SYSTEMD] stop (stopped)" { - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] status (stopped)" { - run systemctl status elasticsearch.service - # I'm not sure why suse exits 0 here, but it does - if [ ! 
-e /etc/SuSE-release ]; then - [ "$status" -eq 3 ] || "Expected exit code 3 meaning stopped but got $status" - fi - echo "$output" | grep "Active:" | grep "inactive" -} - -# Simulates the behavior of a system restart: -# the PID directory is deleted by the operating system -# but it should not block ES from starting -# see https://github.com/elastic/elasticsearch/issues/11594 -@test "[SYSTEMD] delete PID_DIR and restart" { - rm -rf /var/run/elasticsearch - - systemd-tmpfiles --create - - systemctl start elasticsearch.service - - wait_for_elasticsearch_status - - assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] start Elasticsearch with custom JVM options" { - assert_file_exist $ESENVFILE - # The custom config directory is not under /tmp or /var/tmp because - # systemd's private temp directory functionally means different - # processes can have different views of what's in these directories - local temp=`mktemp -p /etc -d` - cp "$ESCONFIG"/elasticsearch.yml "$temp" - cp "$ESCONFIG"/log4j2.properties "$temp" - touch "$temp/jvm.options" - chown -R elasticsearch:elasticsearch "$temp" - echo "-Xms512m" >> "$temp/jvm.options" - echo "-Xmx512m" >> "$temp/jvm.options" - # we have to disable Log4j from using JMX lest it will hit a security - # manager exception before we have configured logging; this will fail - # startup since we detect usages of logging before it is configured - echo "-Dlog4j2.disable.jmx=true" >> "$temp/jvm.options" - cp $ESENVFILE "$temp/elasticsearch" - echo "ES_PATH_CONF=\"$temp\"" >> $ESENVFILE - echo "ES_JAVA_OPTS=\"-XX:-UseCompressedOops\"" >> $ESENVFILE - service elasticsearch start - wait_for_elasticsearch_status - curl -s -XGET localhost:9200/_nodes | fgrep '"heap_init_in_bytes":536870912' - curl -s -XGET localhost:9200/_nodes | fgrep '"using_compressed_ordinary_object_pointers":"false"' - service elasticsearch stop - cp "$temp/elasticsearch" $ESENVFILE -} - -@test "[SYSTEMD] masking systemd-sysctl" { - clean_before_test - - systemctl mask systemd-sysctl.service - install_package - - systemctl unmask systemd-sysctl.service -} - -@test "[SYSTEMD] service file sets limits" { - clean_before_test - install_package - systemctl start elasticsearch.service - wait_for_elasticsearch_status - local pid=$(cat /var/run/elasticsearch/elasticsearch.pid) - local max_file_size=$(cat /proc/$pid/limits | grep "Max file size" | awk '{ print $4 }') - [ "$max_file_size" == "unlimited" ] - local max_processes=$(cat /proc/$pid/limits | grep "Max processes" | awk '{ print $3 }') - [ "$max_processes" == "4096" ] - local max_open_files=$(cat /proc/$pid/limits | grep "Max open files" | awk '{ print $4 }') - [ "$max_open_files" == "65535" ] - local max_address_space=$(cat /proc/$pid/limits | grep "Max address space" | awk '{ print $4 }') - [ "$max_address_space" == "unlimited" ] - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] test runtime directory" { - clean_before_test - install_package - sudo rm -rf /var/run/elasticsearch - systemctl start elasticsearch.service - wait_for_elasticsearch_status - [ -d /var/run/elasticsearch ] - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] GC logs exist" { - start_elasticsearch_service - assert_file_exist /var/log/elasticsearch/gc.log.0.current - stop_elasticsearch_service -} diff --git a/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats b/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats index af0c1280b2dc3..697e6456d1f24 100644 --- 
a/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats @@ -60,8 +60,17 @@ setup() { install_package -v $(cat upgrade_from_version) } +@test "[UPGRADE] modify keystore" { + # deliberately modify the keystore to force it to be preserved during package upgrade + export_elasticsearch_paths + sudo -E "$ESHOME/bin/elasticsearch-keystore" remove keystore.seed + sudo -E echo keystore_seed | "$ESHOME/bin/elasticsearch-keystore" add -x keystore.seed +} + @test "[UPGRADE] start old version" { + export JAVA_HOME=$SYSTEM_JAVA_HOME start_elasticsearch_service + unset JAVA_HOME } @test "[UPGRADE] check elasticsearch version is old version" { diff --git a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash index 5b42a8744beb4..8cf5d0cc349ef 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash @@ -162,22 +162,6 @@ fi remove_plugin_example } -@test "[$GROUP] fail if java executable is not found" { - [ "$GROUP" == "TAR PLUGINS" ] || skip "Test case only supported by TAR PLUGINS" - local JAVA=$(which java) - - sudo chmod -x $JAVA - run "$ESHOME/bin/elasticsearch-plugin" - sudo chmod +x $JAVA - - [ "$status" -eq 1 ] - local expected="could not find java; set JAVA_HOME or ensure java is in PATH" - [[ "$output" == *"$expected"* ]] || { - echo "Expected error message [$expected] but found: $output" - false - } -} - # Note that all of the tests from here to the end of the file expect to be run # in sequence and don't take well to being run one at a time. @test "[$GROUP] install a sample plugin" { diff --git a/qa/vagrant/src/test/resources/packaging/utils/packages.bash b/qa/vagrant/src/test/resources/packaging/utils/packages.bash index 0285e3682b3f7..a38f36c3d14c6 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/packages.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/packages.bash @@ -30,6 +30,14 @@ # specific language governing permissions and limitations # under the License. +env_file() { + if is_dpkg; then + echo "/etc/default/elasticsearch" + fi + if is_rpm; then + echo "/etc/sysconfig/elasticsearch" + fi +} # Export some useful paths. export_elasticsearch_paths() { @@ -40,15 +48,11 @@ export_elasticsearch_paths() { export ESDATA="/var/lib/elasticsearch" export ESLOG="/var/log/elasticsearch" export ESPIDDIR="/var/run/elasticsearch" - if is_dpkg; then - export ESENVFILE="/etc/default/elasticsearch" - fi - if is_rpm; then - export ESENVFILE="/etc/sysconfig/elasticsearch" - fi + export ESENVFILE=$(env_file) export PACKAGE_NAME=${PACKAGE_NAME:-"elasticsearch-oss"} } + # Install the rpm or deb package. # -u upgrade rather than install. This only matters for rpm. # -v the version to upgrade to. Defaults to the version under test. 
@@ -94,6 +98,9 @@ install_package() { else skip "Only rpm or deb supported" fi + + # pass through java home to package + echo "JAVA_HOME=\"$SYSTEM_JAVA_HOME\"" >> $(env_file) } # Checks that all directories & files are correctly installed after a deb or diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index a4cfa26875546..42c2195dd13fa 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -52,9 +52,9 @@ for (Version version : bwcVersions.indexCompatible) { bwcTest.dependsOn(versionBwcTest) } -unitTest.enabled = false +test.enabled = false -task integTest { +task bwcTestSnapshots { if (project.bwc_tests_enabled) { final def version = bwcVersions.unreleasedIndexCompatible.first() dependsOn "v${version}#bwcTest" @@ -83,4 +83,4 @@ task verifyDocsLuceneVersion { } } -check.dependsOn integTest, verifyDocsLuceneVersion +check.dependsOn bwcTestSnapshots, verifyDocsLuceneVersion diff --git a/qa/verify-version-constants/src/test/java/org/elasticsearch/qa/verify_version_constants/VerifyVersionConstantsIT.java b/qa/verify-version-constants/src/test/java/org/elasticsearch/qa/verify_version_constants/VerifyVersionConstantsIT.java index 56297eaab8857..bba6b4a8aa27f 100644 --- a/qa/verify-version-constants/src/test/java/org/elasticsearch/qa/verify_version_constants/VerifyVersionConstantsIT.java +++ b/qa/verify-version-constants/src/test/java/org/elasticsearch/qa/verify_version_constants/VerifyVersionConstantsIT.java @@ -37,7 +37,7 @@ public void testLuceneVersionConstant() throws IOException, ParseException { assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); final ObjectPath objectPath = ObjectPath.createFromResponse(response); final String elasticsearchVersionString = objectPath.evaluate("version.number").toString(); - final Version elasticsearchVersion = Version.fromString(elasticsearchVersionString); + final Version elasticsearchVersion = Version.fromString(elasticsearchVersionString.replace("-SNAPSHOT", "")); final String luceneVersionString = objectPath.evaluate("version.lucene_version").toString(); final org.apache.lucene.util.Version luceneVersion = org.apache.lucene.util.Version.parse(luceneVersionString); assertThat(elasticsearchVersion.luceneVersion, equalTo(luceneVersion)); diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index fce27e6ab8a36..dcbf5253bb085 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -37,7 +37,11 @@ int managementPort repositories { // the Wildfly distribution is not available via a repository, so we fake an Ivy repository on top of the download site ivy { - url "http://download.jboss.org" + name "wildfly" + url "https://download.jboss.org" + metadataSources { + artifact() + } patternLayout { artifact 'wildfly/[revision]/[module]-[revision].[ext]' } @@ -211,7 +215,7 @@ if (!Os.isFamily(Os.FAMILY_WINDOWS)) { check.dependsOn(integTest) -unitTest.enabled = false +test.enabled = false dependencyLicenses.enabled = false dependenciesInfo.enabled = false diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 915c3a68f6e0e..d95ad476682b1 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -2,5 +2,5 @@ apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' -unitTest.enabled = false +test.enabled = false jarHell.enabled = false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json index ac3024c9c245d..0fd17b791b9ba 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json @@ -58,14 +58,6 @@ "type" : "list", "description" : "A list of fields to extract and return from the _source field" }, - "_source_exclude": { - "type" : "list", - "description" : "A list of fields to exclude from the returned _source field" - }, - "_source_include": { - "type" : "list", - "description" : "A list of fields to extract and return from the _source field" - }, "version" : { "type" : "number", "description" : "Explicit version number for concurrency control" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json index 1c515e4509581..c3b51de862097 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json @@ -1,6 +1,6 @@ { "ingest.delete_pipeline": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html", "methods": [ "DELETE" ], "url": { "path": "/_ingest/pipeline/{id}", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json index 317250874233b..16a07e072b771 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json @@ -1,6 +1,6 @@ { "ingest.get_pipeline": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html", "methods": [ "GET" ], "url": { "path": "/_ingest/pipeline/{id}", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.processor_grok.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.processor_grok.json index 55afada728122..bf40be853e2d1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.processor_grok.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.processor_grok.json @@ -1,6 +1,6 @@ { "ingest.processor_grok": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html#grok-processor-rest-get", "methods": [ "GET" ], "url": { "path": "/_ingest/processor/grok", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json index e4c3c2eb3f9a3..1ea77901d8dbd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json @@ -1,6 +1,6 @@ { "ingest.put_pipeline": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-pipeline-api.html", "methods": [ "PUT" ], "url": { "path": "/_ingest/pipeline/{id}", diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json index d02f97d81dd70..c16008ad6b659 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json @@ -1,6 +1,6 @@ { "ingest.simulate": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html", "methods": [ "GET", "POST" ], "url": { "path": "/_ingest/pipeline/_simulate", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json index 398dcbd29515d..1ba439ab62fba 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json @@ -36,8 +36,8 @@ }, "max_concurrent_shard_requests" : { "type" : "number", - "description" : "The number of concurrent shard requests each sub search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests", - "default" : "The default grows with the number of nodes in the cluster but is at most 256." + "description" : "The number of concurrent shard requests each sub search executes concurrently per node. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests", + "default" : 5 }, "rest_total_hits_as_int" : { "type" : "boolean", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json index fc701c29d6021..e85eadb5bc41e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json @@ -30,6 +30,11 @@ "default": 0, "description": "The throttle to set on this request in sub-requests per second. -1 means no throttle." }, + "scroll": { + "type" : "time", + "description" : "Control how long to keep the search context alive", + "default" : "5m" + }, "slices": { "type": "number", "default": 1, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index f44c0f74b2c3d..29afd7b58b163 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -185,7 +185,7 @@ "max_concurrent_shard_requests" : { "type" : "number", "description" : "The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests", - "default" : "The default is 5." 
+ "default" : 5 }, "pre_filter_shard_size" : { "type" : "number", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml index ceed71c18e491..b443e322f80f6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml @@ -5,16 +5,6 @@ - is_true: master_node ---- -"get cluster state returns cluster state size with human readable format": - - do: - cluster.state: - human: true - - - is_true: master_node - - gte: { compressed_size_in_bytes: 50 } - - is_true: compressed_size - --- "get cluster state returns cluster_uuid at the top level": - skip: @@ -27,5 +17,3 @@ - is_true: cluster_uuid - is_true: master_node - - gte: { compressed_size_in_bytes: 50 } - - is_true: compressed_size diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml index 1eab9d6159764..a05134866628b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml @@ -124,6 +124,11 @@ index: test-1 body: { bar: 42 } +# ensures dynamic mapping update is visible to get_mapping + - do: + cluster.health: + wait_for_events: normal + - do: indices.get_mapping: include_type_name: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml index 7c6136d273979..13cb3321841cf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml @@ -55,8 +55,8 @@ "PUT mapping with _doc on an index that has types": - skip: - version: "all" - reason: include_type_name is only supported as of 6.7 # AwaitsFix: https://github.com/elastic/elasticsearch/issues/38202 + version: " - 6.6.99" + reason: include_type_name is only supported as of 6.7 - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 8532b40fbc1e1..d44473b79fa2f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -242,8 +242,11 @@ setup: --- "Composite aggregation with format": - skip: - version: " - 6.2.99" - reason: this uses a new option (format) added in 6.3.0 + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42679" +# - skip: +# version: " - 6.2.99" +# reason: this uses a new option (format) added in 6.3.0 - do: search: @@ -259,7 +262,7 @@ setup: "date_histogram": { "field": "date", "interval": "1d", - "format": "yyyy-MM-dd" + "format": "strict_date" } } } @@ -289,7 +292,7 @@ setup: "date_histogram": { "field": "date", "interval": "1d", - "format": "yyyy-MM-dd" + "format": "strict_date" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml index e9fb959406e0e..65416f23feaab 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/140_pre_filter_search_shards.yml @@ -9,7 +9,7 @@ setup: properties: created_at: type: date - format: "yyyy-MM-dd" + format: "strict_date" - do: indices.create: index: index_2 @@ -20,7 +20,7 @@ setup: properties: created_at: type: date - format: "yyyy-MM-dd" + format: "strict_date" - do: indices.create: index: index_3 @@ -31,11 +31,14 @@ setup: properties: created_at: type: date - format: "yyyy-MM-dd" + format: "strict_date" --- "pre_filter_shard_size with invalid parameter": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42679" - do: catch: /preFilterShardSize must be >= 1/ search: @@ -45,6 +48,9 @@ setup: --- "pre_filter_shard_size with shards that have no hit": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/42679" - do: index: index: index_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/180_locale_dependent_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/180_locale_dependent_mapping.yml index e9ba863675dfa..0bd139a070bc2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/180_locale_dependent_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/180_locale_dependent_mapping.yml @@ -1,8 +1,8 @@ --- "Test Index and Search locale dependent mappings / dates": - skip: - version: " - 6.1.99" - reason: JDK9 only supports this with a special sysproperty added in 6.2.0 + version: "all" + reason: "Awaits fix: https://github.com/elastic/elasticsearch/issues/39981(Previously: JDK9 only supports this with a special sysproperty added in 6.2.0.)" - do: indices.create: index: test_index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/220_total_hits_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/220_total_hits_object.yml index 8823fc8922b67..07a7dbfe152ae 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/220_total_hits_object.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/220_total_hits_object.yml @@ -106,7 +106,7 @@ setup: - do: search: - size: 3 + size: 0 track_total_hits: 4 body: query: @@ -116,6 +116,18 @@ setup: - match: {hits.total.value: 4} - match: {hits.total.relation: gte} + - do: + search: + size: 0 + track_total_hits: 5 + body: + query: + match: + foo: bar + + - match: {hits.total.value: 5} + - match: {hits.total.relation: eq} + - do: catch: /\[rest_total_hits_as_int\] cannot be used if the tracking of total hits is not accurate, got 100/ search: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yml index 03f218b140b8f..653979073b707 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yml @@ -64,10 +64,9 @@ - length: { shards: 1 } - match: { shards.0.0.index: test_index } - match: { indices.test_index.aliases: [test_alias_filter_1, test_alias_filter_2]} - - match: { indices.test_index.filter.bool.should.0.term.field.value: value1 } + - length: { indices.test_index.filter.bool.should: 
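The yaml tests above swap the explicit "yyyy-MM-dd" pattern for the built-in "strict_date" format name, which accepts the same four-digit-year, two-digit-month, two-digit-day dates but avoids the pattern-parsing differences referenced by the linked AwaitsFix issues. A rough java.time sketch of what strict ISO-style date parsing looks like; this is only illustrative and is not the actual Elasticsearch DateFormatter implementation:

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.time.format.ResolverStyle;

public class StrictDateSketch {

    // ISO_LOCAL_DATE with strict resolution roughly matches what "strict_date" accepts: yyyy-MM-dd only.
    private static final DateTimeFormatter STRICT_DATE =
        DateTimeFormatter.ISO_LOCAL_DATE.withResolverStyle(ResolverStyle.STRICT);

    public static void main(String[] args) {
        System.out.println(LocalDate.parse("2019-05-28", STRICT_DATE)); // parses

        try {
            LocalDate.parse("2019-5-28", STRICT_DATE); // missing zero padding is rejected
        } catch (DateTimeParseException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}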
2 } - lte: { indices.test_index.filter.bool.should.0.term.field.boost: 1.0 } - gte: { indices.test_index.filter.bool.should.0.term.field.boost: 1.0 } - - match: { indices.test_index.filter.bool.should.1.term.field.value: value2} - lte: { indices.test_index.filter.bool.should.1.term.field.boost: 1.0 } - gte: { indices.test_index.filter.bool.should.1.term.field.boost: 1.0 } - match: { indices.test_index.filter.bool.adjust_pure_negative: true} diff --git a/server/build.gradle b/server/build.gradle index 4fc8c451c9b96..50f142cddc1dd 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -17,8 +17,6 @@ * under the License. */ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask - apply plugin: 'elasticsearch.build' apply plugin: 'nebula.optional-base' apply plugin: 'nebula.maven-base-publish' @@ -331,14 +329,15 @@ dependencyLicenses { } if (isEclipse == false || project.path == ":server-tests") { - task integTest(type: RandomizedTestingTask, - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Multi-node tests', - dependsOn: unitTest.dependsOn) { + task integTest(type: Test) { + description = 'Multi-node tests' + mustRunAfter test + include '**/*IT.class' } + check.dependsOn integTest - integTest.mustRunAfter test + task internalClusterTest { dependsOn integTest } diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 4dc7f4b4ca6bf..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f4d286ed7940fc206331424e8ce577584208eba3 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..3d51b62b1854d --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.0.0.jar.sha1 @@ -0,0 +1 @@ +26fdada04adbb02164ef2d0f9abfa3b46ec30a0b \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index df0783e11fee6..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5416da7370f5def9a79fb1cccb091b091b808a5 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..e8ac2167ec45c --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.0.0.jar.sha1 @@ -0,0 +1 @@ +90bda2357016dc0f4582938b01f5ae1142089d5f \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 7a4b68f8d857d..0000000000000 --- a/server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e3a95ff3cbd96e2c05b90932c20ca6374cdcdbe9 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0.jar.sha1 b/server/licenses/lucene-core-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..2619abce7979a --- /dev/null +++ b/server/licenses/lucene-core-8.0.0.jar.sha1 @@ -0,0 +1 @@ +407c555efb2d3253f51a676cc2089a5d29a3b7b7 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 
b/server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index e1933653b9f4d..0000000000000 --- a/server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -045dcae6368a436cec6e56f654a9c6b3a4656d17 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0.jar.sha1 b/server/licenses/lucene-grouping-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..86294d2987fd7 --- /dev/null +++ b/server/licenses/lucene-grouping-8.0.0.jar.sha1 @@ -0,0 +1 @@ +a9004071d79e9f1eb5f2fe81c4b2b736d9d838bf \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 85aff2f06dd49..0000000000000 --- a/server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -63b7164d24547cea36df3d81eedb9e8b424cf3c2 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..ac8a49302cabd --- /dev/null +++ b/server/licenses/lucene-highlighter-8.0.0.jar.sha1 @@ -0,0 +1 @@ +95c55c400dcfd5e08da1bab4f33eb3b6a65b1d16 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 41b3910da1319..0000000000000 --- a/server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7b3358889c491237709e8aea4a39e816227f3d26 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0.jar.sha1 b/server/licenses/lucene-join-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..88f0cdb3238c4 --- /dev/null +++ b/server/licenses/lucene-join-8.0.0.jar.sha1 @@ -0,0 +1 @@ +9364f8fd4fff476e619e84cb22f4cb0108039eda \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 22318a34bf09e..0000000000000 --- a/server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5d9566e2b3be41c81fe42df1c57fff31fcdbc565 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0.jar.sha1 b/server/licenses/lucene-memory-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..7dc4ebb2f5806 --- /dev/null +++ b/server/licenses/lucene-memory-8.0.0.jar.sha1 @@ -0,0 +1 @@ +9cce58e14792735cb7dc85fc84239193521d45eb \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 2c4429c8c7619..0000000000000 --- a/server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -45574e22c4b1569a2aecd5b2837388c7287892b5 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0.jar.sha1 b/server/licenses/lucene-misc-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..98c3fd2026362 --- /dev/null +++ b/server/licenses/lucene-misc-8.0.0.jar.sha1 @@ -0,0 +1 @@ +c444c2c41ab46744e2bbc61df5bdd2ac62ffe6a5 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index ae11e5858a28c..0000000000000 --- 
a/server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -67405660330db8c09e0994a615cf3ab95d7bc151 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0.jar.sha1 b/server/licenses/lucene-queries-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..457b5fc9cba15 --- /dev/null +++ b/server/licenses/lucene-queries-8.0.0.jar.sha1 @@ -0,0 +1 @@ +197e4cf95fcbc787f128a33e4675528cfee65065 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index fcb52584b667c..0000000000000 --- a/server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c8de64bf2c4f09766d4cd62e8dd403016665f37c \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..b1764ad6bc05f --- /dev/null +++ b/server/licenses/lucene-queryparser-8.0.0.jar.sha1 @@ -0,0 +1 @@ +aa7d2e07736356405b4bece971d0a9ff1036dac3 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 80750b2d41e33..0000000000000 --- a/server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fc080b45b881a78a23743df9a7388fd3dbbb9d66 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..0cdbafa3e1ff3 --- /dev/null +++ b/server/licenses/lucene-sandbox-8.0.0.jar.sha1 @@ -0,0 +1 @@ +12f32d95596ff55c43c4c2378bf26e9fe3ea7dd9 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 32f5ca7196e17..0000000000000 --- a/server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6e0e12b9882005b87ef39325e1fc8539c8caff31 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0.jar.sha1 b/server/licenses/lucene-spatial-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..efa94df2e7319 --- /dev/null +++ b/server/licenses/lucene-spatial-8.0.0.jar.sha1 @@ -0,0 +1 @@ +5cd61c5c166a69571f39178b50d304d6e3914050 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 2cf474908d1ba..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ed9c9a03e1a15840237d09ca4da7aadfce1c780 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..c3e6b2ac92cee --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.0.0.jar.sha1 @@ -0,0 +1 @@ +72474064a247566c4c759eda1dfaac4d48778cd1 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index c4016fb692e2f..0000000000000 --- a/server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -fdf0efdeb9a73a6b0f8349df21049ecbc73955d7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..f83e1d443a2a0 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.0.0.jar.sha1 @@ -0,0 +1 @@ +d40eb969881f58b47bace23865a1d5a9dd4ebf0a \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 7ea28b0ed87da..0000000000000 --- a/server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c69399183d5f9f85f3f8130452d0bed62fd92440 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0.jar.sha1 b/server/licenses/lucene-suggest-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..6b60f4196d5f0 --- /dev/null +++ b/server/licenses/lucene-suggest-8.0.0.jar.sha1 @@ -0,0 +1 @@ +57ebd0c31e90f5f73aad7dbf7448cd59d8418f03 \ No newline at end of file diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index dd3ac992475b9..c696d476bbb43 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -113,23 +113,19 @@ protected void blend(final TermStates[] contexts, int maxDoc, IndexReader reader // TODO: Maybe it could also make sense to assume independent distributions of documents and eg. have: // df = df1 + df2 - (df1 * df2 / maxDoc)? max = Math.max(df, max); - if (minSumTTF != -1 && ctx.totalTermFreq() != -1) { + if (ctx.totalTermFreq() > 0) { // we need to find out the minimum sumTTF to adjust the statistics // otherwise the statistics don't match minSumTTF = Math.min(minSumTTF, reader.getSumTotalTermFreq(terms[i].field())); - } else { - minSumTTF = -1; } - } - if (minSumTTF != -1 && maxDoc > minSumTTF) { + if (maxDoc > minSumTTF) { maxDoc = (int)minSumTTF; } - if (max == 0) { return; // we are done that term doesn't exist at all } - long sumTTF = minSumTTF == -1 ? -1 : 0; + long sumTTF = 0; final int[] tieBreak = new int[contexts.length]; for (int i = 0; i < tieBreak.length; ++i) { tieBreak[i] = i; @@ -165,11 +161,7 @@ protected int compare(int i, int j) { } contexts[i] = ctx = adjustDF(reader.getContext(), ctx, Math.min(maxDoc, actualDf)); prev = current; - if (sumTTF >= 0 && ctx.totalTermFreq() >= 0) { - sumTTF += ctx.totalTermFreq(); - } else { - sumTTF = -1; // omit once TF is omitted anywhere! - } + sumTTF += ctx.totalTermFreq(); } sumTTF = Math.min(sumTTF, minSumTTF); for (int i = 0; i < contexts.length; i++) { @@ -177,17 +169,12 @@ protected int compare(int i, int j) { if (df == 0) { continue; } - // the blended sumTTF can't be greater than the sumTTTF on the field - final long fixedTTF = sumTTF == -1 ? 
-1 : sumTTF; - contexts[i] = adjustTTF(reader.getContext(), contexts[i], fixedTTF); + contexts[i] = adjustTTF(reader.getContext(), contexts[i], sumTTF); } } private TermStates adjustTTF(IndexReaderContext readerContext, TermStates termContext, long sumTTF) throws IOException { assert termContext.wasBuiltFor(readerContext); - if (sumTTF == -1 && termContext.totalTermFreq() == -1) { - return termContext; - } TermStates newTermContext = new TermStates(readerContext); List leaves = readerContext.leaves(); final int len; @@ -213,12 +200,7 @@ private TermStates adjustTTF(IndexReaderContext readerContext, TermStates termCo private static TermStates adjustDF(IndexReaderContext readerContext, TermStates ctx, int newDocFreq) throws IOException { assert ctx.wasBuiltFor(readerContext); // Use a value of ttf that is consistent with the doc freq (ie. gte) - long newTTF; - if (ctx.totalTermFreq() < 0) { - newTTF = -1; - } else { - newTTF = Math.max(ctx.totalTermFreq(), newDocFreq); - } + long newTTF = Math.max(ctx.totalTermFreq(), newDocFreq); List leaves = readerContext.leaves(); final int len; if (leaves == null) { diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 9e659bb4145bb..1b1cd8d3e720a 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -57,7 +57,7 @@ public String displayName() { return displayName; } - public static Flavor fromDisplayName(final String displayName) { + public static Flavor fromDisplayName(final String displayName, final boolean strict) { switch (displayName) { case "default": return Flavor.DEFAULT; @@ -66,7 +66,12 @@ public static Flavor fromDisplayName(final String displayName) { case "unknown": return Flavor.UNKNOWN; default: - throw new IllegalStateException("unexpected distribution flavor [" + displayName + "]; your distribution is broken"); + if (strict) { + final String message = "unexpected distribution flavor [" + displayName + "]; your distribution is broken"; + throw new IllegalStateException(message); + } else { + return Flavor.UNKNOWN; + } } } @@ -75,6 +80,7 @@ public static Flavor fromDisplayName(final String displayName) { public enum Type { DEB("deb"), + DOCKER("docker"), RPM("rpm"), TAR("tar"), ZIP("zip"), @@ -90,10 +96,12 @@ public String displayName() { this.displayName = displayName; } - public static Type fromDisplayName(final String displayName) { + public static Type fromDisplayName(final String displayName, final boolean strict) { switch (displayName) { case "deb": return Type.DEB; + case "docker": + return Type.DOCKER; case "rpm": return Type.RPM; case "tar": @@ -103,9 +111,14 @@ public static Type fromDisplayName(final String displayName) { case "unknown": return Type.UNKNOWN; default: - throw new IllegalStateException("unexpected distribution type [" + displayName + "]; your distribution is broken"); + if (strict) { + throw new IllegalStateException("unexpected distribution type [" + displayName + "]; your distribution is broken"); + } else { + return Type.UNKNOWN; + } } } + } static { @@ -116,8 +129,9 @@ public static Type fromDisplayName(final String displayName) { final boolean isSnapshot; final String version; - flavor = Flavor.fromDisplayName(System.getProperty("es.distribution.flavor", "unknown")); - type = Type.fromDisplayName(System.getProperty("es.distribution.type", "unknown")); + // these are parsed at startup, and we require that we are able to recognize the values passed in by the startup scripts + 
flavor = Flavor.fromDisplayName(System.getProperty("es.distribution.flavor", "unknown"), true); + type = Type.fromDisplayName(System.getProperty("es.distribution.type", "unknown"), true); final String esPrefix = "elasticsearch-" + Version.CURRENT; final URL url = getElasticsearchCodeSourceLocation(); @@ -211,12 +225,14 @@ public static Build readBuild(StreamInput in) throws IOException { final Flavor flavor; final Type type; if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - flavor = Flavor.fromDisplayName(in.readString()); + // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know + flavor = Flavor.fromDisplayName(in.readString(), false); } else { flavor = Flavor.OSS; } if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - type = Type.fromDisplayName(in.readString()); + // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know + type = Type.fromDisplayName(in.readString(), false); } else { type = Type.UNKNOWN; } @@ -238,7 +254,13 @@ public static void writeBuild(Build build, StreamOutput out) throws IOException out.writeString(build.flavor().displayName()); } if (out.getVersion().onOrAfter(Version.V_6_3_0)) { - out.writeString(build.type().displayName()); + final Type buildType; + if (out.getVersion().before(Version.V_6_7_0) && build.type() == Type.DOCKER) { + buildType = Type.TAR; + } else { + buildType = build.type(); + } + out.writeString(buildType.displayName()); } out.writeString(build.shortHash()); out.writeString(build.date()); diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 2e04d03438e6e..85df20d849afa 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1012,7 +1012,22 @@ private enum ElasticsearchExceptionHandle { SNAPSHOT_IN_PROGRESS_EXCEPTION(org.elasticsearch.snapshots.SnapshotInProgressException.class, org.elasticsearch.snapshots.SnapshotInProgressException::new, 151, Version.V_6_7_0), NO_SUCH_REMOTE_CLUSTER_EXCEPTION(org.elasticsearch.transport.NoSuchRemoteClusterException.class, - org.elasticsearch.transport.NoSuchRemoteClusterException::new, 152, Version.V_6_7_0); + org.elasticsearch.transport.NoSuchRemoteClusterException::new, 152, Version.V_6_7_0), + RETENTION_LEASE_ALREADY_EXISTS_EXCEPTION( + org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException.class, + org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException::new, + 153, + Version.V_6_7_0), + RETENTION_LEASE_NOT_FOUND_EXCEPTION( + org.elasticsearch.index.seqno.RetentionLeaseNotFoundException.class, + org.elasticsearch.index.seqno.RetentionLeaseNotFoundException::new, + 154, + Version.V_6_7_0), + SHARD_NOT_IN_PRIMARY_MODE_EXCEPTION( + org.elasticsearch.index.shard.ShardNotInPrimaryModeException.class, + org.elasticsearch.index.shard.ShardNotInPrimaryModeException::new, + 155, + Version.V_6_8_1); final Class exceptionClass; final CheckedFunction constructor; diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index e0525127ee7e7..94c4a2731592a 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import 
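The Build changes above add a strict flag to fromDisplayName: values supplied locally by the startup scripts must be recognized (strict), while values read off the wire from nodes on other versions fall back to UNKNOWN (lenient), and the new DOCKER type is rewritten to TAR when writing to pre-6.7 nodes that do not know it. A compact standalone sketch of the strict-versus-lenient pattern, using a hypothetical DistributionType enum rather than the real org.elasticsearch.Build.Type:

public class LenientEnumParsingSketch {

    enum DistributionType { DEB, DOCKER, RPM, TAR, ZIP, UNKNOWN }

    /**
     * Strict parsing for locally supplied values (startup system properties);
     * lenient parsing for values received from other nodes, which may be newer
     * and send display names this node does not recognize.
     */
    static DistributionType fromDisplayName(String displayName, boolean strict) {
        switch (displayName) {
            case "deb": return DistributionType.DEB;
            case "docker": return DistributionType.DOCKER;
            case "rpm": return DistributionType.RPM;
            case "tar": return DistributionType.TAR;
            case "zip": return DistributionType.ZIP;
            case "unknown": return DistributionType.UNKNOWN;
            default:
                if (strict) {
                    throw new IllegalStateException("unexpected distribution type [" + displayName + "]");
                }
                return DistributionType.UNKNOWN;
        }
    }

    public static void main(String[] args) {
        System.out.println(fromDisplayName("docker", true));    // DOCKER
        System.out.println(fromDisplayName("appimage", false)); // UNKNOWN: lenient wire read
        fromDisplayName("appimage", true);                       // throws: broken local distribution
    }
}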
org.elasticsearch.index.Index; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.TransportException; import java.io.IOException; import java.io.PrintWriter; @@ -174,12 +175,42 @@ public static T useOrSuppress(T first, T second) { return first; } + private static final List> CORRUPTION_EXCEPTIONS = + Arrays.asList(CorruptIndexException.class, IndexFormatTooOldException.class, IndexFormatTooNewException.class); + + /** + * Looks at the given Throwable's and its cause(s) as well as any suppressed exceptions on the Throwable as well as its causes + * and returns the first corruption indicating exception (as defined by {@link #CORRUPTION_EXCEPTIONS}) it finds. + * @param t Throwable + * @return Corruption indicating exception if one is found, otherwise {@code null} + */ public static IOException unwrapCorruption(Throwable t) { - return (IOException) unwrap(t, CorruptIndexException.class, - IndexFormatTooOldException.class, - IndexFormatTooNewException.class); + if (t != null) { + do { + for (Class clazz : CORRUPTION_EXCEPTIONS) { + if (clazz.isInstance(t)) { + return (IOException) t; + } + } + for (Throwable suppressed : t.getSuppressed()) { + IOException corruptionException = unwrapCorruption(suppressed); + if (corruptionException != null) { + return corruptionException; + } + } + } while ((t = t.getCause()) != null); + } + return null; } + /** + * Looks at the given Throwable and its cause(s) and returns the first Throwable that is of one of the given classes or {@code null} + * if no matching Throwable is found. Unlike {@link #unwrapCorruption} this method does only check the given Throwable and its causes + * but does not look at any suppressed exceptions. + * @param t Throwable + * @param clazzes Classes to look for + * @return Matching Throwable if one is found, otherwise {@code null} + */ public static Throwable unwrap(Throwable t, Class... clazzes) { if (t != null) { do { @@ -193,6 +224,14 @@ public static Throwable unwrap(Throwable t, Class... clazzes) { return null; } + public static boolean isTransportStoppedForAction(final Throwable t, final String action) { + final TransportException maybeTransport = + (TransportException) ExceptionsHelper.unwrap(t, TransportException.class); + return maybeTransport != null + && (maybeTransport.getMessage().equals("TransportService is closed stopped can't send request") + || maybeTransport.getMessage().equals("transport stopped, action: " + action)); + } + /** * Throws the specified exception. If null if specified then true is returned. 
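The reworked ExceptionsHelper.unwrapCorruption above walks the whole causal chain and, at each level, also recurses into suppressed exceptions, returning the first corruption-indicating exception it finds, while plain unwrap continues to follow causes only. A self-contained sketch of that traversal pattern, matching against a placeholder exception class instead of the Lucene corruption exceptions:

import java.util.Arrays;
import java.util.List;

public class UnwrapSketch {

    /** Classes that count as a match; in the real code these are the Lucene corruption exceptions. */
    private static final List<Class<? extends Throwable>> MATCHES =
        Arrays.asList(IllegalStateException.class);

    /** Walks the cause chain and, at each level, recurses into suppressed exceptions. */
    static Throwable unwrapIncludingSuppressed(Throwable t) {
        while (t != null) {
            for (Class<? extends Throwable> clazz : MATCHES) {
                if (clazz.isInstance(t)) {
                    return t;
                }
            }
            for (Throwable suppressed : t.getSuppressed()) {
                Throwable match = unwrapIncludingSuppressed(suppressed);
                if (match != null) {
                    return match;
                }
            }
            t = t.getCause();
        }
        return null;
    }

    public static void main(String[] args) {
        Exception cause = new Exception("cause");
        cause.addSuppressed(new IllegalStateException("hidden corruption"));
        Exception root = new Exception("outer");
        root.initCause(cause);
        // Found even though it only appears as a suppressed exception on a nested cause.
        System.out.println(unwrapIncludingSuppressed(root));
    }
}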
*/ diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index e520d714bb931..856edcc1bc3cf 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -120,12 +120,31 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_6_6_1_ID = 6060199; public static final Version V_6_6_1 = new Version(V_6_6_1_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); + public static final int V_6_6_2_ID = 6060299; + public static final Version V_6_6_2 = new Version(V_6_6_2_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_6_7_0_ID = 6070099; public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); + public static final int V_6_7_1_ID = 6070199; + public static final Version V_6_7_1 = new Version(V_6_7_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); + public static final int V_6_7_2_ID = 6070299; + public static final Version V_6_7_2 = new Version(V_6_7_2_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); + public static final int V_6_8_0_ID = 6080099; + public static final Version V_6_8_0 = new Version(V_6_8_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); + public static final int V_6_8_1_ID = 6080199; + public static final Version V_6_8_1 = new Version(V_6_8_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); + public static final int V_6_8_2_ID = 6080299; + public static final Version V_6_8_2 = new Version(V_6_8_2_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_7_0_0_ID = 7000099; public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version CURRENT = V_7_0_0; - + public static final int V_7_0_1_ID = 7000199; + public static final Version V_7_0_1 = new Version(V_7_0_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final int V_7_1_0_ID = 7010099; + public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final int V_7_1_1_ID = 7010199; + public static final Version V_7_1_1 = new Version(V_7_1_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final int V_7_1_2_ID = 7010299; + public static final Version V_7_1_2 = new Version(V_7_1_2_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version CURRENT = V_7_1_2; static { assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to [" @@ -138,10 +157,30 @@ public static Version readVersion(StreamInput in) throws IOException { public static Version fromId(int id) { switch (id) { + case V_7_1_2_ID: + return V_7_1_2; + case V_7_1_1_ID: + return V_7_1_1; + case V_7_1_0_ID: + return V_7_1_0; + case V_7_0_1_ID: + return V_7_0_1; case V_7_0_0_ID: return V_7_0_0; + case V_6_8_2_ID: + return V_6_8_2; + case V_6_8_1_ID: + return V_6_8_1; + case V_6_8_0_ID: + return V_6_8_0; + case V_6_7_1_ID: + return V_6_7_1; + case V_6_7_2_ID: + return V_6_7_2; case V_6_7_0_ID: return V_6_7_0; + case V_6_6_2_ID: + return V_6_6_2; case V_6_6_1_ID: return V_6_6_1; case V_6_6_0_ID: diff --git a/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java b/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java index 
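The new constants above follow the id convention already used in Version.java, for example V_6_7_2_ID = 6070299 and V_7_1_2_ID = 7010299: two digits each for major, minor and revision plus a trailing 99 build component for releases. A small sketch that decodes an id under that assumed layout; it mirrors the constants in the hunk, not the real org.elasticsearch.Version parsing code:

public class VersionIdSketch {

    /** Decodes MMmmrrbb-style ids such as 6070299 (6.7.2) or 7010099 (7.1.0). */
    static String describe(int id) {
        int major = id / 1_000_000;
        int minor = (id / 10_000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100; // 99 marks a released version in these constants
        return major + "." + minor + "." + revision + " (build component " + build + ")";
    }

    public static void main(String[] args) {
        System.out.println(describe(6070299)); // 6.7.2 (build component 99)
        System.out.println(describe(7010299)); // 7.1.2 (build component 99)
    }
}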
19a0618e1c5a4..0966a9f1034a8 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java @@ -68,4 +68,9 @@ public String executor() { public Response read(StreamInput in) throws IOException { return reader.read(in); } + + @Override + public String toString() { + return super.toString() + "/" + listener; + } } diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 8a8cea82b0a4d..83e1e01614435 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -19,8 +19,8 @@ package org.elasticsearch.action; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction; import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction; import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; @@ -209,6 +209,7 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.persistent.CompletionPersistentTaskAction; import org.elasticsearch.persistent.RemovePersistentTaskAction; @@ -220,6 +221,7 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.action.RestFieldCapabilitiesAction; import org.elasticsearch.rest.action.RestMainAction; +import org.elasticsearch.rest.action.admin.cluster.RestAddVotingConfigExclusionAction; import org.elasticsearch.rest.action.admin.cluster.RestCancelTasksAction; import org.elasticsearch.rest.action.admin.cluster.RestClearVotingConfigExclusionsAction; import org.elasticsearch.rest.action.admin.cluster.RestClusterAllocationExplainAction; @@ -251,7 +253,6 @@ import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction; import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction; -import org.elasticsearch.rest.action.admin.cluster.RestAddVotingConfigExclusionAction; import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction; import org.elasticsearch.rest.action.admin.indices.RestClearIndicesCacheAction; import org.elasticsearch.rest.action.admin.indices.RestCloseIndexAction; @@ -529,6 +530,11 @@ public void reg actions.register(CompletionPersistentTaskAction.INSTANCE, CompletionPersistentTaskAction.TransportAction.class); actions.register(RemovePersistentTaskAction.INSTANCE, RemovePersistentTaskAction.TransportAction.class); + // retention leases + actions.register(RetentionLeaseActions.Add.INSTANCE, RetentionLeaseActions.Add.TransportAction.class); + actions.register(RetentionLeaseActions.Renew.INSTANCE, RetentionLeaseActions.Renew.TransportAction.class); + actions.register(RetentionLeaseActions.Remove.INSTANCE, RetentionLeaseActions.Remove.TransportAction.class); + return unmodifiableMap(actions.getRegistry()); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java index 8e9360bdb1238..b8c2c29a0c8f9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java @@ -43,6 +43,10 @@ public final class RemoteInfoResponse extends ActionResponse implements ToXConte this.infos = Collections.unmodifiableList(new ArrayList<>(infos)); } + public List getInfos() { + return infos; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 41dce3148c1df..39006cd1e8407 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -88,10 +88,11 @@ protected void masterOperation(final ClusterSearchShardsRequest request, final C String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices()); Map indicesAndFilters = new HashMap<>(); + Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); for (String index : concreteIndices) { - final AliasFilter aliasFilter = indicesService.buildAliasFilter(clusterState, index, request.indices()); + final AliasFilter aliasFilter = indicesService.buildAliasFilter(clusterState, index, indicesAndAliases); final String[] aliases = indexNameExpressionResolver.indexAliases(clusterState, index, aliasMetadata -> true, true, - request.indices()); + indicesAndAliases); indicesAndFilters.put(index, new AliasFilter(aliasFilter.getQueryBuilder(), aliases)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java index fc4eb04fada99..c63f12cfbcb36 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java @@ -39,18 +39,14 @@ public class ClusterStateResponse extends ActionResponse { private ClusterName clusterName; private ClusterState clusterState; - // the total compressed size of the full cluster state, not just - // the parts included in this response - private ByteSizeValue totalCompressedSize; private boolean waitForTimedOut = false; public ClusterStateResponse() { } - public ClusterStateResponse(ClusterName clusterName, ClusterState clusterState, long sizeInBytes, boolean waitForTimedOut) { + public ClusterStateResponse(ClusterName clusterName, ClusterState clusterState, boolean waitForTimedOut) { this.clusterName = clusterName; this.clusterState = clusterState; - this.totalCompressedSize = new ByteSizeValue(sizeInBytes); this.waitForTimedOut = waitForTimedOut; } @@ -69,16 +65,6 @@ public ClusterName getClusterName() { return this.clusterName; } - /** - * The total compressed size of the full cluster state, not just the parts - * returned by {@link #getState()}. 
The total compressed size is the size - * of the cluster state as it would be transmitted over the network during - * intra-node communication. - */ - public ByteSizeValue getTotalCompressedSize() { - return totalCompressedSize; - } - /** * Returns whether the request timed out waiting for a cluster state with a metadata version equal or * higher than the specified metadata. @@ -96,14 +82,8 @@ public void readFrom(StreamInput in) throws IOException { } else { clusterState = ClusterState.readFrom(in, null); } - if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - totalCompressedSize = new ByteSizeValue(in); - } else { - // in a mixed cluster, if a pre 6.0 node processes the get cluster state - // request, then a compressed size won't be returned, so just return 0; - // its a temporary situation until all nodes in the cluster have been upgraded, - // at which point the correct cluster state size will always be reported - totalCompressedSize = new ByteSizeValue(0L); + if (in.getVersion().before(Version.V_7_0_0)) { + new ByteSizeValue(in); } if (in.getVersion().onOrAfter(Version.V_6_6_0)) { waitForTimedOut = in.readBoolean(); @@ -123,8 +103,8 @@ public void writeTo(StreamOutput out) throws IOException { ClusterModule.filterCustomsForPre63Clients(clusterState).writeTo(out); } } - if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - totalCompressedSize.writeTo(out); + if (out.getVersion().before(Version.V_7_0_0)) { + ByteSizeValue.ZERO.writeTo(out); } if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeBoolean(waitForTimedOut); @@ -141,8 +121,7 @@ public boolean equals(Object o) { // Best effort. Only compare cluster state version and master node id, // because cluster state doesn't implement equals() Objects.equals(getVersion(clusterState), getVersion(response.clusterState)) && - Objects.equals(getMasterNodeId(clusterState), getMasterNodeId(response.clusterState)) && - Objects.equals(totalCompressedSize, response.totalCompressedSize); + Objects.equals(getMasterNodeId(clusterState), getMasterNodeId(response.clusterState)); } @Override @@ -153,7 +132,6 @@ public int hashCode() { clusterName, getVersion(clusterState), getMasterNodeId(clusterState), - totalCompressedSize, waitForTimedOut ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 75dc811f37db6..d0fd08b690f21 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -20,7 +20,8 @@ package org.elasticsearch.action.admin.cluster.state; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.Version; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; @@ -42,10 +43,16 @@ import java.io.IOException; import java.util.function.Predicate; -import static org.elasticsearch.discovery.zen.PublishClusterStateAction.serializeFullClusterState; - public class TransportClusterStateAction extends TransportMasterNodeReadAction { + private final Logger logger = LogManager.getLogger(getClass()); + + static { + final String property = 
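ClusterStateResponse above drops the compressed-size field, but the wire format must still interoperate with pre-7.0 nodes that expect it: readFrom consumes and discards a ByteSizeValue when reading from older versions, and writeTo sends ByteSizeValue.ZERO to them. A rough standalone sketch of that read-and-discard / write-a-placeholder pattern, using plain DataInputStream/DataOutputStream and a long placeholder in place of the real StreamInput/StreamOutput and ByteSizeValue types:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class RemovedFieldBwcSketch {

    static final int V_7_0_0 = 7000099;

    /** Older senders still include the removed field, so consume and ignore it. */
    static long readResponse(DataInputStream in, int senderVersion) throws IOException {
        long stateVersion = in.readLong();
        if (senderVersion < V_7_0_0) {
            in.readLong(); // removed "total compressed size" field: read and discard
        }
        return stateVersion;
    }

    /** Older receivers still expect the removed field, so write a zero placeholder. */
    static void writeResponse(DataOutputStream out, int receiverVersion, long stateVersion) throws IOException {
        out.writeLong(stateVersion);
        if (receiverVersion < V_7_0_0) {
            out.writeLong(0L); // placeholder, analogous to ByteSizeValue.ZERO
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeResponse(new DataOutputStream(bytes), 6080099, 42L); // writing to a 6.8.0 receiver
        long version = readResponse(
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), 6080099);
        System.out.println(version); // 42
    }
}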
System.getProperty("es.cluster_state.size"); + if (property != null) { + throw new IllegalArgumentException("es.cluster_state.size is no longer respected but was [" + property + "]"); + } + } @Inject public TransportClusterStateAction(TransportService transportService, ClusterService clusterService, @@ -107,7 +114,7 @@ public void onClusterServiceClose() { @Override public void onTimeout(TimeValue timeout) { try { - listener.onResponse(new ClusterStateResponse(clusterState.getClusterName(), null, 0L, true)); + listener.onResponse(new ClusterStateResponse(clusterState.getClusterName(), null, true)); } catch (Exception e) { listener.onFailure(e); } @@ -184,8 +191,8 @@ private void buildResponse(final ClusterStateRequest request, } } } - listener.onResponse(new ClusterStateResponse(currentState.getClusterName(), builder.build(), - serializeFullClusterState(currentState, Version.CURRENT).length(), false)); + + listener.onResponse(new ClusterStateResponse(currentState.getClusterName(), builder.build(), false)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java index 6597cdf30f5e7..3b3fa480326ae 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -502,6 +502,8 @@ static final class Fields { static final String VM_NAME = "vm_name"; static final String VM_VERSION = "vm_version"; static final String VM_VENDOR = "vm_vendor"; + static final String BUNDLED_JDK = "bundled_jdk"; + static final String USING_BUNDLED_JDK = "using_bundled_jdk"; static final String COUNT = "count"; static final String THREADS = "threads"; static final String MAX_UPTIME = "max_uptime"; @@ -524,6 +526,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) builder.field(Fields.VM_NAME, v.key.vmName); builder.field(Fields.VM_VERSION, v.key.vmVersion); builder.field(Fields.VM_VENDOR, v.key.vmVendor); + builder.field(Fields.BUNDLED_JDK, v.key.bundledJdk); + builder.field(Fields.USING_BUNDLED_JDK, v.key.usingBundledJdk); builder.field(Fields.COUNT, v.value); builder.endObject(); } @@ -543,12 +547,16 @@ public static class JvmVersion { String vmName; String vmVersion; String vmVendor; + boolean bundledJdk; + Boolean usingBundledJdk; JvmVersion(JvmInfo jvmInfo) { version = jvmInfo.version(); vmName = jvmInfo.getVmName(); vmVersion = jvmInfo.getVmVersion(); vmVendor = jvmInfo.getVmVendor(); + bundledJdk = jvmInfo.getBundledJdk(); + usingBundledJdk = jvmInfo.getUsingBundledJdk(); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index 2c3d178db882c..d67c181ae256f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -46,7 +46,6 @@ import java.io.IOException; import java.util.Objects; -import java.util.function.Consumer; public class TransportVerifyShardBeforeCloseAction extends TransportReplicationAction< TransportVerifyShardBeforeCloseAction.ShardRequest, TransportVerifyShardBeforeCloseAction.ShardRequest, ReplicationResponse> { @@ -108,13 +107,7 @@ 
private void executeShardOperation(final ShardRequest request, final IndexShard if (clusterBlocks.hasIndexBlock(shardId.getIndexName(), request.clusterBlock()) == false) { throw new IllegalStateException("Index shard " + shardId + " must be blocked by " + request.clusterBlock() + " before closing"); } - - final long maxSeqNo = indexShard.seqNoStats().getMaxSeqNo(); - if (indexShard.getGlobalCheckpoint() != maxSeqNo) { - throw new IllegalStateException("Global checkpoint [" + indexShard.getGlobalCheckpoint() - + "] mismatches maximum sequence number [" + maxSeqNo + "] on index shard " + shardId); - } - + indexShard.verifyShardBeforeIndexClosing(); indexShard.flush(new FlushRequest().force(true)); logger.trace("{} shard is ready for closing", shardId); } @@ -136,10 +129,8 @@ class VerifyShardBeforeCloseActionReplicasProxy extends ReplicasProxy { } @Override - public void markShardCopyAsStaleIfNeeded(final ShardId shardId, final String allocationId, final Runnable onSuccess, - final Consumer onPrimaryDemoted, final Consumer onIgnoredFailure) { - shardStateAction.remoteShardFailed(shardId, allocationId, primaryTerm, true, "mark copy as stale", null, - createShardActionListener(onSuccess, onPrimaryDemoted, onIgnoredFailure)); + public void markShardCopyAsStaleIfNeeded(final ShardId shardId, final String allocationId, final ActionListener listener) { + shardStateAction.remoteShardFailed(shardId, allocationId, primaryTerm, true, "mark copy as stale", null, listener); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index acd0d10281463..cc18e852941f3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -126,10 +126,11 @@ public RequestValidators(Collection validators) { this.validators = validators; } - private Exception validateRequest(PutMappingRequest request, ClusterState state, Index[] indices) { + Exception validateRequest(PutMappingRequest request, ClusterState state, Index[] indices) { Exception firstException = null; for (MappingRequestValidator validator : validators) { final Exception e = validator.validateRequest(request, state, indices); + if (e == null) continue; if (firstException == null) { firstException = e; } else { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index ce1f1dc240426..edd59f8b18ca1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -120,7 +120,7 @@ protected void masterOperation(final RolloverRequest rolloverRequest, final Clus final String rolloverIndexName = indexNameExpressionResolver.resolveDateMathExpression(unresolvedName); MetaDataCreateIndexService.validateIndexName(rolloverIndexName, state); // will fail if the index already exists checkNoDuplicatedAliasInIndexTemplate(metaData, rolloverIndexName, rolloverRequest.getAlias()); - client.admin().indices().prepareStats(sourceIndexName).clear().setDocs(true).execute( + 
client.admin().indices().prepareStats(rolloverRequest.getAlias()).clear().setDocs(true).execute( new ActionListener() { @Override public void onResponse(IndicesStatsResponse statsResponse) { @@ -249,7 +249,7 @@ static Map evaluateConditions(final Collection> co static Map evaluateConditions(final Collection> conditions, final IndexMetaData metaData, final IndicesStatsResponse statsResponse) { - return evaluateConditions(conditions, statsResponse.getPrimaries().getDocs(), metaData); + return evaluateConditions(conditions, statsResponse.getIndex(metaData.getIndex().getName()).getPrimaries().getDocs(), metaData); } static void validate(MetaData metaData, RolloverRequest request) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java index a36821a4b656a..c66cbc388500e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java @@ -108,4 +108,24 @@ public CommonStats getPrimaries() { primary = stats; return stats; } + + public static class IndexStatsBuilder { + private final String indexName; + private final String uuid; + private final List shards = new ArrayList<>(); + + public IndexStatsBuilder(String indexName, String uuid) { + this.indexName = indexName; + this.uuid = uuid; + } + + public IndexStatsBuilder add(ShardStats shardStats) { + shards.add(shardStats); + return this; + } + + public IndexStats build() { + return new IndexStats(indexName, uuid, shards.toArray(new ShardStats[shards.size()])); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java index cc563948160da..0540bc3ad5cc1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.stats; +import org.elasticsearch.action.admin.indices.stats.IndexStats.IndexStatsBuilder; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.routing.ShardRouting; @@ -29,12 +30,10 @@ import org.elasticsearch.index.Index; import java.io.IOException; -import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; +import java.util.stream.Collectors; import static java.util.Collections.unmodifiableMap; @@ -83,26 +82,17 @@ public Map getIndices() { if (indicesStats != null) { return indicesStats; } - Map indicesStats = new HashMap<>(); - Set indices = new HashSet<>(); + final Map indexToIndexStatsBuilder = new HashMap<>(); for (ShardStats shard : shards) { - indices.add(shard.getShardRouting().index()); + Index index = shard.getShardRouting().index(); + IndexStatsBuilder indexStatsBuilder = indexToIndexStatsBuilder.computeIfAbsent(index.getName(), + k -> new IndexStatsBuilder(k, index.getUUID())); + indexStatsBuilder.add(shard); } - for (Index index : indices) { - List shards = new ArrayList<>(); - String indexName = index.getName(); - for (ShardStats shard : this.shards) { - if (shard.getShardRouting().getIndexName().equals(indexName)) { - 
shards.add(shard); - } - } - indicesStats.put( - indexName, new IndexStats(indexName, index.getUUID(), shards.toArray(new ShardStats[shards.size()])) - ); - } - this.indicesStats = indicesStats; + indicesStats = indexToIndexStatsBuilder.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().build())); return indicesStats; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 7016d1b42894f..6e10d3d42187f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -115,8 +115,9 @@ protected void doExecute(Task task, ValidateQueryRequest request, ActionListener @Override protected ShardValidateQueryRequest newShardRequest(int numShards, ShardRouting shard, ValidateQueryRequest request) { - final AliasFilter aliasFilter = searchService.buildAliasFilter(clusterService.state(), shard.getIndexName(), - request.indices()); + final ClusterState clusterState = clusterService.state(); + final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); + final AliasFilter aliasFilter = searchService.buildAliasFilter(clusterState, shard.getIndexName(), indicesAndAliases); return new ShardValidateQueryRequest(shard.shardId(), aliasFilter, request); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java index 5f61d90d500e7..65452f9a75dba 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java @@ -172,11 +172,6 @@ public String getConcreteIndex() { return getCurrentItem().index(); } - /** returns any primary response that was set by a previous primary */ - public BulkItemResponse getPreviousPrimaryResponse() { - return getCurrentItem().getPrimaryResponse(); - } - /** returns a translog location that is needed to be synced in order to persist all operations executed so far */ public Translog.Location getLocationToSync() { assert hasMoreOperationsToExecute() == false; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 02bebb5b38e42..b0ad87a8b744a 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -301,11 +301,7 @@ public BulkProcessor add(DeleteRequest request) { * Adds either a delete or an index request. 
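The rewritten getIndices() above groups shard stats by index in a single pass with Map.computeIfAbsent and the new IndexStatsBuilder, instead of rescanning the shard array once per index. A minimal standalone sketch of the same grouping pattern, assuming a hypothetical ShardStat class in place of the Elasticsearch types:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class GroupShardsByIndex {
        // Hypothetical stand-in for ShardStats: only the fields needed for grouping.
        static final class ShardStat {
            final String indexName;
            final int shardId;
            ShardStat(String indexName, int shardId) {
                this.indexName = indexName;
                this.shardId = shardId;
            }
        }

        public static void main(String[] args) {
            List<ShardStat> shards = new ArrayList<>();
            shards.add(new ShardStat("logs", 0));
            shards.add(new ShardStat("logs", 1));
            shards.add(new ShardStat("metrics", 0));

            // Single pass: lazily create a per-index bucket the first time an index name is seen.
            Map<String, List<ShardStat>> byIndex = new HashMap<>();
            for (ShardStat shard : shards) {
                byIndex.computeIfAbsent(shard.indexName, k -> new ArrayList<>()).add(shard);
            }
            byIndex.forEach((index, group) -> System.out.println(index + " -> " + group.size() + " shard(s)"));
        }
    }

In the actual change the builder plays the role of the ArrayList bucket here, deferring construction of the immutable IndexStats until all shards have been seen.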
*/ public BulkProcessor add(DocWriteRequest request) { - return add(request, null); - } - - public BulkProcessor add(DocWriteRequest request, @Nullable Object payload) { - internalAdd(request, payload); + internalAdd(request); return this; } @@ -319,9 +315,9 @@ protected void ensureOpen() { } } - private synchronized void internalAdd(DocWriteRequest request, @Nullable Object payload) { + private synchronized void internalAdd(DocWriteRequest request) { ensureOpen(); - bulkRequest.add(request, payload); + bulkRequest.add(request); executeIfNeeded(); } @@ -330,16 +326,16 @@ private synchronized void internalAdd(DocWriteRequest request, @Nullable Obje */ public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, XContentType xContentType) throws Exception { - return add(data, defaultIndex, defaultType, null, null, xContentType); + return add(data, defaultIndex, defaultType, null, xContentType); } /** * Adds the data from the bytes to be processed by the bulk processor */ public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, - @Nullable String defaultPipeline, @Nullable Object payload, + @Nullable String defaultPipeline, XContentType xContentType) throws Exception { - bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, payload, true, xContentType); + bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, true, xContentType); executeIfNeeded(); return this; } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 42f569c0a9bda..b55425fc1b007 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.bulk; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; @@ -31,28 +30,17 @@ import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContent; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -60,7 +48,6 @@ import 
java.util.Set; import static org.elasticsearch.action.ValidateActions.addValidationError; -import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; /** * A bulk request holds an ordered {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s @@ -72,19 +59,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesRequest, WriteRequest { private static final int REQUEST_OVERHEAD = 50; - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(BulkRequest.class)); - private static final ParseField INDEX = new ParseField("_index"); - private static final ParseField TYPE = new ParseField("_type"); - private static final ParseField ID = new ParseField("_id"); - private static final ParseField ROUTING = new ParseField("routing"); - private static final ParseField OP_TYPE = new ParseField("op_type"); - private static final ParseField VERSION = new ParseField("version"); - private static final ParseField VERSION_TYPE = new ParseField("version_type"); - private static final ParseField RETRY_ON_CONFLICT = new ParseField("retry_on_conflict"); - private static final ParseField PIPELINE = new ParseField("pipeline"); - private static final ParseField SOURCE = new ParseField("_source"); - private static final ParseField IF_SEQ_NO = new ParseField("if_seq_no"); - private static final ParseField IF_PRIMARY_TERM = new ParseField("if_primary_term"); /** * Requests that are part of this request. It is only possible to add things that are both {@link ActionRequest}s and @@ -93,7 +67,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques */ final List> requests = new ArrayList<>(); private final Set indices = new HashSet<>(); - List payloads = null; protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; @@ -131,23 +104,18 @@ public BulkRequest add(DocWriteRequest... requests) { return this; } - public BulkRequest add(DocWriteRequest request) { - return add(request, null); - } - /** * Add a request to the current BulkRequest. * @param request Request to add - * @param payload Optional payload * @return the current bulk request */ - public BulkRequest add(DocWriteRequest request, @Nullable Object payload) { + public BulkRequest add(DocWriteRequest request) { if (request instanceof IndexRequest) { - add((IndexRequest) request, payload); + add((IndexRequest) request); } else if (request instanceof DeleteRequest) { - add((DeleteRequest) request, payload); + add((DeleteRequest) request); } else if (request instanceof UpdateRequest) { - add((UpdateRequest) request, payload); + add((UpdateRequest) request); } else { throw new IllegalArgumentException("No support for request [" + request + "]"); } @@ -170,19 +138,14 @@ public BulkRequest add(Iterable> requests) { * (for example, if no id is provided, one will be generated, or usage of the create flag). 
*/ public BulkRequest add(IndexRequest request) { - return internalAdd(request, null); - } - - public BulkRequest add(IndexRequest request, @Nullable Object payload) { - return internalAdd(request, payload); + return internalAdd(request); } - BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) { + BulkRequest internalAdd(IndexRequest request) { Objects.requireNonNull(request, "'request' must not be null"); applyGlobalMandatoryParameters(request); requests.add(request); - addPayload(payload); // lack of source is validated in validate() method sizeInBytes += (request.source() != null ? request.source().length() : 0) + REQUEST_OVERHEAD; indices.add(request.index()); @@ -193,19 +156,14 @@ BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) { * Adds an {@link UpdateRequest} to the list of actions to execute. */ public BulkRequest add(UpdateRequest request) { - return internalAdd(request, null); - } - - public BulkRequest add(UpdateRequest request, @Nullable Object payload) { - return internalAdd(request, payload); + return internalAdd(request); } - BulkRequest internalAdd(UpdateRequest request, @Nullable Object payload) { + BulkRequest internalAdd(UpdateRequest request) { Objects.requireNonNull(request, "'request' must not be null"); applyGlobalMandatoryParameters(request); requests.add(request); - addPayload(payload); if (request.doc() != null) { sizeInBytes += request.doc().source().length(); } @@ -223,34 +181,15 @@ BulkRequest internalAdd(UpdateRequest request, @Nullable Object payload) { * Adds an {@link DeleteRequest} to the list of actions to execute. */ public BulkRequest add(DeleteRequest request) { - return add(request, null); - } - - public BulkRequest add(DeleteRequest request, @Nullable Object payload) { Objects.requireNonNull(request, "'request' must not be null"); applyGlobalMandatoryParameters(request); requests.add(request); - addPayload(payload); sizeInBytes += REQUEST_OVERHEAD; indices.add(request.index()); return this; } - private void addPayload(Object payload) { - if (payloads == null) { - if (payload == null) { - return; - } - payloads = new ArrayList<>(requests.size() + 10); - // add requests#size-1 elements to the payloads if it null (we add for an *existing* request) - for (int i = 1; i < requests.size(); i++) { - payloads.add(null); - } - } - payloads.add(payload); - } - /** * The list of requests in this bulk request. */ @@ -258,17 +197,6 @@ public List> requests() { return this.requests; } - /** - * The list of optional payloads associated with requests in the same order as the requests. Note, elements within - * it might be null if no payload has been provided. - *

- * Note, if no payloads have been provided, this method will return null (as to conserve memory overhead). - */ - @Nullable - public List payloads() { - return this.payloads; - } - /** * The number of actions in the bulk request. */ @@ -316,7 +244,7 @@ public BulkRequest add(byte[] data, int from, int length, @Nullable String defau @Deprecated public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, XContentType xContentType) throws IOException { - return add(data, defaultIndex, defaultType, null, null, null, null, true, xContentType); + return add(data, defaultIndex, defaultType, null, null, null, true, xContentType); } /** @@ -324,7 +252,7 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null */ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, XContentType xContentType) throws IOException { - return add(data, defaultIndex, MapperService.SINGLE_MAPPING_NAME, null, null, null, null, true, xContentType); + return add(data, defaultIndex, MapperService.SINGLE_MAPPING_NAME, null, null, null, true, xContentType); } /** @@ -334,7 +262,7 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Deprecated public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex, XContentType xContentType) throws IOException { - return add(data, defaultIndex, defaultType, null, null, null, null, allowExplicitIndex, xContentType); + return add(data, defaultIndex, defaultType, null, null, null, allowExplicitIndex, xContentType); } /** @@ -342,210 +270,32 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null */ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, boolean allowExplicitIndex, XContentType xContentType) throws IOException { - return add(data, defaultIndex, MapperService.SINGLE_MAPPING_NAME, null, null, null, null, allowExplicitIndex, xContentType); + return add(data, defaultIndex, MapperService.SINGLE_MAPPING_NAME, null, null, null, allowExplicitIndex, xContentType); } public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultRouting, @Nullable FetchSourceContext defaultFetchSourceContext, - @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex, + @Nullable String defaultPipeline, boolean allowExplicitIndex, XContentType xContentType) throws IOException { return add(data, defaultIndex, MapperService.SINGLE_MAPPING_NAME, defaultRouting, defaultFetchSourceContext, - defaultPipeline, payload, allowExplicitIndex, xContentType); + defaultPipeline, allowExplicitIndex, xContentType); } /** - * @deprecated use {@link #add(BytesReference, String, String, FetchSourceContext, String, Object, boolean, XContentType)} instead + * @deprecated use {@link #add(BytesReference, String, String, FetchSourceContext, String, boolean, XContentType)} instead */ @Deprecated public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable FetchSourceContext defaultFetchSourceContext, - @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex, + @Nullable String defaultPipeline, boolean allowExplicitIndex, XContentType xContentType) throws IOException { - XContent xContent = xContentType.xContent(); - int line = 0; - int from = 0; - int length = data.length(); - byte marker = 
xContent.streamSeparator(); - boolean typesDeprecationLogged = false; - while (true) { - int nextMarker = findNextMarker(marker, from, data, length); - if (nextMarker == -1) { - break; - } - line++; - - // now parse the action - // EMPTY is safe here because we never call namedObject - try (InputStream stream = data.slice(from, nextMarker - from).streamInput(); - XContentParser parser = xContent - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { - // move pointers - from = nextMarker + 1; - - // Move to START_OBJECT - XContentParser.Token token = parser.nextToken(); - if (token == null) { - continue; - } - if (token != XContentParser.Token.START_OBJECT) { - throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " - + XContentParser.Token.START_OBJECT + " but found [" + token + "]"); - } - // Move to FIELD_NAME, that's the action - token = parser.nextToken(); - if (token != XContentParser.Token.FIELD_NAME) { - throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " - + XContentParser.Token.FIELD_NAME + " but found [" + token + "]"); - } - String action = parser.currentName(); - - String index = defaultIndex; - String type = defaultType; - String id = null; - String routing = valueOrDefault(defaultRouting, globalRouting); - FetchSourceContext fetchSourceContext = defaultFetchSourceContext; - String opType = null; - long version = Versions.MATCH_ANY; - VersionType versionType = VersionType.INTERNAL; - long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; - int retryOnConflict = 0; - String pipeline = valueOrDefault(defaultPipeline, globalPipeline); - - // at this stage, next token can either be END_OBJECT (and use default index and type, with auto generated id) - // or START_OBJECT which will have another set of parameters - token = parser.nextToken(); - - if (token == XContentParser.Token.START_OBJECT) { - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if (INDEX.match(currentFieldName, parser.getDeprecationHandler())){ - if (!allowExplicitIndex) { - throw new IllegalArgumentException("explicit index in bulk is not allowed"); - } - index = parser.text(); - } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - if (typesDeprecationLogged == false) { - deprecationLogger.deprecatedAndMaybeLog("bulk_with_types", RestBulkAction.TYPES_DEPRECATION_MESSAGE); - typesDeprecationLogged = true; - } - type = parser.text(); - } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { - id = parser.text(); - } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { - routing = parser.text(); - } else if (OP_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - opType = parser.text(); - } else if (VERSION.match(currentFieldName, parser.getDeprecationHandler())) { - version = parser.longValue(); - } else if (VERSION_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - versionType = VersionType.fromString(parser.text()); - } else if (IF_SEQ_NO.match(currentFieldName, parser.getDeprecationHandler())) { - ifSeqNo = parser.longValue(); - } else if (IF_PRIMARY_TERM.match(currentFieldName, parser.getDeprecationHandler())) { - ifPrimaryTerm = parser.longValue(); - } else if 
(RETRY_ON_CONFLICT.match(currentFieldName, parser.getDeprecationHandler())) { - retryOnConflict = parser.intValue(); - } else if (PIPELINE.match(currentFieldName, parser.getDeprecationHandler())) { - pipeline = parser.text(); - } else if (SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { - fetchSourceContext = FetchSourceContext.fromXContent(parser); - } else { - throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" - + currentFieldName + "]"); - } - } else if (token == XContentParser.Token.START_ARRAY) { - throw new IllegalArgumentException("Malformed action/metadata line [" + line + - "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); - } else if (token == XContentParser.Token.START_OBJECT && SOURCE.match(currentFieldName, - parser.getDeprecationHandler())) { - fetchSourceContext = FetchSourceContext.fromXContent(parser); - } else if (token != XContentParser.Token.VALUE_NULL) { - throw new IllegalArgumentException("Malformed action/metadata line [" + line - + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); - } - } - } else if (token != XContentParser.Token.END_OBJECT) { - throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " - + XContentParser.Token.START_OBJECT + " or " + XContentParser.Token.END_OBJECT + " but found [" + token + "]"); - } - - if ("delete".equals(action)) { - add(new DeleteRequest(index, type, id).routing(routing) - .version(version).versionType(versionType).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm), payload); - } else { - nextMarker = findNextMarker(marker, from, data, length); - if (nextMarker == -1) { - break; - } - line++; - - // we use internalAdd so we don't fork here, this allows us not to copy over the big byte array to small chunks - // of index request. - if ("index".equals(action)) { - if (opType == null) { - internalAdd(new IndexRequest(index, type, id).routing(routing).version(version).versionType(versionType) - .setPipeline(pipeline).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) - .source(sliceTrimmingCarriageReturn(data, from, nextMarker,xContentType), xContentType), payload); - } else { - internalAdd(new IndexRequest(index, type, id).routing(routing).version(version).versionType(versionType) - .create("create".equals(opType)).setPipeline(pipeline) - .setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) - .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload); - } - } else if ("create".equals(action)) { - internalAdd(new IndexRequest(index, type, id).routing(routing).version(version).versionType(versionType) - .create(true).setPipeline(pipeline).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) - .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload); - } else if ("update".equals(action)) { - if (version != Versions.MATCH_ANY || versionType != VersionType.INTERNAL) { - throw new IllegalArgumentException("Update requests do not support versioning. 
" + - "Please use `if_seq_no` and `if_primary_term` instead"); - } - UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).retryOnConflict(retryOnConflict) - .setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) - .routing(routing); - // EMPTY is safe here because we never call namedObject - try (InputStream dataStream = sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType).streamInput(); - XContentParser sliceParser = xContent.createParser(NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, dataStream)) { - updateRequest.fromXContent(sliceParser); - } - if (fetchSourceContext != null) { - updateRequest.fetchSource(fetchSourceContext); - } - IndexRequest upsertRequest = updateRequest.upsertRequest(); - if (upsertRequest != null) { - upsertRequest.setPipeline(defaultPipeline); - } - - internalAdd(updateRequest, payload); - } - // move pointers - from = nextMarker + 1; - } - } - } + String routing = valueOrDefault(defaultRouting, globalRouting); + String pipeline = valueOrDefault(defaultPipeline, globalPipeline); + new BulkRequestParser(true).parse(data, defaultIndex, defaultType, routing, defaultFetchSourceContext, pipeline, + allowExplicitIndex, xContentType, this::internalAdd, this::internalAdd, this::add); return this; } - /** - * Returns the sliced {@link BytesReference}. If the {@link XContentType} is JSON, the byte preceding the marker is checked to see - * if it is a carriage return and if so, the BytesReference is sliced so that the carriage return is ignored - */ - private BytesReference sliceTrimmingCarriageReturn(BytesReference bytesReference, int from, int nextMarker, XContentType xContentType) { - final int length; - if (XContentType.JSON == xContentType && bytesReference.get(nextMarker - 1) == (byte) '\r') { - length = nextMarker - from - 1; - } else { - length = nextMarker - from; - } - return bytesReference.slice(from, length); - } - /** * Sets the number of shard copies that must be active before proceeding with the write. * See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details. @@ -615,18 +365,6 @@ public String routing() { return globalRouting; } - private int findNextMarker(byte marker, int from, BytesReference data, int length) { - for (int i = from; i < length; i++) { - if (data.get(i) == marker) { - return i; - } - } - if (from != length) { - throw new IllegalArgumentException("The bulk request must be terminated by a newline [\n]"); - } - return -1; - } - @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java new file mode 100644 index 0000000000000..0336c90780c77 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java @@ -0,0 +1,310 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.bulk; + +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.rest.action.document.RestBulkAction; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; + +import java.io.IOException; +import java.io.InputStream; +import java.util.function.Consumer; + +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; + +/** + * Helper to parse bulk requests. This should be considered an internal class. + */ +public final class BulkRequestParser { + + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(BulkRequestParser.class)); + + private static final ParseField INDEX = new ParseField("_index"); + private static final ParseField TYPE = new ParseField("_type"); + private static final ParseField ID = new ParseField("_id"); + private static final ParseField ROUTING = new ParseField("routing"); + private static final ParseField OP_TYPE = new ParseField("op_type"); + private static final ParseField VERSION = new ParseField("version"); + private static final ParseField VERSION_TYPE = new ParseField("version_type"); + private static final ParseField RETRY_ON_CONFLICT = new ParseField("retry_on_conflict"); + private static final ParseField PIPELINE = new ParseField("pipeline"); + private static final ParseField SOURCE = new ParseField("_source"); + private static final ParseField IF_SEQ_NO = new ParseField("if_seq_no"); + private static final ParseField IF_PRIMARY_TERM = new ParseField("if_primary_term"); + + private final boolean warnOnTypeUsage; + + /** + * Create a new parser. + * @param warnOnTypeUsage whether it warns upon types being explicitly specified + */ + public BulkRequestParser(boolean warnOnTypeUsage) { + this.warnOnTypeUsage = warnOnTypeUsage; + } + + private int findNextMarker(byte marker, int from, BytesReference data, int length) { + for (int i = from; i < length; i++) { + if (data.get(i) == marker) { + return i; + } + } + if (from != length) { + throw new IllegalArgumentException("The bulk request must be terminated by a newline [\\n]"); + } + return -1; + } + + /** + * Returns the sliced {@link BytesReference}. 
If the {@link XContentType} is JSON, the byte preceding the marker is checked to see + * if it is a carriage return and if so, the BytesReference is sliced so that the carriage return is ignored + */ + private static BytesReference sliceTrimmingCarriageReturn(BytesReference bytesReference, int from, int nextMarker, + XContentType xContentType) { + final int length; + if (XContentType.JSON == xContentType && bytesReference.get(nextMarker - 1) == (byte) '\r') { + length = nextMarker - from - 1; + } else { + length = nextMarker - from; + } + return bytesReference.slice(from, length); + } + + /** + * Parse the provided {@code data} assuming the provided default values. Index requests + * will be passed to the {@code indexRequestConsumer}, update requests to the + * {@code updateRequestConsumer} and delete requests to the {@code deleteRequestConsumer}. + */ + public void parse( + BytesReference data, @Nullable String defaultIndex, + @Nullable String defaultRouting, @Nullable FetchSourceContext defaultFetchSourceContext, + @Nullable String defaultPipeline, boolean allowExplicitIndex, + XContentType xContentType, + Consumer indexRequestConsumer, + Consumer updateRequestConsumer, + Consumer deleteRequestConsumer) throws IOException { + parse(data, defaultIndex, null, defaultRouting, defaultFetchSourceContext, defaultPipeline, allowExplicitIndex, xContentType, + indexRequestConsumer, updateRequestConsumer, deleteRequestConsumer); + } + + /** + * Parse the provided {@code data} assuming the provided default values. Index requests + * will be passed to the {@code indexRequestConsumer}, update requests to the + * {@code updateRequestConsumer} and delete requests to the {@code deleteRequestConsumer}. + * @deprecated Use {@link #parse(BytesReference, String, String, FetchSourceContext, String, boolean, XContentType, + * Consumer, Consumer, Consumer)} instead. 
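As described in the javadoc above, the new parser splits the bulk payload on the newline separator, trims a carriage return that may precede it, and hands each parsed request to a caller-supplied consumer rather than appending it to a list. A simplified, self-contained sketch of that callback-driven splitting, assuming plain byte[] input and a single Consumer<String> in place of the three request consumers:

    import java.nio.charset.StandardCharsets;
    import java.util.function.Consumer;

    public class NdjsonSplitSketch {
        // Finds the next newline separator, mirroring the "find next marker" loop in the parser.
        static int findNextMarker(byte[] data, int from) {
            for (int i = from; i < data.length; i++) {
                if (data[i] == '\n') {
                    return i;
                }
            }
            return -1;
        }

        // Slices one line out of the buffer, dropping a '\r' immediately before the separator.
        // The real parser only does this trimming for JSON content.
        static String sliceTrimmingCarriageReturn(byte[] data, int from, int nextMarker) {
            int end = (nextMarker > from && data[nextMarker - 1] == '\r') ? nextMarker - 1 : nextMarker;
            return new String(data, from, end - from, StandardCharsets.UTF_8);
        }

        // Dispatches every line to the supplied consumer, the way the parser dispatches
        // index, update and delete requests to their respective consumers.
        static void parse(byte[] data, Consumer<String> lineConsumer) {
            int from = 0;
            int nextMarker;
            while ((nextMarker = findNextMarker(data, from)) != -1) {
                lineConsumer.accept(sliceTrimmingCarriageReturn(data, from, nextMarker));
                from = nextMarker + 1;
            }
        }

        public static void main(String[] args) {
            byte[] body = "{\"index\":{\"_id\":\"1\"}}\r\n{\"field\":\"value\"}\n".getBytes(StandardCharsets.UTF_8);
            parse(body, line -> System.out.println("action/source line: " + line));
        }
    }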
+ */ + @Deprecated + public void parse( + BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, + @Nullable String defaultRouting, @Nullable FetchSourceContext defaultFetchSourceContext, + @Nullable String defaultPipeline, boolean allowExplicitIndex, + XContentType xContentType, + Consumer indexRequestConsumer, + Consumer updateRequestConsumer, + Consumer deleteRequestConsumer) throws IOException { + XContent xContent = xContentType.xContent(); + int line = 0; + int from = 0; + int length = data.length(); + byte marker = xContent.streamSeparator(); + boolean typesDeprecationLogged = false; + while (true) { + int nextMarker = findNextMarker(marker, from, data, length); + if (nextMarker == -1) { + break; + } + line++; + + // now parse the action + // EMPTY is safe here because we never call namedObject + try (InputStream stream = data.slice(from, nextMarker - from).streamInput(); + XContentParser parser = xContent + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + // move pointers + from = nextMarker + 1; + + // Move to START_OBJECT + XContentParser.Token token = parser.nextToken(); + if (token == null) { + continue; + } + if (token != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " + + XContentParser.Token.START_OBJECT + " but found [" + token + "]"); + } + // Move to FIELD_NAME, that's the action + token = parser.nextToken(); + if (token != XContentParser.Token.FIELD_NAME) { + throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " + + XContentParser.Token.FIELD_NAME + " but found [" + token + "]"); + } + String action = parser.currentName(); + + String index = defaultIndex; + String type = defaultType; + String id = null; + String routing = defaultRouting; + FetchSourceContext fetchSourceContext = defaultFetchSourceContext; + String opType = null; + long version = Versions.MATCH_ANY; + VersionType versionType = VersionType.INTERNAL; + long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; + long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; + int retryOnConflict = 0; + String pipeline = defaultPipeline; + + // at this stage, next token can either be END_OBJECT (and use default index and type, with auto generated id) + // or START_OBJECT which will have another set of parameters + token = parser.nextToken(); + + if (token == XContentParser.Token.START_OBJECT) { + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (INDEX.match(currentFieldName, parser.getDeprecationHandler())){ + if (!allowExplicitIndex) { + throw new IllegalArgumentException("explicit index in bulk is not allowed"); + } + index = parser.text(); + } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + if (warnOnTypeUsage && typesDeprecationLogged == false) { + deprecationLogger.deprecatedAndMaybeLog("bulk_with_types", RestBulkAction.TYPES_DEPRECATION_MESSAGE); + typesDeprecationLogged = true; + } + type = parser.text(); + } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { + id = parser.text(); + } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { + routing = parser.text(); + } else if (OP_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + opType = parser.text(); + } else if 
(VERSION.match(currentFieldName, parser.getDeprecationHandler())) { + version = parser.longValue(); + } else if (VERSION_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + versionType = VersionType.fromString(parser.text()); + } else if (IF_SEQ_NO.match(currentFieldName, parser.getDeprecationHandler())) { + ifSeqNo = parser.longValue(); + } else if (IF_PRIMARY_TERM.match(currentFieldName, parser.getDeprecationHandler())) { + ifPrimaryTerm = parser.longValue(); + } else if (RETRY_ON_CONFLICT.match(currentFieldName, parser.getDeprecationHandler())) { + retryOnConflict = parser.intValue(); + } else if (PIPELINE.match(currentFieldName, parser.getDeprecationHandler())) { + pipeline = parser.text(); + } else if (SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { + fetchSourceContext = FetchSourceContext.fromXContent(parser); + } else { + throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + + currentFieldName + "]"); + } + } else if (token == XContentParser.Token.START_ARRAY) { + throw new IllegalArgumentException("Malformed action/metadata line [" + line + + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); + } else if (token == XContentParser.Token.START_OBJECT && SOURCE.match(currentFieldName, + parser.getDeprecationHandler())) { + fetchSourceContext = FetchSourceContext.fromXContent(parser); + } else if (token != XContentParser.Token.VALUE_NULL) { + throw new IllegalArgumentException("Malformed action/metadata line [" + line + + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); + } + } + } else if (token != XContentParser.Token.END_OBJECT) { + throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " + + XContentParser.Token.START_OBJECT + " or " + XContentParser.Token.END_OBJECT + " but found [" + token + "]"); + } + + if ("delete".equals(action)) { + deleteRequestConsumer.accept(new DeleteRequest(index, type, id).routing(routing) + .version(version).versionType(versionType).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm)); + } else { + nextMarker = findNextMarker(marker, from, data, length); + if (nextMarker == -1) { + break; + } + line++; + + // we use internalAdd so we don't fork here, this allows us not to copy over the big byte array to small chunks + // of index request. 
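The comment above is about avoiding a copy of the large bulk payload: each document's source is taken as a slice that shares the underlying bytes. A rough illustration of the view-versus-copy distinction using java.nio.ByteBuffer (an assumption made for this sketch; the parser actually slices a BytesReference):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class SliceVsCopy {
        public static void main(String[] args) {
            byte[] payload = "{\"a\":1}\n{\"b\":2}\n".getBytes(StandardCharsets.UTF_8);

            // A slice is a window over the same backing array: no bytes are copied.
            ByteBuffer whole = ByteBuffer.wrap(payload);
            whole.position(0).limit(7);
            ByteBuffer firstDocView = whole.slice();

            // A copy allocates and fills a new array for every sub-range.
            byte[] firstDocCopy = new byte[7];
            System.arraycopy(payload, 0, firstDocCopy, 0, 7);

            System.out.println("view shares the backing array: " + (firstDocView.array() == payload));
            System.out.println("copy is a separate array: " + (firstDocCopy != payload));
        }
    }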
+ if ("index".equals(action)) { + if (opType == null) { + indexRequestConsumer.accept(new IndexRequest(index, type, id).routing(routing) + .version(version).versionType(versionType) + .setPipeline(pipeline).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) + .source(sliceTrimmingCarriageReturn(data, from, nextMarker,xContentType), xContentType)); + } else { + indexRequestConsumer.accept(new IndexRequest(index, type, id).routing(routing) + .version(version).versionType(versionType) + .create("create".equals(opType)).setPipeline(pipeline) + .setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) + .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType)); + } + } else if ("create".equals(action)) { + indexRequestConsumer.accept(new IndexRequest(index, type, id).routing(routing) + .version(version).versionType(versionType) + .create(true).setPipeline(pipeline).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) + .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType)); + } else if ("update".equals(action)) { + if (version != Versions.MATCH_ANY || versionType != VersionType.INTERNAL) { + throw new IllegalArgumentException("Update requests do not support versioning. " + + "Please use `if_seq_no` and `if_primary_term` instead"); + } + UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).retryOnConflict(retryOnConflict) + .setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) + .routing(routing); + // EMPTY is safe here because we never call namedObject + try (InputStream dataStream = sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType).streamInput(); + XContentParser sliceParser = xContent.createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, dataStream)) { + updateRequest.fromXContent(sliceParser); + } + if (fetchSourceContext != null) { + updateRequest.fetchSource(fetchSourceContext); + } + IndexRequest upsertRequest = updateRequest.upsertRequest(); + if (upsertRequest != null) { + upsertRequest.setPipeline(defaultPipeline); + } + + updateRequestConsumer.accept(updateRequest); + } + // move pointers + from = nextMarker + 1; + } + } + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index efb08a01e43ab..2f9a130eb82fd 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -26,8 +26,8 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; +import java.util.HashSet; +import java.util.Set; public class BulkShardRequest extends ReplicatedWriteRequest { @@ -48,7 +48,13 @@ public BulkItemRequest[] items() { @Override public String[] indices() { - List indices = new ArrayList<>(); + // A bulk shard request encapsulates items targeted at a specific shard of an index. + // However, items could be targeting aliases of the index, so the bulk request although + // targeting a single concrete index shard might do so using several alias names. + // These alias names have to be exposed by this method because authorization works with + // aliases too, specifically, the item's target alias can be authorized but the concrete + // index might not be. 
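The new indices() implementation above collects the distinct names into a HashSet because several items routed to the same concrete shard may address it through different aliases, and each of those names is what gets authorized. A tiny sketch of the de-duplication, assuming plain strings instead of BulkItemRequest:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class DistinctTargetNames {
        public static void main(String[] args) {
            // Three items that hit the same concrete shard, addressed via two different alias names.
            List<String> itemTargets = Arrays.asList("logs-write", "logs-2019", "logs-write");

            Set<String> indices = new HashSet<>(1);
            for (String target : itemTargets) {
                if (target != null) { // mirrors the null check on items in the original loop
                    indices.add(target);
                }
            }
            System.out.println(indices); // two distinct names, each of which must be authorizable
        }
    }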
+ Set indices = new HashSet<>(1); for (BulkItemRequest item : items) { if (item != null) { indices.add(item.index()); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 567b7fb808090..8603fe9c9da8a 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -47,11 +47,14 @@ import org.elasticsearch.cluster.metadata.AliasOrIndex; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -151,6 +154,72 @@ protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener responses = new AtomicArray<>(bulkRequest.requests.size()); + boolean hasIndexRequestsWithPipelines = false; + final MetaData metaData = clusterService.state().getMetaData(); + ImmutableOpenMap indicesMetaData = metaData.indices(); + for (DocWriteRequest actionRequest : bulkRequest.requests) { + IndexRequest indexRequest = getIndexWriteRequest(actionRequest); + if (indexRequest != null) { + // get pipeline from request + String pipeline = indexRequest.getPipeline(); + if (pipeline == null) { + // start to look for default pipeline via settings found in the index meta data + IndexMetaData indexMetaData = indicesMetaData.get(actionRequest.index()); + if (indexMetaData == null && indexRequest.index() != null) { + // if the write request is through an alias, use the write index's meta data + AliasOrIndex indexOrAlias = metaData.getAliasAndIndexLookup().get(indexRequest.index()); + if (indexOrAlias != null && indexOrAlias.isAlias()) { + AliasOrIndex.Alias alias = (AliasOrIndex.Alias) indexOrAlias; + indexMetaData = alias.getWriteIndex(); + } + } + if (indexMetaData != null) { + // Find the default pipeline if one is defined on an existing index. 
+ String defaultPipeline = IndexSettings.DEFAULT_PIPELINE.get(indexMetaData.getSettings()); + indexRequest.setPipeline(defaultPipeline); + if (IngestService.NOOP_PIPELINE_NAME.equals(defaultPipeline) == false) { + hasIndexRequestsWithPipelines = true; + } + } else if (indexRequest.index() != null) { + // No index exists yet (and this is a valid request), so match index templates to look for a default pipeline + List templates = MetaDataIndexTemplateService.findTemplates(metaData, indexRequest.index()); + assert (templates != null); + String defaultPipeline = IngestService.NOOP_PIPELINE_NAME; + // templates are ordered highest priority first, break if we find a default_pipeline + for (IndexTemplateMetaData template : templates) { + final Settings settings = template.settings(); + if (IndexSettings.DEFAULT_PIPELINE.exists(settings)) { + defaultPipeline = IndexSettings.DEFAULT_PIPELINE.get(settings); + break; + } + } + indexRequest.setPipeline(defaultPipeline); + if (IngestService.NOOP_PIPELINE_NAME.equals(defaultPipeline) == false) { + hasIndexRequestsWithPipelines = true; + } + } + } else if (IngestService.NOOP_PIPELINE_NAME.equals(pipeline) == false) { + hasIndexRequestsWithPipelines = true; + } + } + } + + if (hasIndexRequestsWithPipelines) { + // this method (doExecute) will be called again, but with the bulk requests updated from the ingest node processing and + // also with IngestService.NOOP_PIPELINE_NAME on each request. This ensures that on the second time through this method, + // this path is never taken. + try { + if (clusterService.localNode().isIngestNode()) { + processBulkIndexIngestRequest(task, bulkRequest, listener); + } else { + ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, listener); + } + } catch (Exception e) { + listener.onFailure(e); + } + return; + } + if (needToCheck()) { // Attempt to create all the indices that we're going to need during the bulk before we start. 
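The block above resolves the effective ingest pipeline with a fixed precedence: an explicit pipeline on the request wins, otherwise the default_pipeline setting of the existing target index (or of the write index behind an alias), otherwise the highest-priority matching index template, and finally the no-op pipeline. A condensed sketch of that precedence with plain values standing in for cluster metadata (hypothetical names throughout):

    import java.util.Arrays;
    import java.util.List;

    public class DefaultPipelineResolution {
        static final String NOOP = "_none";

        static String resolvePipeline(String requestPipeline, String indexSettingPipeline, List<String> templatePipelinesByPriority) {
            if (requestPipeline != null) {
                return requestPipeline;              // explicit pipeline on the request wins
            }
            if (indexSettingPipeline != null) {
                return indexSettingPipeline;         // existing index (or write index of an alias): use its setting
            }
            for (String templatePipeline : templatePipelinesByPriority) {
                if (templatePipeline != null) {
                    return templatePipeline;         // index not created yet: first match, highest priority first
                }
            }
            return NOOP;                             // nothing configured: skip the ingest round-trip entirely
        }

        public static void main(String[] args) {
            System.out.println(resolvePipeline(null, null, Arrays.asList(null, "template-pipeline")));
            System.out.println(resolvePipeline(null, "index-pipeline", Arrays.asList("template-pipeline")));
            System.out.println(resolvePipeline("request-pipeline", null, Arrays.asList()));
        }
    }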
// Step 1: collect all the indices in the request @@ -181,7 +250,7 @@ protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener { + executeBulk(task, bulkRequest, startTime, ActionListener.wrap(listener::onResponse, inner -> { inner.addSuppressed(e); listener.onFailure(inner); }), responses, indicesThatCannotBeCreated); @@ -215,56 +284,7 @@ public void onFailure(Exception e) { } } } else { - executeIngestAndBulk(task, bulkRequest, startTime, listener, responses, emptyMap()); - } - } - - private void executeIngestAndBulk(Task task, final BulkRequest bulkRequest, final long startTimeNanos, - final ActionListener listener, final AtomicArray responses, - Map indicesThatCannotBeCreated) { - boolean hasIndexRequestsWithPipelines = false; - final MetaData metaData = clusterService.state().getMetaData(); - ImmutableOpenMap indicesMetaData = metaData.indices(); - for (DocWriteRequest actionRequest : bulkRequest.requests) { - IndexRequest indexRequest = getIndexWriteRequest(actionRequest); - if(indexRequest != null){ - String pipeline = indexRequest.getPipeline(); - if (pipeline == null) { - IndexMetaData indexMetaData = indicesMetaData.get(actionRequest.index()); - if (indexMetaData == null && indexRequest.index() != null) { - //check the alias - AliasOrIndex indexOrAlias = metaData.getAliasAndIndexLookup().get(indexRequest.index()); - if (indexOrAlias != null && indexOrAlias.isAlias()) { - AliasOrIndex.Alias alias = (AliasOrIndex.Alias) indexOrAlias; - indexMetaData = alias.getWriteIndex(); - } - } - if (indexMetaData == null) { - indexRequest.setPipeline(IngestService.NOOP_PIPELINE_NAME); - } else { - String defaultPipeline = IndexSettings.DEFAULT_PIPELINE.get(indexMetaData.getSettings()); - indexRequest.setPipeline(defaultPipeline); - if (IngestService.NOOP_PIPELINE_NAME.equals(defaultPipeline) == false) { - hasIndexRequestsWithPipelines = true; - } - } - } else if (IngestService.NOOP_PIPELINE_NAME.equals(pipeline) == false) { - hasIndexRequestsWithPipelines = true; - } - } - } - if (hasIndexRequestsWithPipelines) { - try { - if (clusterService.localNode().isIngestNode()) { - processBulkIndexIngestRequest(task, bulkRequest, listener); - } else { - ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, listener); - } - } catch (Exception e) { - listener.onFailure(e); - } - } else { - executeBulk(task, bulkRequest, startTimeNanos, listener, responses, indicesThatCannotBeCreated); + executeBulk(task, bulkRequest, startTime, listener, responses, emptyMap()); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 4df8efa6b2743..f182c2985815d 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -261,16 +261,7 @@ private static void finalizePrimaryOperationOnCompletion(BulkPrimaryExecutionCon context.getPrimary().shardId(), docWriteRequest.opType().getLowercase(), docWriteRequest), failure); } - final BulkItemResponse primaryResponse; - // if it's a conflict failure, and we already executed the request on a primary (and we execute it - // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) - // then just use the response we got from the failed execution - if (TransportShardBulkAction.isConflictException(failure) && context.getPreviousPrimaryResponse() != null) { - 
primaryResponse = context.getPreviousPrimaryResponse(); - } else { - primaryResponse = executionResult; - } - context.markAsCompleted(primaryResponse); + context.markAsCompleted(executionResult); } else { context.markAsCompleted(executionResult); } @@ -483,26 +474,16 @@ private static void executeOnPrimaryWhileHandlingMappi throws IOException { T result = toExecute.get(); if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { - // try to update the mappings and try again. + // try to update the mappings and mark the context as needing to try again. try { mappingUpdater.accept(result.getRequiredMappingUpdate()); + context.markAsRequiringMappingUpdate(); } catch (Exception e) { // failure to update the mapping should translate to a failure of specific requests. Other requests // still need to be executed and replicated. onComplete.accept(exceptionToResult.apply(e)); return; } - - // TODO - we can fall back to a wait for cluster state update but I'm keeping the logic the same for now - result = toExecute.get(); - - if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { - // double mapping update. We assume that the successful mapping update wasn't yet processed on the node - // and retry the entire request again. - context.markAsRequiringMappingUpdate(); - } else { - onComplete.accept(result); - } } else { onComplete.accept(result); } diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index cc1e842a1ee5e..fe8475322592f 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -52,6 +52,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.Set; /** * Explain transport action. Computes the explain on the targeted shard. @@ -83,8 +84,8 @@ protected boolean resolveIndex(ExplainRequest request) { @Override protected void resolveRequest(ClusterState state, InternalRequest request) { - final AliasFilter aliasFilter = searchService.buildAliasFilter(state, request.concreteIndex(), - request.request().index()); + final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(state, request.request().index()); + final AliasFilter aliasFilter = searchService.buildAliasFilter(state, request.concreteIndex(), indicesAndAliases); request.request().filteringAlias(aliasFilter); // Fail fast on the node that received the request. if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex())) { diff --git a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java index 38d78fdc0c1a1..8b0e5c744e569 100644 --- a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java @@ -135,8 +135,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws final String buildType = (String) value.get("build_type"); response.build = new Build( - buildFlavor == null ? Build.Flavor.UNKNOWN : Build.Flavor.fromDisplayName(buildFlavor), - buildType == null ? Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType), + /* + * Be lenient when reading on the wire, the enumeration values from other versions might be different than what + * we know. 
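The comment above explains why the flavor and type are parsed leniently: a response may come from a node whose version knows enumeration values this node does not, and that should degrade to UNKNOWN rather than fail. A rough sketch of such a lenient lookup with an UNKNOWN fallback, using an illustrative enum rather than the actual Build class:

    public class LenientEnumParsing {
        enum Flavor {
            DEFAULT, OSS, UNKNOWN;

            // When strict, an unrecognized name is a bug and should fail loudly;
            // when lenient (reading another node's response), fall back to UNKNOWN.
            static Flavor fromDisplayName(String displayName, boolean strict) {
                for (Flavor flavor : values()) {
                    if (flavor.name().equalsIgnoreCase(displayName)) {
                        return flavor;
                    }
                }
                if (strict) {
                    throw new IllegalArgumentException("unknown flavor [" + displayName + "]");
                }
                return UNKNOWN;
            }
        }

        public static void main(String[] args) {
            System.out.println(Flavor.fromDisplayName("oss", true));            // OSS
            System.out.println(Flavor.fromDisplayName("future-flavor", false)); // UNKNOWN instead of an exception
        }
    }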
+ */ + buildFlavor == null ? Build.Flavor.UNKNOWN : Build.Flavor.fromDisplayName(buildFlavor, false), + buildType == null ? Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType, false), (String) value.get("build_hash"), (String) value.get("build_date"), (boolean) value.get("build_snapshot"), diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index bd996377c39c1..3f09f00b9ac1e 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -48,7 +48,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.function.Consumer; import java.util.function.Supplier; public class TransportResyncReplicationAction extends TransportWriteAction onPrimaryDemoted, Consumer onIgnoredFailure) { - shardStateAction.remoteShardFailed(replica.shardId(), replica.allocationId().getId(), primaryTerm, false, message, exception, - createShardActionListener(onSuccess, onPrimaryDemoted, onIgnoredFailure)); + public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, ActionListener listener) { + shardStateAction.remoteShardFailed( + replica.shardId(), replica.allocationId().getId(), primaryTerm, false, message, exception, listener); } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 45bfb099f2b71..fdccfad7b47f5 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -228,7 +228,7 @@ private void raisePhaseFailure(SearchPhaseExecutionException exception) { results.getSuccessfulResults().forEach((entry) -> { try { SearchShardTarget searchShardTarget = entry.getSearchShardTarget(); - Transport.Connection connection = getConnection(null, searchShardTarget.getNodeId()); + Transport.Connection connection = getConnection(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId()); sendReleaseSearchContext(entry.getRequestId(), connection, searchShardTarget.getOriginalIndices()); } catch (Exception inner) { inner.addSuppressed(exception); @@ -281,14 +281,12 @@ public final SearchRequest getRequest() { @Override public final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) { - ShardSearchFailure[] failures = buildShardFailures(); Boolean allowPartialResults = request.allowPartialSearchResults(); assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; if (allowPartialResults == false && failures.length > 0){ raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures)); } - return new SearchResponse(internalSearchResponse, scrollId, getNumShards(), successfulOps.get(), skippedOps.get(), buildTookInMillis(), failures, clusters); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index d7387ac69bd03..910b82c8ccbf4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ 
b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -42,7 +42,6 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -66,8 +65,6 @@ import java.util.Map; import java.util.function.Function; import java.util.function.IntFunction; -import java.util.stream.Collectors; -import java.util.stream.StreamSupport; public final class SearchPhaseController { @@ -491,8 +488,8 @@ private ReducedQueryPhase reducedQueryPhase(Collection aggregationsList) { - ReduceContext reduceContext = reduceContextFunction.apply(false); - return aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList, - null, reduceContext); - } - - private static InternalAggregations reduceAggs(List aggregationsList, - List pipelineAggregators, ReduceContext reduceContext) { - InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, reduceContext); - if (pipelineAggregators != null) { - List newAggs = StreamSupport.stream(aggregations.spliterator(), false) - .map((p) -> (InternalAggregation) p) - .collect(Collectors.toList()); - for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) { - InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), reduceContext); - newAggs.add(newAgg); - } - return new InternalAggregations(newAggs); - } - return aggregations; - } - public static final class ReducedQueryPhase { // the sum of all hits across all reduces shards final TotalHits totalHits; @@ -646,7 +617,8 @@ public void consumeResult(SearchPhaseResult result) { private synchronized void consumeInternal(QuerySearchResult querySearchResult) { if (index == bufferSize) { if (hasAggs) { - InternalAggregations reducedAggs = controller.reduceAggsIncrementally(Arrays.asList(aggsBuffer)); + ReduceContext reduceContext = controller.reduceContextFunction.apply(false); + InternalAggregations reducedAggs = InternalAggregations.reduce(Arrays.asList(aggsBuffer), reduceContext); Arrays.fill(aggsBuffer, null); aggsBuffer[0] = reducedAggs; } @@ -756,7 +728,7 @@ TotalHits getTotalHits() { assert totalHitsRelation == Relation.EQUAL_TO; return new TotalHits(totalHits, totalHitsRelation); } else { - if (totalHits < trackTotalHitsUpTo) { + if (totalHits <= trackTotalHitsUpTo) { return new TotalHits(totalHits, totalHitsRelation); } else { /* diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index 3b28ca19477ab..84a536c18db01 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -168,10 +168,16 @@ SearchResponse getMergedResponse(Clusters clusters) { assert trackTotalHits == null || trackTotalHits; trackTotalHits = true; } + TopDocs topDocs = searchHitsToTopDocs(searchHits, totalHits, shards); topDocsStats.add(new TopDocsAndMaxScore(topDocs, searchHits.getMaxScore()), searchResponse.isTimedOut(), searchResponse.isTerminatedEarly()); - topDocsList.add(topDocs); + if (searchHits.getHits().length > 0) { + 
//there is no point in adding empty search hits and merging them with the others. Also, empty search hits always come
+ //without sort fields and collapse info, even when sorting by field and/or field collapsing was requested, which causes
+ //issues reconstructing the proper TopDocs instance and breaks mergeTopDocs, which expects the same type for each result.
+ topDocsList.add(topDocs);
+ }
 }
 //after going through all the hits and collecting all their distinct shards, we can assign shardIndex and set it to the ScoreDocs
@@ -204,7 +210,25 @@ public int compare(ShardSearchFailure o1, ShardSearchFailure o2) {
 if (shardId2 == null) {
 return 1;
 }
- return shardId1.compareTo(shardId2);
+ int shardIdCompare = shardId1.compareTo(shardId2);
+ //we could assume that the same shard id cannot come back from multiple clusters, as even with the same index name and shard index,
+ //the index uuid does not match. But the same cluster can be registered multiple times with different aliases, in which case
+ //we may get failures from the same index, yet with a different cluster alias in their shard target.
+ if (shardIdCompare != 0) {
+ return shardIdCompare;
+ }
+ String clusterAlias1 = o1.shard() == null ? null : o1.shard().getClusterAlias();
+ String clusterAlias2 = o2.shard() == null ? null : o2.shard().getClusterAlias();
+ if (clusterAlias1 == null && clusterAlias2 == null) {
+ return 0;
+ }
+ if (clusterAlias1 == null) {
+ return -1;
+ }
+ if (clusterAlias2 == null) {
+ return 1;
+ }
+ return clusterAlias1.compareTo(clusterAlias2);
 }
 private ShardId extractShardId(ShardSearchFailure failure) {
@@ -279,12 +303,17 @@ private static void setShardIndex(Map shards, L
 }
 private static SearchHits topDocsToSearchHits(TopDocs topDocs, TopDocsStats topDocsStats) {
- SearchHit[] searchHits = new SearchHit[topDocs.scoreDocs.length];
- for (int i = 0; i < topDocs.scoreDocs.length; i++) {
- FieldDocAndSearchHit scoreDoc = (FieldDocAndSearchHit)topDocs.scoreDocs[i];
- searchHits[i] = scoreDoc.searchHit;
+ SearchHit[] searchHits;
+ if (topDocs == null) {
+ //merged TopDocs is null whenever all clusters have returned empty hits
+ searchHits = new SearchHit[0];
+ } else {
+ searchHits = new SearchHit[topDocs.scoreDocs.length];
+ for (int i = 0; i < topDocs.scoreDocs.length; i++) {
+ FieldDocAndSearchHit scoreDoc = (FieldDocAndSearchHit)topDocs.scoreDocs[i];
+ searchHits[i] = scoreDoc.searchHit;
+ }
 }
-
 SortField[] sortFields = null;
 String collapseField = null;
 Object[] collapseValues = null;
@@ -352,7 +381,7 @@ public int compareTo(ShardIdAndClusterAlias o) {
 }
 int clusterAliasCompareTo = clusterAlias.compareTo(o.clusterAlias);
 if (clusterAliasCompareTo != 0) {
- //TODO we may want to fix this, CCS returns remote results before local ones (TransportSearchAction#mergeShardsIterators)
+ //CCS returns remote results before local ones (TransportSearchAction#mergeShardsIterators); this is fixed from 7.1 onwards
 if (clusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) {
 return 1;
 }
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
index 519f2c88e0e58..8485d6d126f0a 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
@@ -80,6 +80,7 @@
 import java.util.function.Function;
 import java.util.function.LongSupplier;
+import static org.elasticsearch.action.search.SearchType.DFS_QUERY_THEN_FETCH;
import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH; public class TransportSearchAction extends HandledTransportAction { @@ -115,9 +116,10 @@ public TransportSearchAction(ThreadPool threadPool, TransportService transportSe private Map buildPerIndexAliasFilter(SearchRequest request, ClusterState clusterState, Index[] concreteIndices, Map remoteAliasMap) { final Map aliasFilterMap = new HashMap<>(); + final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); for (Index index : concreteIndices) { clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index.getName()); - AliasFilter aliasFilter = searchService.buildAliasFilter(clusterState, index.getName(), request.indices()); + AliasFilter aliasFilter = searchService.buildAliasFilter(clusterState, index.getName(), indicesAndAliases); assert aliasFilter != null; aliasFilterMap.put(index.getUUID(), aliasFilter); } @@ -248,6 +250,9 @@ static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) { if (searchRequest.scroll() != null) { return false; } + if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) { + return false; + } SearchSourceBuilder source = searchRequest.source(); return source == null || source.collapse() == null || source.collapse().getInnerHits() == null || source.collapse().getInnerHits().isEmpty(); @@ -311,8 +316,6 @@ public void onFailure(Exception e) { if (localIndices != null) { ActionListener ccsListener = createCCSListener(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, false, countDown, skippedClusters, exceptions, searchResponseMerger, totalClusters, listener); - //here we provide the empty string a cluster alias, which means no prefix in index name, - //but the coord node will perform non final reduce as it's not null. SearchRequest ccsLocalSearchRequest = SearchRequest.withLocalReduction(searchRequest, localIndices.indices(), RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, timeProvider.getAbsoluteStartMillis(), false); localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener); diff --git a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java index d297df478a4b8..85d8a2c1a38db 100644 --- a/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java +++ b/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java @@ -41,7 +41,7 @@ public class DefaultShardOperationFailedException extends ShardOperationFailedEx private static final String SHARD_ID = "shard"; private static final String REASON = "reason"; - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "failures", true, arg -> new DefaultShardOperationFailedException((String) arg[0], (int) arg[1] ,(Throwable) arg[2])); static { diff --git a/server/src/main/java/org/elasticsearch/action/support/ListenerTimeouts.java b/server/src/main/java/org/elasticsearch/action/support/ListenerTimeouts.java new file mode 100644 index 0000000000000..df9afd32ca21c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/ListenerTimeouts.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.threadpool.Scheduler;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+public class ListenerTimeouts {
+
+    /**
+     * Wraps a listener with a listener that can time out. After the timeout period the
+     * {@link ActionListener#onFailure(Exception)} will be called with an
+     * {@link ElasticsearchTimeoutException} if the listener has not already been completed.
+     *
+     * @param threadPool used to schedule the timeout
+     * @param listener the listener to wrap, which can time out
+     * @param timeout the period after which the listener is failed
+     * @param executor the executor to use for scheduling the timeout
+     * @param listenerName the name of the listener, used in the timeout exception message
+     * @return the wrapped listener that will time out
+     */
+    public static <Response> ActionListener<Response> wrapWithTimeout(ThreadPool threadPool, ActionListener<Response> listener,
+                                                                      TimeValue timeout, String executor, String listenerName) {
+        TimeoutableListener<Response> wrappedListener = new TimeoutableListener<>(listener, timeout, listenerName);
+        wrappedListener.cancellable = threadPool.schedule(wrappedListener, timeout, executor);
+        return wrappedListener;
+    }
+
+    private static class TimeoutableListener<Response> implements ActionListener<Response>, Runnable {
+
+        private final AtomicBoolean isDone = new AtomicBoolean(false);
+        private final ActionListener<Response> delegate;
+        private final TimeValue timeout;
+        private final String listenerName;
+        private volatile Scheduler.ScheduledCancellable cancellable;
+
+        private TimeoutableListener(ActionListener<Response> delegate, TimeValue timeout, String listenerName) {
+            this.delegate = delegate;
+            this.timeout = timeout;
+            this.listenerName = listenerName;
+        }
+
+        @Override
+        public void onResponse(Response response) {
+            if (isDone.compareAndSet(false, true)) {
+                cancellable.cancel();
+                delegate.onResponse(response);
+            }
+        }
+
+        @Override
+        public void onFailure(Exception e) {
+            if (isDone.compareAndSet(false, true)) {
+                cancellable.cancel();
+                delegate.onFailure(e);
+            }
+        }
+
+        @Override
+        public void run() {
+            if (isDone.compareAndSet(false, true)) {
+                String timeoutMessage = "[" + listenerName + "]" + " timed out after [" + timeout + "]";
+                delegate.onFailure(new ElasticsearchTimeoutException(timeoutMessage));
+            }
+        }
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
index 0da39a593a2c1..22e90cfc1356b 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
+++ 
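For context, here is a minimal usage sketch of the ListenerTimeouts helper added above. It is illustrative only and not part of this change; the 30-second timeout, the GENERIC executor, and the listener name are assumptions made for the example.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ListenerTimeouts;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;

class ListenerTimeoutsUsageSketch {
    // Wrap a listener so it fails with an ElasticsearchTimeoutException if it is not
    // completed within 30 seconds; completion and timeout race on a single AtomicBoolean
    // inside the wrapper, so the delegate is only ever notified once.
    static ActionListener<Void> withTimeout(ThreadPool threadPool, ActionListener<Void> original) {
        return ListenerTimeouts.wrapWithTimeout(
            threadPool,                       // schedules the timeout task
            original,                         // delegate listener to complete
            TimeValue.timeValueSeconds(30),   // period before the listener is failed
            ThreadPool.Names.GENERIC,         // executor that runs the timeout task
            "usage-sketch-listener");         // name reported in the timeout message
    }
}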
b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -21,12 +21,14 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.Assertions; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.TransportActions; +import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; @@ -34,6 +36,7 @@ import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -43,7 +46,6 @@ import java.util.Locale; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Consumer; public class ReplicationOperation< Request extends ReplicationRequest, @@ -133,10 +135,7 @@ private void markUnavailableShardsAsStale(ReplicaRequest replicaRequest, Replica for (String allocationId : replicationGroup.getUnavailableInSyncShards()) { pendingActions.incrementAndGet(); replicasProxy.markShardCopyAsStaleIfNeeded(replicaRequest.shardId(), allocationId, - ReplicationOperation.this::decPendingAndFinishIfNeeded, - ReplicationOperation.this::onPrimaryDemoted, - throwable -> decPendingAndFinishIfNeeded() - ); + ActionListener.wrap(r -> decPendingAndFinishIfNeeded(), ReplicationOperation.this::onNoLongerPrimary)); } } @@ -192,20 +191,39 @@ public void onFailure(Exception replicaException) { shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false)); } String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard); - replicasProxy.failShardIfNeeded(shard, message, - replicaException, ReplicationOperation.this::decPendingAndFinishIfNeeded, - ReplicationOperation.this::onPrimaryDemoted, throwable -> decPendingAndFinishIfNeeded()); + replicasProxy.failShardIfNeeded(shard, message, replicaException, + ActionListener.wrap(r -> decPendingAndFinishIfNeeded(), ReplicationOperation.this::onNoLongerPrimary)); + } + + @Override + public String toString() { + return "[" + replicaRequest + "][" + shard + "]"; } }); } - private void onPrimaryDemoted(Exception demotionFailure) { - String primaryFail = String.format(Locale.ROOT, - "primary shard [%s] was demoted while failing replica shard", - primary.routingEntry()); - // we are no longer the primary, fail ourselves and start over - primary.failShard(primaryFail, demotionFailure); - finishAsFailed(new RetryOnPrimaryException(primary.routingEntry().shardId(), primaryFail, demotionFailure)); + private void onNoLongerPrimary(Exception failure) { + final Throwable cause = ExceptionsHelper.unwrapCause(failure); + final boolean nodeIsClosing = + cause instanceof NodeClosedException + || ExceptionsHelper.isTransportStoppedForAction(cause, "internal:cluster/shard/failure"); + final String message; + if (nodeIsClosing) { + message = String.format(Locale.ROOT, + "node with primary [%s] is shutting down 
while failing replica shard", primary.routingEntry()); + // We prefer not to fail the primary to avoid unnecessary warning log + // when the node with the primary shard is gracefully shutting down. + } else { + if (Assertions.ENABLED) { + if (failure instanceof ShardStateAction.NoLongerPrimaryShardException == false) { + throw new AssertionError("unexpected failure", failure); + } + } + // we are no longer the primary, fail ourselves and start over + message = String.format(Locale.ROOT, "primary shard [%s] was demoted while failing replica shard", primary.routingEntry()); + primary.failShard(message, failure); + } + finishAsFailed(new RetryOnPrimaryException(primary.routingEntry().shardId(), message, failure)); } /** @@ -365,31 +383,23 @@ void performOn(ShardRouting replica, RequestT replicaRequest, long globalCheckpo * of active shards. Whether a failure is needed is left up to the * implementation. * - * @param replica shard to fail - * @param message a (short) description of the reason - * @param exception the original exception which caused the ReplicationOperation to request the shard to be failed - * @param onSuccess a callback to call when the shard has been successfully removed from the active set. - * @param onPrimaryDemoted a callback to call when the shard can not be failed because the current primary has been demoted - * by the master. - * @param onIgnoredFailure a callback to call when failing a shard has failed, but it that failure can be safely ignored and the + * @param replica shard to fail + * @param message a (short) description of the reason + * @param exception the original exception which caused the ReplicationOperation to request the shard to be failed + * @param listener a listener that will be notified when the failing shard has been removed from the in-sync set */ - void failShardIfNeeded(ShardRouting replica, String message, Exception exception, Runnable onSuccess, - Consumer onPrimaryDemoted, Consumer onIgnoredFailure); + void failShardIfNeeded(ShardRouting replica, String message, Exception exception, ActionListener listener); /** * Marks shard copy as stale if needed, removing its allocation id from * the set of in-sync allocation ids. Whether marking as stale is needed * is left up to the implementation. * - * @param shardId shard id - * @param allocationId allocation id to remove from the set of in-sync allocation ids - * @param onSuccess a callback to call when the allocation id has been successfully removed from the in-sync set. - * @param onPrimaryDemoted a callback to call when the request failed because the current primary was already demoted - * by the master. - * @param onIgnoredFailure a callback to call when the request failed, but the failure can be safely ignored. 
+ * @param shardId shard id + * @param allocationId allocation id to remove from the set of in-sync allocation ids + * @param listener a listener that will be notified when the failing shard has been removed from the in-sync set */ - void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, Runnable onSuccess, - Consumer onPrimaryDemoted, Consumer onIgnoredFailure); + void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, ActionListener listener); } /** diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index a8c187745ac4a..18c271cba74b2 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -62,6 +62,7 @@ import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; @@ -84,7 +85,6 @@ import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; import java.util.function.Supplier; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; @@ -222,7 +222,7 @@ protected ClusterBlockLevel globalBlockLevel() { * Index level block to check before request execution. Returning null means that no blocks need to be checked. */ @Nullable - protected ClusterBlockLevel indexBlockLevel() { + public ClusterBlockLevel indexBlockLevel() { return null; } @@ -356,10 +356,18 @@ protected void doRun() throws Exception { primaryTerm, actualTerm); } - acquirePrimaryOperationPermit(indexShard, request, ActionListener.wrap( - releasable -> runWithPrimaryShardReference(new PrimaryShardReference(indexShard, releasable)), - this::onFailure - )); + acquirePrimaryOperationPermit( + indexShard, + request, + ActionListener.wrap( + releasable -> runWithPrimaryShardReference(new PrimaryShardReference(indexShard, releasable)), + e -> { + if (e instanceof ShardNotInPrimaryModeException) { + onFailure(new ReplicationOperation.RetryOnPrimaryException(shardId, "shard is not in primary mode", e)); + } else { + onFailure(e); + } + })); } void runWithPrimaryShardReference(final PrimaryShardReference primaryShardReference) { @@ -1173,47 +1181,21 @@ public void performOn( } @Override - public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, - Runnable onSuccess, Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { + public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, ActionListener listener) { // This does not need to fail the shard. The idea is that this // is a non-write operation (something like a refresh or a global // checkpoint sync) and therefore the replica should still be // "alive" if it were to fail. 
- onSuccess.run(); + listener.onResponse(null); } @Override - public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, Runnable onSuccess, - Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { + public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, ActionListener listener) { // This does not need to make the shard stale. The idea is that this // is a non-write operation (something like a refresh or a global // checkpoint sync) and therefore the replica should still be // "alive" if it were to be marked as stale. - onSuccess.run(); - } - - protected final ActionListener createShardActionListener(final Runnable onSuccess, - final Consumer onPrimaryDemoted, - final Consumer onIgnoredFailure) { - return new ActionListener() { - @Override - public void onResponse(Void aVoid) { - onSuccess.run(); - } - - @Override - public void onFailure(Exception shardFailedError) { - if (shardFailedError instanceof ShardStateAction.NoLongerPrimaryShardException) { - onPrimaryDemoted.accept(shardFailedError); - } else { - // these can occur if the node is shutting down and are okay - // any other exception here is not expected and merits investigation - assert shardFailedError instanceof TransportException || - shardFailedError instanceof NodeClosedException : shardFailedError; - onIgnoredFailure.accept(shardFailedError); - } - } - }; + listener.onResponse(null); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 279a616160000..4781682437545 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -47,7 +47,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; import java.util.function.Supplier; /** @@ -245,7 +244,7 @@ protected ClusterBlockLevel globalBlockLevel() { } @Override - protected ClusterBlockLevel indexBlockLevel() { + public ClusterBlockLevel indexBlockLevel() { return ClusterBlockLevel.WRITE; } @@ -376,20 +375,17 @@ class WriteActionReplicasProxy extends ReplicasProxy { } @Override - public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, - Runnable onSuccess, Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { + public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, ActionListener listener) { if (TransportActions.isShardNotAvailableException(exception) == false) { logger.warn(new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception); } - shardStateAction.remoteShardFailed(replica.shardId(), replica.allocationId().getId(), primaryTerm, true, message, exception, - createShardActionListener(onSuccess, onPrimaryDemoted, onIgnoredFailure)); + shardStateAction.remoteShardFailed( + replica.shardId(), replica.allocationId().getId(), primaryTerm, true, message, exception, listener); } @Override - public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, Runnable onSuccess, - Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { - shardStateAction.remoteShardFailed(shardId, allocationId, primaryTerm, true, "mark copy as stale", null, - createShardActionListener(onSuccess, onPrimaryDemoted, onIgnoredFailure)); 
+ public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, ActionListener listener) { + shardStateAction.remoteShardFailed(shardId, allocationId, primaryTerm, true, "mark copy as stale", null, listener); } } } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java index 7d13cff2ebd09..7cbbf5d48c8e3 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.ObjectLongHashMap; import com.carrotsearch.hppc.cursors.ObjectLongCursor; +import org.apache.lucene.index.BaseTermsEnum; import org.apache.lucene.index.Fields; import org.apache.lucene.index.ImpactsEnum; import org.apache.lucene.index.PostingsEnum; @@ -229,7 +230,7 @@ public TermsEnum iterator() throws IOException { // reset before asking for an iterator reset(); // convert bytes ref for the terms to actual data - return new TermsEnum() { + return new BaseTermsEnum() { int currentTerm = 0; int freq = 0; int docFreq = -1; diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 7660ca9da92fa..79493ea2e66e3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -60,7 +60,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.zen.PublishClusterStateAction; import java.io.IOException; import java.util.EnumSet; @@ -80,9 +79,8 @@ * The cluster state can be updated only on the master node. All updates are performed by on a * single thread and controlled by the {@link ClusterService}. After every update the * {@link Discovery#publish} method publishes a new version of the cluster state to all other nodes in the - * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on - * the type of discovery. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The - * publishing mechanism can be overridden by other discovery. + * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on + * the type of discovery. *

* The cluster state implements the {@link Diffable} interface in order to support publishing of cluster state * differences instead of the entire state on each change. The publishing mechanism should only send differences diff --git a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java index 78eceeb12bcca..725da675952f2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -474,8 +474,10 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeVInt(upsertsCount); for (Map.Entry entry : upserts.entrySet()) { - keySerializer.writeKey(entry.getKey(), out); - valueSerializer.write(entry.getValue(), out); + if(valueSerializer.supportsVersion(entry.getValue(), version)) { + keySerializer.writeKey(entry.getKey(), out); + valueSerializer.write(entry.getValue(), out); + } } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index d6c2824fdbb10..28249754b50f1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -21,6 +21,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.cluster.coordination.FollowersChecker; +import org.elasticsearch.cluster.coordination.LeaderChecker; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -32,8 +34,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.KeyedLock; -import org.elasticsearch.discovery.zen.MasterFaultDetection; -import org.elasticsearch.discovery.zen.NodesFaultDetection; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -51,8 +51,7 @@ * This component is responsible for connecting to nodes once they are added to the cluster state, and disconnect when they are * removed. Also, it periodically checks that all connections are still open and if needed restores them. * Note that this component is *not* responsible for removing nodes from the cluster if they disconnect / do not respond - * to pings. This is done by {@link NodesFaultDetection}. Master fault detection - * is done by {@link MasterFaultDetection}. + * to pings. This is done by {@link FollowersChecker}. Master fault detection is done by {@link LeaderChecker}. 
*/ public class NodeConnectionsService extends AbstractLifecycleComponent { private static final Logger logger = LogManager.getLogger(NodeConnectionsService.class); diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index c839acf45fc80..a2474ba211d62 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -446,7 +446,7 @@ public SnapshotsInProgress(StreamInput in) throws IOException { } long repositoryStateId = in.readLong(); final String failure; - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { failure = in.readOptionalString(); } else { failure = null; @@ -488,7 +488,7 @@ public void writeTo(StreamOutput out) throws IOException { } } out.writeLong(entry.repositoryStateId); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { out.writeOptionalString(entry.failure); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java index fa58dd240fa82..4711b44593c27 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java @@ -29,10 +29,13 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.node.Node; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; @@ -77,15 +80,28 @@ public class ClusterBootstrapService { public ClusterBootstrapService(Settings settings, TransportService transportService, Supplier> discoveredNodesSupplier, BooleanSupplier isBootstrappedSupplier, Consumer votingConfigurationConsumer) { - - final List initialMasterNodes = INITIAL_MASTER_NODES_SETTING.get(settings); - bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialMasterNodes)); - if (bootstrapRequirements.size() != initialMasterNodes.size()) { - throw new IllegalArgumentException( - "setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + "] contains duplicates: " + initialMasterNodes); + if (DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE.equals(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings))) { + if (INITIAL_MASTER_NODES_SETTING.exists(settings)) { + throw new IllegalArgumentException("setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + + "] is not allowed when [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + "] is set to [" + + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE + "]"); + } + if (DiscoveryNode.isMasterNode(settings) == false) { + throw new IllegalArgumentException("node with [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + "] set to [" + + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE + "] must be master-eligible"); + } + bootstrapRequirements = Collections.singleton(Node.NODE_NAME_SETTING.get(settings)); + unconfiguredBootstrapTimeout = null; + } else { + final List initialMasterNodes = 
INITIAL_MASTER_NODES_SETTING.get(settings); + bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialMasterNodes)); + if (bootstrapRequirements.size() != initialMasterNodes.size()) { + throw new IllegalArgumentException( + "setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + "] contains duplicates: " + initialMasterNodes); + } + unconfiguredBootstrapTimeout = discoveryIsConfigured(settings) ? null : UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(settings); } - unconfiguredBootstrapTimeout = discoveryIsConfigured(settings) ? null : UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(settings); this.transportService = transportService; this.discoveredNodesSupplier = discoveredNodesSupplier; this.isBootstrappedSupplier = isBootstrappedSupplier; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java index 67d2103ce672d..aaae94d0297e5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -54,14 +54,16 @@ public class ClusterFormationFailureHelper { private final Supplier clusterFormationStateSupplier; private final ThreadPool threadPool; private final TimeValue clusterFormationWarningTimeout; + private final Runnable logLastFailedJoinAttempt; @Nullable // if no warning is scheduled private volatile WarningScheduler warningScheduler; public ClusterFormationFailureHelper(Settings settings, Supplier clusterFormationStateSupplier, - ThreadPool threadPool) { + ThreadPool threadPool, Runnable logLastFailedJoinAttempt) { this.clusterFormationStateSupplier = clusterFormationStateSupplier; this.threadPool = threadPool; this.clusterFormationWarningTimeout = DISCOVERY_CLUSTER_FORMATION_WARNING_TIMEOUT_SETTING.get(settings); + this.logLastFailedJoinAttempt = logLastFailedJoinAttempt; } public boolean isRunning() { @@ -94,6 +96,7 @@ public void onFailure(Exception e) { @Override protected void doRun() { if (isActive()) { + logLastFailedJoinAttempt.run(); logger.warn(clusterFormationStateSupplier.get().getDescription()); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index dff6b5add0b09..11c88d064257d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -494,6 +494,7 @@ default void markLastAcceptedStateAsCommitted() { metaDataBuilder = MetaData.builder(lastAcceptedState.metaData()); } metaDataBuilder.clusterUUIDCommitted(true); + logger.info("cluster UUID set to [{}]", lastAcceptedState.metaData().clusterUUID()); } if (metaDataBuilder != null) { setLastAcceptedState(ClusterState.builder(lastAcceptedState).metaData(metaDataBuilder).build()); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 6843534107178..e6acac17da90a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; import 
org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalClusterUpdateTask; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.coordination.ClusterFormationFailureHelper.ClusterFormationState; import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfigExclusion; @@ -52,24 +53,28 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoveryStats; import org.elasticsearch.discovery.HandshakingTransportAddressConnector; import org.elasticsearch.discovery.PeerFinder; +import org.elasticsearch.discovery.SeedHostsProvider; import org.elasticsearch.discovery.SeedHostsResolver; import org.elasticsearch.discovery.zen.PendingClusterStateStats; -import org.elasticsearch.discovery.SeedHostsProvider; +import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.TransportResponse.Empty; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Optional; @@ -97,8 +102,10 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery TimeValue.timeValueMillis(30000), TimeValue.timeValueMillis(1), Setting.Property.NodeScope); private final Settings settings; + private final boolean singleNodeDiscovery; private final TransportService transportService; private final MasterService masterService; + private final AllocationService allocationService; private final JoinHelper joinHelper; private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; private final Supplier persistedStateSupplier; @@ -144,7 +151,9 @@ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSe this.settings = settings; this.transportService = transportService; this.masterService = masterService; + this.allocationService = allocationService; this.onJoinValidators = JoinTaskExecutor.addBuiltInJoinValidators(onJoinValidators); + this.singleNodeDiscovery = DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE.equals(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)); this.joinHelper = new JoinHelper(settings, allocationService, masterService, transportService, this::getCurrentTerm, this::getStateForMasterService, this::handleJoinRequest, this::joinLeaderInTerm, this.onJoinValidators); this.persistedStateSupplier = persistedStateSupplier; @@ -174,7 +183,7 @@ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSe this.lagDetector = new LagDetector(settings, transportService.getThreadPool(), n -> removeNode(n, "lagging"), transportService::getLocalNode); this.clusterFormationFailureHelper = new ClusterFormationFailureHelper(settings, this::getClusterFormationState, - transportService.getThreadPool()); + transportService.getThreadPool(), joinHelper::logLastFailedJoinAttempt); } 
private ClusterFormationState getClusterFormationState() { @@ -226,6 +235,14 @@ void onFollowerCheckRequest(FollowerCheckRequest followerCheckRequest) { // where we would possibly have to remove the NO_MASTER_BLOCK from the applierState when turning a candidate back to follower. if (getLastAcceptedState().term() < getCurrentTerm()) { becomeFollower("onFollowerCheckRequest", followerCheckRequest.getSender()); + } else if (mode == Mode.FOLLOWER) { + logger.trace("onFollowerCheckRequest: responding successfully to {}", followerCheckRequest); + } else if (joinHelper.isJoinPending()) { + logger.trace("onFollowerCheckRequest: rejoining master, responding successfully to {}", followerCheckRequest); + } else { + logger.trace("onFollowerCheckRequest: received check from faulty master, rejecting {}", followerCheckRequest); + throw new CoordinationStateRejectedException( + "onFollowerCheckRequest: received check from faulty master, rejecting " + followerCheckRequest); } } } @@ -436,6 +453,13 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback assert Thread.holdsLock(mutex) == false; assert getLocalNode().isMasterNode() : getLocalNode() + " received a join but is not master-eligible"; logger.trace("handleJoinRequest: as {}, handling {}", mode, joinRequest); + + if (singleNodeDiscovery && joinRequest.getSourceNode().equals(getLocalNode()) == false) { + joinCallback.onFailure(new IllegalStateException("cannot join node with [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + + "] set to [" + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE + "] discovery")); + return; + } + transportService.connectToNode(joinRequest.getSourceNode()); final ClusterState stateForJoinValidation = getStateForMasterService(); @@ -457,7 +481,7 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback // package private for tests void sendValidateJoinRequest(ClusterState stateForJoinValidation, JoinRequest joinRequest, - JoinHelper.JoinCallback joinCallback) { + JoinHelper.JoinCallback joinCallback) { // validate the join on the joining node, will throw a failure if it fails the validation joinHelper.sendValidateJoinRequest(joinRequest.getSourceNode(), stateForJoinValidation, new ActionListener() { @Override @@ -478,7 +502,6 @@ public void onFailure(Exception e) { }); } - private void processJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback joinCallback) { final Optional optionalJoin = joinRequest.getOptionalJoin(); synchronized (mutex) { @@ -500,6 +523,7 @@ void becomeCandidate(String method) { method, getCurrentTerm(), mode, lastKnownLeader); if (mode != Mode.CANDIDATE) { + final Mode prevMode = mode; mode = Mode.CANDIDATE; cancelActivePublication("become candidate: " + method); joinAccumulator.close(mode); @@ -519,6 +543,10 @@ void becomeCandidate(String method) { followersChecker.updateFastResponseState(getCurrentTerm(), mode); lagDetector.clearTrackedNodes(); + if (prevMode == Mode.LEADER) { + cleanMasterService(); + } + if (applierState.nodes().getMasterNodeId() != null) { applierState = clusterStateWithNoMasterBlock(applierState); clusterApplier.onNewClusterState("becoming candidate: " + method, () -> applierState, (source, e) -> { @@ -555,6 +583,7 @@ void becomeLeader(String method) { void becomeFollower(String method, DiscoveryNode leaderNode) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; assert leaderNode.isMasterNode() : leaderNode + " became a leader but is not master-eligible"; + assert mode != Mode.LEADER : "do not switch to 
follower from leader (should be candidate first)"; if (mode == Mode.FOLLOWER && Optional.of(leaderNode).equals(lastKnownLeader)) { logger.trace("{}: coordinator remaining FOLLOWER of [{}] in term {}", @@ -590,6 +619,26 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { lagDetector.clearTrackedNodes(); } + private void cleanMasterService() { + masterService.submitStateUpdateTask("clean-up after stepping down as master", + new LocalClusterUpdateTask() { + @Override + public void onFailure(String source, Exception e) { + // ignore + logger.trace("failed to clean-up after stepping down as master", e); + } + + @Override + public ClusterTasksResult execute(ClusterState currentState) { + if (currentState.nodes().isLocalNodeElectedMaster() == false) { + allocationService.cleanCaches(); + } + return unchanged(); + } + + }); + } + private PreVoteResponse getPreVoteResponse() { return new PreVoteResponse(getCurrentTerm(), coordinationState.get().getLastAcceptedTerm(), coordinationState.get().getLastAcceptedState().getVersionOrMetaDataVersion()); @@ -628,6 +677,18 @@ protected void doStart() { coordinationState.set(new CoordinationState(settings, getLocalNode(), persistedState)); peerFinder.setCurrentTerm(getCurrentTerm()); configuredHostsResolver.start(); + final ClusterState lastAcceptedState = coordinationState.get().getLastAcceptedState(); + if (lastAcceptedState.metaData().clusterUUIDCommitted()) { + logger.info("cluster UUID [{}]", lastAcceptedState.metaData().clusterUUID()); + } + final VotingConfiguration votingConfiguration = lastAcceptedState.getLastCommittedConfiguration(); + if (singleNodeDiscovery && + votingConfiguration.isEmpty() == false && + votingConfiguration.hasQuorum(Collections.singleton(getLocalNode().getId())) == false) { + throw new IllegalStateException("cannot start with [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + "] set to [" + + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE + "] when local node " + getLocalNode() + + " does not have quorum in voting configuration " + votingConfiguration); + } ClusterState initialState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) .blocks(ClusterBlocks.builder() .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK) @@ -945,10 +1006,11 @@ private ClusterState clusterStateWithNoMasterBlock(ClusterState clusterState) { public void publish(ClusterChangedEvent clusterChangedEvent, ActionListener publishListener, AckListener ackListener) { try { synchronized (mutex) { - if (mode != Mode.LEADER) { - logger.debug(() -> new ParameterizedMessage("[{}] failed publication as not currently leading", - clusterChangedEvent.source())); - publishListener.onFailure(new FailedToCommitClusterStateException("node stepped down as leader during publication")); + if (mode != Mode.LEADER || getCurrentTerm() != clusterChangedEvent.state().term()) { + logger.debug(() -> new ParameterizedMessage("[{}] failed publication as node is no longer master for term {}", + clusterChangedEvent.source(), clusterChangedEvent.state().term())); + publishListener.onFailure(new FailedToCommitClusterStateException("node is no longer master for term " + + clusterChangedEvent.state().term() + " while handling publication")); return; } @@ -968,27 +1030,14 @@ public void publish(ClusterChangedEvent clusterChangedEvent, ActionListener(), ackListener, publishListener); currentPublication = Optional.of(publication); - transportService.getThreadPool().schedule(new Runnable() { - @Override - public void run() { - synchronized (mutex) { - 
publication.cancel("timed out after " + publishTimeout); - } - } - - @Override - public String toString() { - return "scheduled timeout for " + publication; - } - }, publishTimeout, Names.GENERIC); - final DiscoveryNodes publishNodes = publishRequest.getAcceptedState().nodes(); leaderChecker.setCurrentNodes(publishNodes); followersChecker.setCurrentNodes(publishNodes); @@ -1042,6 +1091,10 @@ private void cancelActivePublication(String reason) { } } + public Collection> getOnJoinValidators() { + return onJoinValidators; + } + public enum Mode { CANDIDATE, LEADER, FOLLOWER } @@ -1050,7 +1103,8 @@ private class CoordinatorPeerFinder extends PeerFinder { CoordinatorPeerFinder(Settings settings, TransportService transportService, TransportAddressConnector transportAddressConnector, ConfiguredHostsResolver configuredHostsResolver) { - super(settings, transportService, transportAddressConnector, configuredHostsResolver); + super(settings, transportService, transportAddressConnector, + singleNodeDiscovery ? hostsResolver -> Collections.emptyList() : configuredHostsResolver); } @Override @@ -1061,6 +1115,13 @@ protected void onActiveMasterFound(DiscoveryNode masterNode, long term) { } } + @Override + protected void startProbe(TransportAddress transportAddress) { + if (singleNodeDiscovery == false) { + super.startProbe(transportAddress); + } + } + @Override protected void onFoundPeersUpdated() { synchronized (mutex) { @@ -1152,6 +1213,7 @@ class CoordinatorPublication extends Publication { private final AckListener ackListener; private final ActionListener publishListener; private final PublicationTransportHandler.PublicationContext publicationContext; + private final Scheduler.ScheduledCancellable scheduledCancellable; // We may not have accepted our own state before receiving a join from another node, causing its join to be rejected (we cannot // safely accept a join whose last-accepted term/version is ahead of ours), so store them up and process them at the end. 
@@ -1192,6 +1254,19 @@ public void onNodeAck(DiscoveryNode node, Exception e) { this.localNodeAckEvent = localNodeAckEvent; this.ackListener = ackListener; this.publishListener = publishListener; + this.scheduledCancellable = transportService.getThreadPool().schedule(new Runnable() { + @Override + public void run() { + synchronized (mutex) { + cancel("timed out after " + publishTimeout); + } + } + + @Override + public String toString() { + return "scheduled timeout for " + CoordinatorPublication.this; + } + }, publishTimeout, Names.GENERIC); } private void removePublicationAndPossiblyBecomeCandidate(String reason) { @@ -1233,6 +1308,7 @@ public void onFailure(String source, Exception e) { synchronized (mutex) { removePublicationAndPossiblyBecomeCandidate("clusterApplier#onNewClusterState"); } + scheduledCancellable.cancel(); ackListener.onNodeAck(getLocalNode(), e); publishListener.onFailure(e); } @@ -1262,6 +1338,7 @@ public void onSuccess(String source) { } lagDetector.startLagDetector(publishRequest.getAcceptedState().version()); } + scheduledCancellable.cancel(); ackListener.onNodeAck(getLocalNode(), null); publishListener.onResponse(null); } @@ -1272,6 +1349,7 @@ public void onSuccess(String source) { public void onFailure(Exception e) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; removePublicationAndPossiblyBecomeCandidate("Publication.onCompletion(false)"); + scheduledCancellable.cancel(); final FailedToCommitClusterStateException exception = new FailedToCommitClusterStateException("publication failed", e); ackListener.onNodeAck(getLocalNode(), exception); // other nodes have acked, but not the master. diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java index 6bd41ccf37f0c..dff7ae5a2ee03 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.cluster.coordination; -import joptsimple.OptionSet; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.MetaData; @@ -32,30 +31,24 @@ public class DetachClusterCommand extends ElasticsearchNodeCommand { static final String NODE_DETACHED_MSG = "Node was successfully detached from the cluster"; static final String CONFIRMATION_MSG = - "-------------------------------------------------------------------------------\n" + - "\n" + - "You should run this tool only if you have permanently lost all\n" + - "your master-eligible nodes, and you cannot restore the cluster\n" + - "from a snapshot, or you have already run `elasticsearch-node unsafe-bootstrap`\n" + - "on a master-eligible node that formed a cluster with this node.\n" + - "This tool can cause arbitrary data loss and its use should be your last resort.\n" + - "Do you want to proceed?\n"; + DELIMITER + + "\n" + + "You should only run this tool if you have permanently lost all of the\n" + + "master-eligible nodes in this cluster and you cannot restore the cluster\n" + + "from a snapshot, or you have already unsafely bootstrapped a new cluster\n" + + "by running `elasticsearch-node unsafe-bootstrap` on a master-eligible\n" + + "node that belonged to the same cluster as this node. 
This tool can cause\n" + + "arbitrary data loss and its use should be your last resort.\n" + + "\n" + + "Do you want to proceed?\n"; public DetachClusterCommand() { super("Detaches this node from its cluster, allowing it to unsafely join a new cluster"); } - @Override - protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { - super.execute(terminal, options, env); - - processNodePathsWithLock(terminal, options, env); - - terminal.println(NODE_DETACHED_MSG); - } @Override - protected void processNodePaths(Terminal terminal, Path[] dataPaths) throws IOException { + protected void processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { final Tuple manifestMetaDataTuple = loadMetaData(terminal, dataPaths); final Manifest manifest = manifestMetaDataTuple.v1(); final MetaData metaData = manifestMetaDataTuple.v2(); @@ -63,6 +56,8 @@ protected void processNodePaths(Terminal terminal, Path[] dataPaths) throws IOEx confirm(terminal, CONFIRMATION_MSG); writeNewMetaData(terminal, manifest, updateCurrentTerm(), metaData, updateMetaData(metaData), dataPaths); + + terminal.println(NODE_DETACHED_MSG); } // package-private for tests diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java index 9ef75879e9275..2ce9d52082458 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -44,19 +44,21 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { private static final Logger logger = LogManager.getLogger(ElasticsearchNodeCommand.class); protected final NamedXContentRegistry namedXContentRegistry; + static final String DELIMITER = "------------------------------------------------------------------------\n"; + static final String STOP_WARNING_MSG = - "--------------------------------------------------------------------------\n" + + DELIMITER + "\n" + " WARNING: Elasticsearch MUST be stopped before running this tool." 
+ "\n"; - static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = "failed to lock node's directory, is Elasticsearch still running?"; + protected static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = "failed to lock node's directory, is Elasticsearch still running?"; static final String NO_NODE_FOLDER_FOUND_MSG = "no node folder is found in data folder(s), node has not been started yet?"; static final String NO_MANIFEST_FILE_FOUND_MSG = "no manifest file is found, do you run pre 7.0 Elasticsearch?"; - static final String GLOBAL_GENERATION_MISSING_MSG = "no metadata is referenced from the manifest file, cluster has never been " + - "bootstrapped?"; + protected static final String GLOBAL_GENERATION_MISSING_MSG = + "no metadata is referenced from the manifest file, cluster has never been bootstrapped?"; static final String NO_GLOBAL_METADATA_MSG = "failed to find global metadata, metadata corrupted?"; static final String WRITE_METADATA_EXCEPTION_MSG = "exception occurred when writing new metadata to disk"; - static final String ABORTED_BY_USER_MSG = "aborted by user"; + protected static final String ABORTED_BY_USER_MSG = "aborted by user"; final OptionSpec nodeOrdinalOption; public ElasticsearchNodeCommand(String description) { @@ -78,7 +80,7 @@ protected void processNodePathsWithLock(Terminal terminal, OptionSet options, En if (dataPaths.length == 0) { throw new ElasticsearchException(NO_NODE_FOLDER_FOUND_MSG); } - processNodePaths(terminal, dataPaths); + processNodePaths(terminal, dataPaths, env); } catch (LockObtainFailedException ex) { throw new ElasticsearchException( FAILED_TO_OBTAIN_NODE_LOCK_MSG + " [" + ex.getMessage() + "]"); @@ -114,11 +116,31 @@ protected void confirm(Terminal terminal, String msg) { } @Override - protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + protected final void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { terminal.println(STOP_WARNING_MSG); + if (validateBeforeLock(terminal, env)) { + processNodePathsWithLock(terminal, options, env); + } } - protected abstract void processNodePaths(Terminal terminal, Path[] dataPaths) throws IOException; + /** + * Validate that the command can run before taking any locks. + * @param terminal the terminal to print to + * @param env the env to validate. + * @return true to continue, false to stop (must print message in validate). + */ + protected boolean validateBeforeLock(Terminal terminal, Environment env) { + return true; + } + + + /** + * Process the paths. Locks for the paths is held during this method invocation. 
+ * @param terminal the terminal to use for messages + * @param dataPaths the paths of the node to process + * @param env the env of the node to process + */ + protected abstract void processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException; protected void writeNewMetaData(Terminal terminal, Manifest oldManifest, long newCurrentTerm, diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java index a9309e9fe638a..8b5bbed1ee4e4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.cluster.coordination; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -25,6 +26,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.coordination.Coordinator.Mode; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -36,7 +38,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.discovery.zen.MembershipAction; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.threadpool.ThreadPool; @@ -53,12 +54,15 @@ import java.io.IOException; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.LongSupplier; @@ -82,7 +86,9 @@ public class JoinHelper { private final JoinTaskExecutor joinTaskExecutor; private final TimeValue joinTimeout; - final Set> pendingOutgoingJoins = ConcurrentCollections.newConcurrentSet(); + private final Set> pendingOutgoingJoins = Collections.synchronizedSet(new HashSet<>()); + + private AtomicReference lastFailedJoinAttempt = new AtomicReference<>(); public JoinHelper(Settings settings, AllocationService allocationService, MasterService masterService, TransportService transportService, LongSupplier currentTermSupplier, Supplier currentStateSupplier, @@ -131,7 +137,7 @@ public ClusterTasksResult execute(ClusterState currentSta }); transportService.registerRequestHandler(VALIDATE_JOIN_ACTION_NAME, - MembershipAction.ValidateJoinRequest::new, ThreadPool.Names.GENERIC, + ValidateJoinRequest::new, ThreadPool.Names.GENERIC, (request, channel, task) -> { final ClusterState localState = currentStateSupplier.get(); if (localState.metaData().clusterUUIDCommitted() && @@ -145,8 +151,16 @@ public ClusterTasksResult execute(ClusterState currentSta }); transportService.registerRequestHandler(MembershipAction.DISCOVERY_JOIN_VALIDATE_ACTION_NAME, - MembershipAction.ValidateJoinRequest::new, ThreadPool.Names.GENERIC, + ValidateJoinRequest::new, ThreadPool.Names.GENERIC, 
(request, channel, task) -> { + final ClusterState localState = currentStateSupplier.get(); + if (localState.metaData().clusterUUIDCommitted() && + localState.metaData().clusterUUID().equals(request.getState().metaData().clusterUUID()) == false) { + throw new CoordinationStateRejectedException("mixed-version cluster join validation on cluster state" + + " with a different cluster uuid " + request.getState().metaData().clusterUUID() + + " than local cluster uuid " + localState.metaData().clusterUUID() + + ", rejecting"); + } joinValidators.forEach(action -> action.accept(transportService.getLocalNode(), request.getState())); channel.sendResponse(Empty.INSTANCE); }); @@ -189,11 +203,63 @@ public String toString() { }; } + boolean isJoinPending() { + return pendingOutgoingJoins.isEmpty() == false; + } + public void sendJoinRequest(DiscoveryNode destination, Optional optionalJoin) { sendJoinRequest(destination, optionalJoin, () -> { }); } + // package-private for testing + static class FailedJoinAttempt { + private final DiscoveryNode destination; + private final JoinRequest joinRequest; + private final TransportException exception; + private final long timestamp; + + FailedJoinAttempt(DiscoveryNode destination, JoinRequest joinRequest, TransportException exception) { + this.destination = destination; + this.joinRequest = joinRequest; + this.exception = exception; + this.timestamp = System.nanoTime(); + } + + void logNow() { + logger.log(getLogLevel(exception), + () -> new ParameterizedMessage("failed to join {} with {}", destination, joinRequest), + exception); + } + + static Level getLogLevel(TransportException e) { + Throwable cause = e.unwrapCause(); + if (cause instanceof CoordinationStateRejectedException || + cause instanceof FailedToCommitClusterStateException || + cause instanceof NotMasterException) { + return Level.DEBUG; + } + return Level.INFO; + } + + void logWarnWithTimestamp() { + logger.info(() -> new ParameterizedMessage("last failed join attempt was {} ago, failed to join {} with {}", + TimeValue.timeValueMillis(TimeValue.nsecToMSec(System.nanoTime() - timestamp)), + destination, + joinRequest), + exception); + } + } + + + void logLastFailedJoinAttempt() { + FailedJoinAttempt attempt = lastFailedJoinAttempt.get(); + if (attempt != null) { + attempt.logWarnWithTimestamp(); + lastFailedJoinAttempt.compareAndSet(attempt, null); + } + } + public void sendJoinRequest(DiscoveryNode destination, Optional optionalJoin, Runnable onCompletion) { assert destination.isMasterNode() : "trying to join master-ineligible " + destination; final JoinRequest joinRequest = new JoinRequest(transportService.getLocalNode(), optionalJoin); @@ -222,6 +288,7 @@ public void handleResponse(Empty response) { pendingOutgoingJoins.remove(dedupKey); logger.debug("successfully joined {} with {}", destination, joinRequest); onCompletion.run(); + lastFailedJoinAttempt.set(null); } @Override @@ -229,6 +296,9 @@ public void handleException(TransportException exp) { pendingOutgoingJoins.remove(dedupKey); logger.info(() -> new ParameterizedMessage("failed to join {} with {}", destination, joinRequest), exp); onCompletion.run(); + FailedJoinAttempt attempt = new FailedJoinAttempt(destination, joinRequest, exp); + attempt.logNow(); + lastFailedJoinAttempt.set(attempt); } @Override @@ -276,7 +346,7 @@ public void sendValidateJoinRequest(DiscoveryNode node, ClusterState state, Acti actionName = VALIDATE_JOIN_ACTION_NAME; } transportService.sendRequest(node, actionName, - new 
MembershipAction.ValidateJoinRequest(state), + new ValidateJoinRequest(state), TransportRequestOptions.builder().withTimeout(joinTimeout).build(), new EmptyTransportResponseHandler(ThreadPool.Names.GENERIC) { @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java index a360ea1ab60b8..ef83b9191d094 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java @@ -194,6 +194,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState .minimumMasterNodesOnPublishingMaster(minimumMasterNodesOnLocalNode) .build(); logger.trace("becomeMasterAndTrimConflictingNodes: {}", tmpState.nodes()); + allocationService.cleanCaches(); tmpState = PersistentTasksCustomMetaData.disassociateDeadNodes(tmpState); return ClusterState.builder(allocationService.disassociateDeadNodes(tmpState, false, "removed dead nodes on election")); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java index e2a94f1140b92..d6bd22bcd76fd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java @@ -21,6 +21,7 @@ import org.elasticsearch.cli.CommandLoggingConfigurator; import org.elasticsearch.cli.MultiCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.env.NodeRepurposeCommand; // NodeToolCli does not extend LoggingAwareCommand, because LoggingAwareCommand performs logging initialization // after LoggingAwareCommand instance is constructed. 
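The JoinHelper changes above keep only the most recent failed join attempt in an AtomicReference and derive a log level from the unwrapped cause, so routine coordination rejections stay at DEBUG while unexpected failures surface at INFO. A minimal sketch of that pattern, using a stand-in exception type rather than the real coordination classes:

import org.apache.logging.log4j.Level;
import java.util.concurrent.atomic.AtomicReference;

// Illustrative sketch only; IllegalStateException stands in for the coordination-layer
// rejections that the real code treats as routine.
final class LastFailureTracker<T> {
    private final AtomicReference<T> lastFailure = new AtomicReference<>();

    void recordFailure(T failure) { lastFailure.set(failure); }  // remember only the newest failure
    void clearOnSuccess() { lastFailure.set(null); }             // a successful join wipes the record

    static Level levelFor(Throwable cause) {
        return cause instanceof IllegalStateException ? Level.DEBUG : Level.INFO;
    }
}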
@@ -32,8 +33,10 @@ public class NodeToolCli extends MultiCommand { public NodeToolCli() { - super("A CLI tool to unsafely recover a cluster after the permanent loss of too many master-eligible nodes", ()->{}); + super("A CLI tool to do unsafe cluster and index manipulations on current node", + ()->{}); CommandLoggingConfigurator.configureLoggingWithoutConfig(); + subcommands.put("repurpose", new NodeRepurposeCommand()); subcommands.put("unsafe-bootstrap", new UnsafeBootstrapMasterCommand()); subcommands.put("detach-cluster", new DetachClusterCommand()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java index b0b91cd0980f2..deaca572e973e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java @@ -320,13 +320,17 @@ private static void buildDiffAndSerializeStates(ClusterState clusterState, Clust } try { if (sendFullVersion || !previousState.nodes().nodeExists(node)) { - serializedStates.putIfAbsent(node.getVersion(), serializeFullClusterState(clusterState, node.getVersion())); + if (serializedStates.containsKey(node.getVersion()) == false) { + serializedStates.put(node.getVersion(), serializeFullClusterState(clusterState, node.getVersion())); + } } else { // will send a diff if (diff == null) { diff = clusterState.diff(previousState); } - serializedDiffs.putIfAbsent(node.getVersion(), serializeDiffClusterState(diff, node.getVersion())); + if (serializedDiffs.containsKey(node.getVersion()) == false) { + serializedDiffs.put(node.getVersion(), serializeDiffClusterState(diff, node.getVersion())); + } } } catch (IOException e) { throw new ElasticsearchException("failed to serialize cluster state for publishing to node {}", e, node); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java index 72afe8ec70428..c15e832142eaf 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.cluster.coordination; -import joptsimple.OptionSet; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -45,15 +44,17 @@ public class UnsafeBootstrapMasterCommand extends ElasticsearchNodeCommand { static final String CLUSTER_STATE_TERM_VERSION_MSG_FORMAT = "Current node cluster state (term, version) pair is (%s, %s)"; static final String CONFIRMATION_MSG = - "--------------------------------------------------------------------------\n" + + DELIMITER + + "\n" + + "You should only run this tool if you have permanently lost half or more\n" + + "of the master-eligible nodes in this cluster, and you cannot restore the\n" + + "cluster from a snapshot. This tool can cause arbitrary data loss and its\n" + + "use should be your last resort. 
If you have multiple surviving master\n" + + "eligible nodes, you should run this tool on the node with the highest\n" + + "cluster state (term, version) pair.\n" + "\n" + - "You should run this tool only if you have permanently lost half\n" + - "or more of the master-eligible nodes, and you cannot restore the cluster\n" + - "from a snapshot. This tool can cause arbitrary data loss and its use " + - "should be your last resort.\n" + - "If you have multiple survived master eligible nodes, consider running\n" + - "this tool on the node with the highest cluster state (term, version) pair.\n" + "Do you want to proceed?\n"; + static final String NOT_MASTER_NODE_MSG = "unsafe-bootstrap tool can only be run on master eligible node"; static final String NO_NODE_METADATA_FOUND_MSG = "no node meta data is found, node has not been started yet?"; @@ -70,9 +71,7 @@ public class UnsafeBootstrapMasterCommand extends ElasticsearchNodeCommand { } @Override - protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { - super.execute(terminal, options, env); - + protected boolean validateBeforeLock(Terminal terminal, Environment env) { Settings settings = env.settings(); terminal.println(Terminal.Verbosity.VERBOSE, "Checking node.master setting"); Boolean master = Node.NODE_MASTER_SETTING.get(settings); @@ -80,12 +79,10 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th throw new ElasticsearchException(NOT_MASTER_NODE_MSG); } - processNodePathsWithLock(terminal, options, env); - - terminal.println(MASTER_NODE_BOOTSTRAPPED_MSG); + return true; } - protected void processNodePaths(Terminal terminal, Path[] dataPaths) throws IOException { + protected void processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { terminal.println(Terminal.Verbosity.VERBOSE, "Loading node metadata"); final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, dataPaths); if (nodeMetaData == null) { @@ -128,5 +125,7 @@ protected void processNodePaths(Terminal terminal, Path[] dataPaths) throws IOEx .build(); writeNewMetaData(terminal, manifest, manifest.getCurrentTerm(), metaData, newMetaData, dataPaths); + + terminal.println(MASTER_NODE_BOOTSTRAPPED_MSG); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java similarity index 50% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeRequest.java rename to server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java index 6b49fcc9979c2..dec4a13c67d1e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/IndexUpgradeRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java @@ -16,38 +16,37 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.client.migration; +package org.elasticsearch.cluster.coordination; -import org.elasticsearch.client.Validatable; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.transport.TransportRequest; -import java.util.Objects; +import java.io.IOException; -/** - * A request for performing Upgrade on Index - * Part of Migration API - */ -public class IndexUpgradeRequest implements Validatable { +public class ValidateJoinRequest extends TransportRequest { + private ClusterState state; - private String index; + public ValidateJoinRequest() {} - public IndexUpgradeRequest(String index) { - this.index = index; + public ValidateJoinRequest(ClusterState state) { + this.state = state; } - public String index() { - return index; + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.state = ClusterState.readFrom(in, null); } @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - IndexUpgradeRequest request = (IndexUpgradeRequest) o; - return Objects.equals(index, request.index); + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + this.state.writeTo(out); } - @Override - public int hashCode() { - return Objects.hash(index); + public ClusterState getState() { + return state; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 050d97ba54cf0..31f7271cdd06d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.time.DateFormatter; @@ -42,15 +43,19 @@ import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.SortedMap; +import java.util.Spliterators; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.StreamSupport; import static java.util.Collections.unmodifiableList; @@ -308,71 +313,88 @@ public String resolveDateMathExpression(String dateExpression) { return dateMathExpressionResolver.resolveExpression(dateExpression, new Context(null, null)); } + /** + * Resolve an array of expressions to the set of indices and aliases that these expressions match. + */ + public Set resolveExpressions(ClusterState state, String... 
expressions) { + Context context = new Context(state, IndicesOptions.lenientExpandOpen(), true, false); + List resolvedExpressions = Arrays.asList(expressions); + for (ExpressionResolver expressionResolver : expressionResolvers) { + resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions); + } + return Collections.unmodifiableSet(new HashSet<>(resolvedExpressions)); + } + /** * Iterates through the list of indices and selects the effective list of filtering aliases for the * given index. *

Only aliases with filters are returned. If the indices list contains a non-filtering reference to * the index itself - null is returned. Returns {@code null} if no filtering is required. + * NOTE: The provided expressions must have been resolved already via {@link #resolveExpressions}. */ - public String[] filteringAliases(ClusterState state, String index, String... expressions) { - return indexAliases(state, index, AliasMetaData::filteringRequired, false, expressions); + public String[] filteringAliases(ClusterState state, String index, Set resolvedExpressions) { + return indexAliases(state, index, AliasMetaData::filteringRequired, false, resolvedExpressions); + } + + /** + * Whether to generate the candidate set from index aliases, or from the set of resolved expressions. + * @param indexAliasesSize the number of aliases of the index + * @param resolvedExpressionsSize the number of resolved expressions + */ + // pkg-private for testing + boolean iterateIndexAliases(int indexAliasesSize, int resolvedExpressionsSize) { + return indexAliasesSize <= resolvedExpressionsSize; } /** * Iterates through the list of indices and selects the effective list of required aliases for the given index. *

Only aliases where the given predicate tests successfully are returned. If the indices list contains a non-required reference to * the index itself - null is returned. Returns {@code null} if no filtering is required. + *

NOTE: the provided expressions must have been resolved already via {@link #resolveExpressions}. */ public String[] indexAliases(ClusterState state, String index, Predicate requiredAlias, boolean skipIdentity, - String... expressions) { - // expand the aliases wildcard - List resolvedExpressions = expressions != null ? Arrays.asList(expressions) : Collections.emptyList(); - Context context = new Context(state, IndicesOptions.lenientExpandOpen(), true, false); - for (ExpressionResolver expressionResolver : expressionResolvers) { - resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions); - } - + Set resolvedExpressions) { if (isAllIndices(resolvedExpressions)) { return null; } + final IndexMetaData indexMetaData = state.metaData().getIndices().get(index); if (indexMetaData == null) { // Shouldn't happen throw new IndexNotFoundException(index); } - // optimize for the most common single index/alias scenario - if (resolvedExpressions.size() == 1) { - String alias = resolvedExpressions.get(0); - AliasMetaData aliasMetaData = indexMetaData.getAliases().get(alias); - if (aliasMetaData == null || requiredAlias.test(aliasMetaData) == false) { - return null; - } - return new String[]{alias}; + if (skipIdentity == false && resolvedExpressions.contains(index)) { + return null; + } + + final ImmutableOpenMap indexAliases = indexMetaData.getAliases(); + final AliasMetaData[] aliasCandidates; + if (iterateIndexAliases(indexAliases.size(), resolvedExpressions.size())) { + // faster to iterate indexAliases + aliasCandidates = StreamSupport.stream(Spliterators.spliteratorUnknownSize(indexAliases.values().iterator(), 0), false) + .map(cursor -> cursor.value) + .filter(aliasMetaData -> resolvedExpressions.contains(aliasMetaData.alias())) + .toArray(AliasMetaData[]::new); + } else { + // faster to iterate resolvedExpressions + aliasCandidates = resolvedExpressions.stream() + .map(indexAliases::get) + .filter(Objects::nonNull) + .toArray(AliasMetaData[]::new); } + List aliases = null; - for (String alias : resolvedExpressions) { - if (alias.equals(index)) { - if (skipIdentity) { - continue; - } else { - return null; - } - } - AliasMetaData aliasMetaData = indexMetaData.getAliases().get(alias); - // Check that this is an alias for the current index - // Otherwise - skip it - if (aliasMetaData != null) { - if (requiredAlias.test(aliasMetaData)) { - // If required - add it to the list of aliases - if (aliases == null) { - aliases = new ArrayList<>(); - } - aliases.add(alias); - } else { - // If not, we have a non required alias for this index - no further checking needed - return null; + for (AliasMetaData aliasMetaData : aliasCandidates) { + if (requiredAlias.test(aliasMetaData)) { + // If required - add it to the list of aliases + if (aliases == null) { + aliases = new ArrayList<>(); } + aliases.add(aliasMetaData.alias()); + } else { + // If not, we have a non required alias for this index - no further checking needed + return null; } } if (aliases == null) { @@ -499,7 +521,7 @@ public Map> resolveSearchRoutingAllIndices(MetaData metaData * @param aliasesOrIndices the array containing index names * @return true if the provided array maps to all indices, false otherwise */ - public static boolean isAllIndices(List aliasesOrIndices) { + public static boolean isAllIndices(Collection aliasesOrIndices) { return aliasesOrIndices == null || aliasesOrIndices.isEmpty() || isExplicitAllPattern(aliasesOrIndices); } @@ -510,8 +532,8 @@ public static boolean isAllIndices(List aliasesOrIndices) { * 
@param aliasesOrIndices the array containing index names * @return true if the provided array explicitly maps to all indices, false otherwise */ - static boolean isExplicitAllPattern(List aliasesOrIndices) { - return aliasesOrIndices != null && aliasesOrIndices.size() == 1 && MetaData.ALL.equals(aliasesOrIndices.get(0)); + static boolean isExplicitAllPattern(Collection aliasesOrIndices) { + return aliasesOrIndices != null && aliasesOrIndices.size() == 1 && MetaData.ALL.equals(aliasesOrIndices.iterator().next()); } /** @@ -584,7 +606,7 @@ public long getStartTime() { /** * This is used to prevent resolving aliases to concrete indices but this also means * that we might return aliases that point to a closed index. This is currently only used - * by {@link #filteringAliases(ClusterState, String, String...)} since it's the only one that needs aliases + * by {@link #filteringAliases(ClusterState, String, Set)} since it's the only one that needs aliases */ boolean isPreserveAliases() { return preserveAliases; @@ -628,6 +650,8 @@ public List resolve(Context context, List expressions) { return resolveEmptyOrTrivialWildcard(options, metaData); } + // TODO: Fix API to work with sets rather than lists since we need to convert to sets + // internally anyway. Set result = innerResolve(context, expressions, options, metaData); if (result == null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index e66b55b1a7ed6..f61b2fc208f83 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -345,6 +345,19 @@ public static void toXContentWithTypes(IndexTemplateMetaData indexTemplateMetaDa builder.endObject(); } + /** + * Removes the nested type in the xContent representation of {@link IndexTemplateMetaData}. + * + * This method is useful to help bridge the gap between an the internal representation which still uses (the legacy format) a + * nested type in the mapping, and the external representation which does not use a nested type in the mapping. + */ + public static void removeType(IndexTemplateMetaData indexTemplateMetaData, XContentBuilder builder) throws IOException { + builder.startObject(); + toInnerXContent(indexTemplateMetaData, builder, + new ToXContent.MapParams(Collections.singletonMap("reduce_mappings", "true")), false); + builder.endObject(); + } + /** * Serializes the template to xContent, making sure not to nest mappings under the * type name. 
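The IndexNameExpressionResolver rework above resolves wildcard expressions once up front and then, for each index, intersects its aliases with that resolved set by iterating whichever collection is smaller. A minimal sketch of that size-based choice, with plain collections standing in for the real metadata types:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Illustrative sketch only: pick the cheaper side to walk when intersecting an index's
// aliases with the already-resolved expression set.
final class AliasCandidates {
    static List<String> intersect(Map<String, ?> indexAliases, Set<String> resolvedExpressions) {
        List<String> candidates = new ArrayList<>();
        if (indexAliases.size() <= resolvedExpressions.size()) {
            for (String alias : indexAliases.keySet()) {          // few aliases: walk the alias map
                if (resolvedExpressions.contains(alias)) {
                    candidates.add(alias);
                }
            }
        } else {
            for (String expression : resolvedExpressions) {       // few expressions: walk those instead
                if (indexAliases.containsKey(expression)) {
                    candidates.add(expression);
                }
            }
        }
        return candidates;
    }
}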
@@ -361,6 +374,7 @@ public static void toXContent(IndexTemplateMetaData indexTemplateMetaData, builder.endObject(); } + static void toInnerXContentWithTypes(IndexTemplateMetaData indexTemplateMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 6bc9104000fed..d3520da670289 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -88,7 +88,11 @@ public MetaDataIndexUpgradeService(Settings settings, NamedXContentRegistry xCon public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) { // Throws an exception if there are too-old segments: if (isUpgraded(indexMetaData)) { - return indexMetaData; + /* + * We still need to check for broken index settings since it might be that a user removed a plugin that registers a setting + * needed by this index. + */ + return archiveBrokenIndexSettings(indexMetaData); } checkSupportedVersion(indexMetaData, minimumIndexCompatibilityVersion); IndexMetaData newMetaData = indexMetaData; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java index 25a605088ef66..91229c93ca854 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java @@ -97,6 +97,10 @@ public boolean shouldBootstrapNewHistoryUUID() { return false; } + public boolean expectEmptyRetentionLeases() { + return true; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -181,6 +185,11 @@ public Type getType() { public String toString() { return "existing store recovery; bootstrap_history_uuid=" + bootstrapNewHistoryUUID; } + + @Override + public boolean expectEmptyRetentionLeases() { + return bootstrapNewHistoryUUID; + } } /** @@ -317,5 +326,10 @@ public Type getType() { public String toString() { return "peer recovery"; } + + @Override + public boolean expectEmptyRetentionLeases() { + return false; + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index fd3fb8edd5e17..c688a120a8b6a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -460,6 +460,10 @@ protected long currentNanoTime() { return System.nanoTime(); } + public void cleanCaches() { + gatewayAllocator.cleanCaches(); + } + /** * this class is used to describe results of applying a set of * {@link org.elasticsearch.cluster.routing.allocation.command.AllocationCommand} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 8a72fe8cb49a9..c73a630bb662c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -86,16 +86,21 @@ public EnableAllocationDecider(Settings settings, ClusterSettings clusterSetting clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance); } - public void setEnableRebalance(Rebalance enableRebalance) { + private void setEnableRebalance(Rebalance enableRebalance) { this.enableRebalance = enableRebalance; } - public void setEnableAllocation(Allocation enableAllocation) { + private void setEnableAllocation(Allocation enableAllocation) { this.enableAllocation = enableAllocation; } @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + return canAllocate(shardRouting, allocation); + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { if (allocation.ignoreDisable()) { return allocation.decision(Decision.YES, NAME, "explicitly ignoring any disabling of allocation due to manual allocation commands via the reroute API"); @@ -136,10 +141,29 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing } } + @Override + public Decision canRebalance(RoutingAllocation allocation) { + if (allocation.ignoreDisable()) { + return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of rebalancing"); + } + + if (enableRebalance == Rebalance.NONE) { + for (IndexMetaData indexMetaData : allocation.metaData()) { + if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexMetaData.getSettings()) + && INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexMetaData.getSettings()) != Rebalance.NONE) { + return allocation.decision(Decision.YES, NAME, "rebalancing is permitted on one or more indices"); + } + } + return allocation.decision(Decision.NO, NAME, "no rebalancing is allowed due to %s", setting(enableRebalance, false)); + } + + return allocation.decision(Decision.YES, NAME, "rebalancing is not globally disabled"); + } + @Override public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { if (allocation.ignoreDisable()) { - return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of relocation"); + return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of rebalancing"); } Settings indexSettings = allocation.metaData().getIndexSafe(shardRouting.index()).getSettings(); diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java index e254196caa47b..913d8376dcd1f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java @@ -460,6 +460,12 @@ private void applyChanges(UpdateTask task, ClusterState previousClusterState, Cl nodeConnectionsService.disconnectFromNodesExcept(newClusterState.nodes()); + assert newClusterState.coordinationMetaData().getLastAcceptedConfiguration() + .equals(newClusterState.coordinationMetaData().getLastCommittedConfiguration()) + : newClusterState.coordinationMetaData().getLastAcceptedConfiguration() + + " vs " + newClusterState.coordinationMetaData().getLastCommittedConfiguration() + + " on " + newClusterState.nodes().getLocalNode(); + logger.debug("set locally applied cluster state to version {}", 
newClusterState.version()); state.set(newClusterState); diff --git a/server/src/main/java/org/elasticsearch/common/Numbers.java b/server/src/main/java/org/elasticsearch/common/Numbers.java index 27c1dd18e97b8..51aecb5e19c9c 100644 --- a/server/src/main/java/org/elasticsearch/common/Numbers.java +++ b/server/src/main/java/org/elasticsearch/common/Numbers.java @@ -125,6 +125,10 @@ public static long toLongExact(Number n) { } } + // weak bounds on the BigDecimal representation to allow for coercion + private static BigDecimal BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE); + private static BigDecimal BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE); + /** Return the long that {@code stringValue} stores or throws an exception if the * stored value cannot be converted to a long that stores the exact same * value and {@code coerce} is false. */ @@ -138,6 +142,10 @@ public static long toLong(String stringValue, boolean coerce) { final BigInteger bigIntegerValue; try { BigDecimal bigDecimalValue = new BigDecimal(stringValue); + if (bigDecimalValue.compareTo(BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE) >= 0 || + bigDecimalValue.compareTo(BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE) <= 0) { + throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); + } bigIntegerValue = coerce ? bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact(); } catch (ArithmeticException e) { throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part"); diff --git a/server/src/main/java/org/elasticsearch/common/Rounding.java b/server/src/main/java/org/elasticsearch/common/Rounding.java index 6d11739133dda..3558b16aac1c8 100644 --- a/server/src/main/java/org/elasticsearch/common/Rounding.java +++ b/server/src/main/java/org/elasticsearch/common/Rounding.java @@ -54,19 +54,51 @@ */ public abstract class Rounding implements Writeable { - public static String format(long epochMillis) { - return Instant.ofEpochMilli(epochMillis) + "/" + epochMillis; - } - public enum DateTimeUnit { - WEEK_OF_WEEKYEAR( (byte) 1, IsoFields.WEEK_OF_WEEK_BASED_YEAR), - YEAR_OF_CENTURY( (byte) 2, ChronoField.YEAR_OF_ERA), - QUARTER_OF_YEAR( (byte) 3, IsoFields.QUARTER_OF_YEAR), - MONTH_OF_YEAR( (byte) 4, ChronoField.MONTH_OF_YEAR), - DAY_OF_MONTH( (byte) 5, ChronoField.DAY_OF_MONTH), - HOUR_OF_DAY( (byte) 6, ChronoField.HOUR_OF_DAY), - MINUTES_OF_HOUR( (byte) 7, ChronoField.MINUTE_OF_HOUR), - SECOND_OF_MINUTE( (byte) 8, ChronoField.SECOND_OF_MINUTE); + WEEK_OF_WEEKYEAR((byte) 1, IsoFields.WEEK_OF_WEEK_BASED_YEAR) { + long roundFloor(long utcMillis) { + return DateUtils.roundWeekOfWeekYear(utcMillis); + } + }, + YEAR_OF_CENTURY((byte) 2, ChronoField.YEAR_OF_ERA) { + long roundFloor(long utcMillis) { + return DateUtils.roundYear(utcMillis); + } + }, + QUARTER_OF_YEAR((byte) 3, IsoFields.QUARTER_OF_YEAR) { + long roundFloor(long utcMillis) { + return DateUtils.roundQuarterOfYear(utcMillis); + } + }, + MONTH_OF_YEAR((byte) 4, ChronoField.MONTH_OF_YEAR) { + long roundFloor(long utcMillis) { + return DateUtils.roundMonthOfYear(utcMillis); + } + }, + DAY_OF_MONTH((byte) 5, ChronoField.DAY_OF_MONTH) { + final long unitMillis = ChronoField.DAY_OF_MONTH.getBaseUnit().getDuration().toMillis(); + long roundFloor(long utcMillis) { + return DateUtils.roundFloor(utcMillis, unitMillis); + } + }, + HOUR_OF_DAY((byte) 6, ChronoField.HOUR_OF_DAY) { + final long unitMillis = 
ChronoField.HOUR_OF_DAY.getBaseUnit().getDuration().toMillis(); + long roundFloor(long utcMillis) { + return DateUtils.roundFloor(utcMillis, unitMillis); + } + }, + MINUTES_OF_HOUR((byte) 7, ChronoField.MINUTE_OF_HOUR) { + final long unitMillis = ChronoField.MINUTE_OF_HOUR.getBaseUnit().getDuration().toMillis(); + long roundFloor(long utcMillis) { + return DateUtils.roundFloor(utcMillis, unitMillis); + } + }, + SECOND_OF_MINUTE((byte) 8, ChronoField.SECOND_OF_MINUTE) { + final long unitMillis = ChronoField.SECOND_OF_MINUTE.getBaseUnit().getDuration().toMillis(); + long roundFloor(long utcMillis) { + return DateUtils.roundFloor(utcMillis, unitMillis); + } + }; private final byte id; private final TemporalField field; @@ -76,6 +108,15 @@ public enum DateTimeUnit { this.field = field; } + /** + * This rounds down the supplied milliseconds since the epoch down to the next unit. In order to retain performance this method + * should be as fast as possible and not try to convert dates to java-time objects if possible + * + * @param utcMillis the milliseconds since the epoch + * @return the rounded down milliseconds since the epoch + */ + abstract long roundFloor(long utcMillis); + public byte getId() { return id; } @@ -182,12 +223,13 @@ static class TimeUnitRounding extends Rounding { private final DateTimeUnit unit; private final ZoneId timeZone; private final boolean unitRoundsToMidnight; - + private final boolean isUtcTimeZone; TimeUnitRounding(DateTimeUnit unit, ZoneId timeZone) { this.unit = unit; this.timeZone = timeZone; this.unitRoundsToMidnight = this.unit.field.getBaseUnit().getDuration().toMillis() > 3600000L; + this.isUtcTimeZone = timeZone.normalized().equals(ZoneOffset.UTC); } TimeUnitRounding(StreamInput in) throws IOException { @@ -223,9 +265,7 @@ private LocalDateTime truncateLocalDateTime(LocalDateTime localDateTime) { return LocalDateTime.of(localDateTime.getYear(), localDateTime.getMonthValue(), 1, 0, 0); case QUARTER_OF_YEAR: - int quarter = (int) IsoFields.QUARTER_OF_YEAR.getFrom(localDateTime); - int month = ((quarter - 1) * 3) + 1; - return LocalDateTime.of(localDateTime.getYear(), month, 1, 0, 0); + return LocalDateTime.of(localDateTime.getYear(), localDateTime.getMonth().firstMonthOfQuarter(), 1, 0, 0); case YEAR_OF_CENTURY: return LocalDateTime.of(LocalDate.of(localDateTime.getYear(), 1, 1), LocalTime.MIDNIGHT); @@ -236,7 +276,14 @@ private LocalDateTime truncateLocalDateTime(LocalDateTime localDateTime) { } @Override - public long round(final long utcMillis) { + public long round(long utcMillis) { + // this works as long as the offset doesn't change. It is worth getting this case out of the way first, as + // the calculations for fixing things near to offset changes are a little expensive and are unnecessary in the common case + // of working in UTC. 
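// A rough sketch of what the fixed-length-unit floor amounts to (assuming DateUtils.roundFloor
// is a plain modulo floor): rounded = utcMillis - Math.floorMod(utcMillis, unitMillis).
// For example, with unitMillis = 3_600_000 (one hour), utcMillis = 90_061_000 rounds down to 90_000_000.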
+ if (isUtcTimeZone) { + return unit.roundFloor(utcMillis); + } + Instant instant = Instant.ofEpochMilli(utcMillis); if (unitRoundsToMidnight) { final LocalDateTime localDateTime = LocalDateTime.ofInstant(instant, timeZone); diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index eea30dd4e530f..8a4d51e4dc93c 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -40,10 +40,10 @@ public class FsBlobStore implements BlobStore { private final boolean readOnly; - public FsBlobStore(Settings settings, Path path) throws IOException { + public FsBlobStore(Settings settings, Path path, boolean readonly) throws IOException { this.path = path; - this.readOnly = settings.getAsBoolean("readonly", false); - if (!this.readOnly) { + this.readOnly = readonly; + if (this.readOnly == false) { Files.createDirectories(path); } this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size", @@ -74,6 +74,11 @@ public BlobContainer blobContainer(BlobPath path) { @Override public void delete(BlobPath path) throws IOException { + assert readOnly == false : "should not delete anything from a readonly repository: " + path; + //noinspection ConstantConditions in case assertions are disabled + if (readOnly) { + throw new ElasticsearchException("unexpectedly deleting [" + path + "] from a readonly repository"); + } IOUtils.rm(buildPath(path)); } @@ -84,7 +89,7 @@ public void close() { private synchronized Path buildAndCreate(BlobPath path) throws IOException { Path f = buildPath(path); - if (!readOnly) { + if (readOnly == false) { Files.createDirectories(f); } return f; diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index 795cc235ce759..c8d6f75c48b0a 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.XContentSubParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.FieldData; @@ -435,51 +436,52 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina NumberFormatException numberFormatException = null; if(parser.currentToken() == Token.START_OBJECT) { - while(parser.nextToken() != Token.END_OBJECT) { - if(parser.currentToken() == Token.FIELD_NAME) { - String field = parser.currentName(); - if(LATITUDE.equals(field)) { - parser.nextToken(); - switch (parser.currentToken()) { - case VALUE_NUMBER: - case VALUE_STRING: - try { - lat = parser.doubleValue(true); - } catch (NumberFormatException e) { - numberFormatException = e; - } - break; - default: - throw new ElasticsearchParseException("latitude must be a number"); - } - } else if (LONGITUDE.equals(field)) { - parser.nextToken(); - switch (parser.currentToken()) { - case VALUE_NUMBER: - case VALUE_STRING: - try { - lon = parser.doubleValue(true); - } catch (NumberFormatException e) { - numberFormatException = e; - } - break; - default: - throw new 
ElasticsearchParseException("longitude must be a number"); - } - } else if (GEOHASH.equals(field)) { - if(parser.nextToken() == Token.VALUE_STRING) { - geohash = parser.text(); + try (XContentSubParser subParser = new XContentSubParser(parser)) { + while (subParser.nextToken() != Token.END_OBJECT) { + if (subParser.currentToken() == Token.FIELD_NAME) { + String field = subParser.currentName(); + if (LATITUDE.equals(field)) { + subParser.nextToken(); + switch (subParser.currentToken()) { + case VALUE_NUMBER: + case VALUE_STRING: + try { + lat = subParser.doubleValue(true); + } catch (NumberFormatException e) { + numberFormatException = e; + } + break; + default: + throw new ElasticsearchParseException("latitude must be a number"); + } + } else if (LONGITUDE.equals(field)) { + subParser.nextToken(); + switch (subParser.currentToken()) { + case VALUE_NUMBER: + case VALUE_STRING: + try { + lon = subParser.doubleValue(true); + } catch (NumberFormatException e) { + numberFormatException = e; + } + break; + default: + throw new ElasticsearchParseException("longitude must be a number"); + } + } else if (GEOHASH.equals(field)) { + if (subParser.nextToken() == Token.VALUE_STRING) { + geohash = subParser.text(); + } else { + throw new ElasticsearchParseException("geohash must be a string"); + } } else { - throw new ElasticsearchParseException("geohash must be a string"); + throw new ElasticsearchParseException("field must be either [{}], [{}] or [{}]", LATITUDE, LONGITUDE, GEOHASH); } } else { - throw new ElasticsearchParseException("field must be either [{}], [{}] or [{}]", LATITUDE, LONGITUDE, GEOHASH); + throw new ElasticsearchParseException("token [{}] not allowed", subParser.currentToken()); } - } else { - throw new ElasticsearchParseException("token [{}] not allowed", parser.currentToken()); } } - if (geohash != null) { if(!Double.isNaN(lat) || !Double.isNaN(lon)) { throw new ElasticsearchParseException("field must be either lat/lon or geohash"); @@ -498,19 +500,21 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina } } else if(parser.currentToken() == Token.START_ARRAY) { - int element = 0; - while(parser.nextToken() != Token.END_ARRAY) { - if(parser.currentToken() == Token.VALUE_NUMBER) { - element++; - if(element == 1) { - lon = parser.doubleValue(); - } else if(element == 2) { - lat = parser.doubleValue(); + try (XContentSubParser subParser = new XContentSubParser(parser)) { + int element = 0; + while (subParser.nextToken() != Token.END_ARRAY) { + if (subParser.currentToken() == Token.VALUE_NUMBER) { + element++; + if (element == 1) { + lon = subParser.doubleValue(); + } else if (element == 2) { + lat = subParser.doubleValue(); + } else { + GeoPoint.assertZValue(ignoreZValue, subParser.doubleValue()); + } } else { - GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); + throw new ElasticsearchParseException("numeric value expected"); } - } else { - throw new ElasticsearchParseException("numeric value expected"); } } return point.reset(lat, lon); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java b/server/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java index 388f8424a4755..168d18d45e148 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; 
import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.TermState; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; @@ -32,6 +33,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; +import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; @@ -126,6 +128,11 @@ public BytesRef term() throws IOException { return current; } + @Override + public AttributeSource attributes() { + throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); + } + @Override public boolean seekExact(BytesRef text) throws IOException { int docFreq = 0; @@ -195,6 +202,16 @@ public void seekExact(long ord) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); } + @Override + public void seekExact(BytesRef term, TermState state) throws IOException { + throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); + } + + @Override + public TermState termState() throws IOException { + throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); + } + @Override public SeekStatus seekCeil(BytesRef text) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index dd4389c2d6b0f..56d1b5cedc33c 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -36,6 +36,8 @@ import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.regex.Pattern; @@ -52,7 +54,11 @@ public static Query newMatchNoDocsQuery(String reason) { public static Query newUnmappedFieldQuery(String field) { - return Queries.newMatchNoDocsQuery("unmapped field [" + (field != null ? 
field : "null") + "]"); + return newUnmappedFieldsQuery(Collections.singletonList(field)); + } + + public static Query newUnmappedFieldsQuery(Collection fields) { + return Queries.newMatchNoDocsQuery("unmapped fields " + fields); } public static Query newLenientFieldQuery(String field, RuntimeException e) { diff --git a/server/src/main/java/org/elasticsearch/common/settings/AddStringKeyStoreCommand.java b/server/src/main/java/org/elasticsearch/common/settings/AddStringKeyStoreCommand.java index ee6614618010b..14785af095d5f 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AddStringKeyStoreCommand.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AddStringKeyStoreCommand.java @@ -91,8 +91,8 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th try { keystore.setString(setting, value); - } catch (IllegalArgumentException e) { - throw new UserException(ExitCodes.DATA_ERROR, "String value must contain only ASCII"); + } catch (final IllegalArgumentException e) { + throw new UserException(ExitCodes.DATA_ERROR, e.getMessage()); } keystore.save(env.configFile(), new char[0]); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 0ae30c465b26e..95755108047a4 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -67,10 +67,10 @@ import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.PeerFinder; +import org.elasticsearch.discovery.SeedHostsResolver; +import org.elasticsearch.discovery.SettingsBasedSeedHostsProvider; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.FaultDetection; -import org.elasticsearch.discovery.SettingsBasedSeedHostsProvider; -import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -407,10 +407,10 @@ public void apply(Settings value, Settings current, Settings previous) { ZenDiscovery.MAX_PENDING_CLUSTER_STATES_SETTING, SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING, SettingsBasedSeedHostsProvider.LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING, - UnicastZenPing.DISCOVERY_SEED_RESOLVER_MAX_CONCURRENT_RESOLVERS_SETTING, - UnicastZenPing.DISCOVERY_SEED_RESOLVER_TIMEOUT_SETTING, - UnicastZenPing.LEGACY_DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING, - UnicastZenPing.LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT, + SeedHostsResolver.DISCOVERY_SEED_RESOLVER_MAX_CONCURRENT_RESOLVERS_SETTING, + SeedHostsResolver.DISCOVERY_SEED_RESOLVER_TIMEOUT_SETTING, + SeedHostsResolver.LEGACY_DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING, + SeedHostsResolver.LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT, SearchService.DEFAULT_KEEPALIVE_SETTING, SearchService.KEEPALIVE_INTERVAL_SETTING, SearchService.MAX_KEEPALIVE_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 1d2e54ae86d7c..8c4617443c5d6 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ 
b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -133,7 +133,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_GC_DELETES_SETTING, IndexSettings.INDEX_SOFT_DELETES_SETTING, IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, - IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING, + IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING, IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING, UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING, @@ -158,6 +158,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexModule.INDEX_STORE_PRE_LOAD_SETTING, IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING, FsDirectoryService.INDEX_LOCK_FACTOR_SETTING, + Store.FORCE_RAM_TERM_DICT, EngineConfig.INDEX_CODEC_SETTING, EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS, IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS, diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java index 3deb5f19c95fe..19a453f7e90fd 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java @@ -34,6 +34,7 @@ private KeyStoreCli() { subcommands.put("add", new AddStringKeyStoreCommand()); subcommands.put("add-file", new AddFileKeyStoreCommand()); subcommands.put("remove", new RemoveSettingKeyStoreCommand()); + subcommands.put("upgrade", new UpgradeKeyStoreCommand()); } public static void main(String[] args) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index e017e9e7ca93f..d2f8bda84d58b 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -19,6 +19,18 @@ package org.elasticsearch.common.settings; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.BufferedChecksumIndexInput; +import org.apache.lucene.store.ChecksumIndexInput; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.Randomness; + import javax.crypto.Cipher; import javax.crypto.CipherInputStream; import javax.crypto.CipherOutputStream; @@ -27,6 +39,7 @@ import javax.crypto.spec.GCMParameterSpec; import javax.crypto.spec.PBEKeySpec; import javax.crypto.spec.SecretKeySpec; + import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; @@ -56,18 +69,6 @@ import java.util.Set; import java.util.regex.Pattern; -import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.store.BufferedChecksumIndexInput; -import org.apache.lucene.store.ChecksumIndexInput; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexInput; -import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.store.SimpleFSDirectory; -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.UserException; -import 
org.elasticsearch.common.Randomness; - /** * A disk based container for sensitive settings in Elasticsearch. * @@ -84,17 +85,6 @@ private enum EntryType { FILE } - /** An entry in the keystore. The bytes are opaque and interpreted based on the entry type. */ - private static class Entry { - final EntryType type; - final byte[] bytes; - - Entry(EntryType type, byte[] bytes) { - this.type = type; - this.bytes = bytes; - } - } - /** * A regex for the valid characters that a setting name in the keystore may use. */ @@ -110,7 +100,7 @@ private static class Entry { private static final String KEYSTORE_FILENAME = "elasticsearch.keystore"; /** The version of the metadata written before the keystore data. */ - private static final int FORMAT_VERSION = 3; + static final int FORMAT_VERSION = 4; /** The oldest metadata format version that can be read. */ private static final int MIN_FORMAT_VERSION = 1; @@ -146,6 +136,7 @@ private static class Entry { // 1: initial version, ES 5.3 // 2: file setting, ES 5.4 // 3: FIPS compliant algos, ES 6.3 + // 4: remove distinction between string/files, ES 6.8/7.1 /** The metadata format version used to read the current keystore wrapper. */ private final int formatVersion; @@ -157,7 +148,7 @@ private static class Entry { private final byte[] dataBytes; /** The decrypted secret data. See {@link #decrypt(char[])}. */ - private final SetOnce> entries = new SetOnce<>(); + private final SetOnce> entries = new SetOnce<>(); private volatile boolean closed; private KeyStoreWrapper(int formatVersion, boolean hasPassword, byte[] dataBytes) { @@ -273,11 +264,13 @@ public static KeyStoreWrapper load(Path configDir) throws IOException { /** Upgrades the format of the keystore, if necessary. */ public static void upgrade(KeyStoreWrapper wrapper, Path configDir, char[] password) throws Exception { - // ensure keystore.seed exists - if (wrapper.getSettingNames().contains(SEED_SETTING.getKey())) { + if (wrapper.getFormatVersion() == FORMAT_VERSION && wrapper.getSettingNames().contains(SEED_SETTING.getKey())) { return; } - addBootstrapSeed(wrapper); + // add keystore.seed if necessary + if (wrapper.getSettingNames().contains(SEED_SETTING.getKey()) == false) { + addBootstrapSeed(wrapper); + } wrapper.save(configDir, password); } @@ -350,11 +343,14 @@ public void decrypt(char[] password) throws GeneralSecurityException, IOExceptio int numEntries = input.readInt(); while (numEntries-- > 0) { String setting = input.readUTF(); - EntryType entryType = EntryType.valueOf(input.readUTF()); + if (formatVersion == 3) { + // legacy, the keystore format would previously store the entry type + input.readUTF(); + } int entrySize = input.readInt(); byte[] entryBytes = new byte[entrySize]; input.readFully(entryBytes); - entries.get().put(setting, new Entry(entryType, entryBytes)); + entries.get().put(setting, entryBytes); } if (input.read() != -1) { throw new SecurityException("Keystore has been corrupted or tampered with"); @@ -373,12 +369,11 @@ private byte[] encrypt(char[] password, byte[] salt, byte[] iv) throws GeneralSe try (CipherOutputStream cipherStream = new CipherOutputStream(bytes, cipher); DataOutputStream output = new DataOutputStream(cipherStream)) { output.writeInt(entries.get().size()); - for (Map.Entry mapEntry : entries.get().entrySet()) { + for (Map.Entry mapEntry : entries.get().entrySet()) { output.writeUTF(mapEntry.getKey()); - Entry entry = mapEntry.getValue(); - output.writeUTF(entry.type.name()); - output.writeInt(entry.bytes.length); - output.write(entry.bytes); + byte[] 
entry = mapEntry.getValue(); + output.writeInt(entry.length); + output.write(entry); } } return bytes.toByteArray(); @@ -453,7 +448,7 @@ private void decryptLegacyEntries() throws GeneralSecurityException, IOException } Arrays.fill(chars, '\0'); - entries.get().put(setting, new Entry(settingType, bytes)); + entries.get().put(setting, bytes); } } @@ -526,11 +521,8 @@ public Set getSettingNames() { @Override public synchronized SecureString getString(String setting) { ensureOpen(); - Entry entry = entries.get().get(setting); - if (entry == null || entry.type != EntryType.STRING) { - throw new IllegalArgumentException("Secret setting " + setting + " is not a string"); - } - ByteBuffer byteBuffer = ByteBuffer.wrap(entry.bytes); + byte[] entry = entries.get().get(setting); + ByteBuffer byteBuffer = ByteBuffer.wrap(entry); CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer); return new SecureString(charBuffer.array()); } @@ -538,11 +530,8 @@ public synchronized SecureString getString(String setting) { @Override public synchronized InputStream getFile(String setting) { ensureOpen(); - Entry entry = entries.get().get(setting); - if (entry == null || entry.type != EntryType.FILE) { - throw new IllegalArgumentException("Secret setting " + setting + " is not a file"); - } - return new ByteArrayInputStream(entry.bytes); + byte[] entry = entries.get().get(setting); + return new ByteArrayInputStream(entry); } /** @@ -564,9 +553,9 @@ synchronized void setString(String setting, char[] value) { ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(CharBuffer.wrap(value)); byte[] bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit()); - Entry oldEntry = entries.get().put(setting, new Entry(EntryType.STRING, bytes)); + byte[] oldEntry = entries.get().put(setting, bytes); if (oldEntry != null) { - Arrays.fill(oldEntry.bytes, (byte)0); + Arrays.fill(oldEntry, (byte)0); } } @@ -575,18 +564,18 @@ synchronized void setFile(String setting, byte[] bytes) { ensureOpen(); validateSettingName(setting); - Entry oldEntry = entries.get().put(setting, new Entry(EntryType.FILE, Arrays.copyOf(bytes, bytes.length))); + byte[] oldEntry = entries.get().put(setting, Arrays.copyOf(bytes, bytes.length)); if (oldEntry != null) { - Arrays.fill(oldEntry.bytes, (byte)0); + Arrays.fill(oldEntry, (byte)0); } } /** Remove the given setting from the keystore. */ void remove(String setting) { ensureOpen(); - Entry oldEntry = entries.get().remove(setting); + byte[] oldEntry = entries.get().remove(setting); if (oldEntry != null) { - Arrays.fill(oldEntry.bytes, (byte)0); + Arrays.fill(oldEntry, (byte)0); } } @@ -601,8 +590,8 @@ private void ensureOpen() { public synchronized void close() { this.closed = true; if (null != entries.get() && entries.get().isEmpty() == false) { - for (Entry entry : entries.get().values()) { - Arrays.fill(entry.bytes, (byte) 0); + for (byte[] entry : entries.get().values()) { + Arrays.fill(entry, (byte) 0); } } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/UpgradeKeyStoreCommand.java b/server/src/main/java/org/elasticsearch/common/settings/UpgradeKeyStoreCommand.java new file mode 100644 index 0000000000000..6338f40ea05fa --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/settings/UpgradeKeyStoreCommand.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import joptsimple.OptionSet; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.env.Environment; + +/** + * A sub-command for the keystore CLI that enables upgrading the keystore format. + */ +public class UpgradeKeyStoreCommand extends EnvironmentAwareCommand { + + UpgradeKeyStoreCommand() { + super("Upgrade the keystore format"); + } + + @Override + protected void execute(final Terminal terminal, final OptionSet options, final Environment env) throws Exception { + final KeyStoreWrapper wrapper = KeyStoreWrapper.load(env.configFile()); + if (wrapper == null) { + throw new UserException( + ExitCodes.CONFIG, + "keystore does not exist at [" + KeyStoreWrapper.keystorePath(env.configFile()) + "]"); + } + wrapper.decrypt(new char[0]); + KeyStoreWrapper.upgrade(wrapper, env.configFile(), new char[0]); + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java index aeea14ee1f011..bf7999067b05a 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java @@ -47,7 +47,7 @@ public interface DateFormatter { * Parse the given input into millis-since-epoch. 
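Note on the keystore changes above: format version 4 drops the per-entry STRING/FILE marker, so every entry is stored as opaque bytes, and the keystore CLI's new `upgrade` subcommand decrypts with an empty passphrase and rewrites the file whenever the on-disk version is older or keystore.seed is missing. Below is only an illustrative sketch of the entry payload difference; the helper names are hypothetical (they mirror the writeUTF/writeInt/write calls in the diff) and the surrounding stream setup, encryption and checksumming are omitted.

    // hypothetical helpers, assuming java.io.DataOutputStream / java.io.IOException are imported
    static void writeEntryV3(DataOutputStream out, String name, byte[] bytes) throws IOException {
        out.writeUTF(name);
        out.writeUTF("STRING");      // v3 stored an entry type (STRING or FILE) per entry
        out.writeInt(bytes.length);
        out.write(bytes);
    }

    static void writeEntryV4(DataOutputStream out, String name, byte[] bytes) throws IOException {
        out.writeUTF(name);          // v4 stores name, length and raw bytes only
        out.writeInt(bytes.length);
        out.write(bytes);
    }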
*/ default long parseMillis(String input) { - return Instant.from(parse(input)).toEpochMilli(); + return DateFormatters.from(parse(input)).toInstant().toEpochMilli(); } /** diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index 1cbaaeb80b884..b17787800858f 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -91,7 +91,7 @@ public class DateFormatters { .optionalEnd() .optionalEnd() .optionalStart() - .appendZoneOrOffsetId() + .appendOffset("+HH:MM", "Z") .optionalEnd() .optionalEnd() .optionalEnd() @@ -110,7 +110,7 @@ public class DateFormatters { .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) .optionalStart() - .appendFraction(NANO_OF_SECOND, 3, 9, true) + .appendFraction(NANO_OF_SECOND, 1, 9, true) .optionalEnd() .optionalEnd() .optionalStart() @@ -163,7 +163,7 @@ public class DateFormatters { .optionalEnd() .optionalEnd() .optionalStart() - .appendZoneOrOffsetId() + .appendOffset("+HH:MM", "Z") .optionalEnd() .optionalEnd() .optionalEnd() @@ -175,6 +175,43 @@ public class DateFormatters { private static final DateFormatter STRICT_DATE_OPTIONAL_TIME_NANOS = new JavaDateFormatter("strict_date_optional_time_nanos", STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS, STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS); + /** + * Returns a ISO 8601 compatible date time formatter and parser. + * This is not fully compatible to the existing spec, which would require far more edge cases, but merely compatible with the + * existing joda time ISO data formater + */ + private static final DateFormatter ISO_8601 = new JavaDateFormatter("iso8601", STRICT_DATE_OPTIONAL_TIME_PRINTER, + new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .optionalStart() + .appendLiteral('T') + .optionalStart() + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 1, 9, true) + .optionalEnd() + .optionalStart() + .appendLiteral(",") + .appendFraction(NANO_OF_SECOND, 1, 9, false) + .optionalEnd() + .optionalEnd() + .optionalStart() + .appendZoneOrOffsetId() + .optionalEnd() + .optionalStart() + .append(TIME_ZONE_FORMATTER_NO_COLON) + .optionalEnd() + .optionalEnd() + .optionalEnd() + .optionalEnd() + .toFormatter(Locale.ROOT)); + ///////////////////////////////////////// // // BEGIN basic time formatters @@ -195,7 +232,7 @@ public class DateFormatters { * of hour, two digit second of minute, and time zone offset (HHmmssZ). */ private static final DateFormatter BASIC_TIME_NO_MILLIS = new JavaDateFormatter("basic_time_no_millis", - new DateTimeFormatterBuilder().append(BASIC_TIME_NO_MILLIS_BASE).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(BASIC_TIME_NO_MILLIS_BASE).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(BASIC_TIME_NO_MILLIS_BASE).appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(BASIC_TIME_NO_MILLIS_BASE).append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT) ); @@ -220,7 +257,7 @@ public class DateFormatters { * offset (HHmmss.SSSZ). 
*/ private static final DateFormatter BASIC_TIME = new JavaDateFormatter("basic_time", - new DateTimeFormatterBuilder().append(BASIC_TIME_PRINTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(BASIC_TIME_PRINTER).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(BASIC_TIME_FORMATTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(BASIC_TIME_FORMATTER).append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT) ); @@ -237,7 +274,7 @@ public class DateFormatters { * offset prefixed by 'T' ('T'HHmmss.SSSZ). */ private static final DateFormatter BASIC_T_TIME = new JavaDateFormatter("basic_t_time", - new DateTimeFormatterBuilder().append(BASIC_T_TIME_PRINTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(BASIC_T_TIME_PRINTER).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(BASIC_T_TIME_FORMATTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(BASIC_T_TIME_FORMATTER).append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT) ); @@ -248,7 +285,8 @@ public class DateFormatters { * ('T'HHmmssZ). */ private static final DateFormatter BASIC_T_TIME_NO_MILLIS = new JavaDateFormatter("basic_t_time_no_millis", - new DateTimeFormatterBuilder().appendLiteral("T").append(BASIC_TIME_NO_MILLIS_BASE).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().appendLiteral("T").append(BASIC_TIME_NO_MILLIS_BASE) + .appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendLiteral("T").append(BASIC_TIME_NO_MILLIS_BASE).appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendLiteral("T").append(BASIC_TIME_NO_MILLIS_BASE).append(TIME_ZONE_FORMATTER_NO_COLON) .toFormatter(Locale.ROOT) @@ -275,7 +313,7 @@ public class DateFormatters { * by a 'T' (yyyyMMdd'T'HHmmss.SSSZ). */ private static final DateFormatter BASIC_DATE_TIME = new JavaDateFormatter("basic_date_time", - new DateTimeFormatterBuilder().append(BASIC_DATE_TIME_PRINTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(BASIC_DATE_TIME_PRINTER).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(BASIC_DATE_TIME_FORMATTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(BASIC_DATE_TIME_FORMATTER).append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT) ); @@ -287,9 +325,9 @@ public class DateFormatters { * Returns a basic formatter that combines a basic date and time without millis, * separated by a 'T' (yyyyMMdd'T'HHmmssZ). 
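Note on the recurring printer change in this file: appendZoneOrOffsetId() is replaced by appendOffset("+HH:MM", "Z") on the printing formatter only, so output always carries a numeric offset (or "Z" for UTC), while the parser variants still accept zone ids. A minimal java.time sketch of the difference, independent of the Elasticsearch classes:

    DateTimeFormatter zoneOrOffset = new DateTimeFormatterBuilder()
        .appendPattern("HH:mm:ss").appendZoneOrOffsetId().toFormatter(Locale.ROOT);
    DateTimeFormatter offsetOnly = new DateTimeFormatterBuilder()
        .appendPattern("HH:mm:ss").appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT);

    ZonedDateTime berlin = ZonedDateTime.of(2019, 1, 1, 12, 0, 0, 0, ZoneId.of("Europe/Berlin"));
    zoneOrOffset.format(berlin);                                    // "12:00:00Europe/Berlin" - prints the region id
    offsetOnly.format(berlin);                                      // "12:00:00+01:00"        - always a numeric offset
    offsetOnly.format(berlin.withZoneSameInstant(ZoneOffset.UTC));  // "11:00:00Z"             - "Z" is the no-offset text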
*/ - private static final DateFormatter BASIC_DATE_TIME_NO_MILLIS = new JavaDateFormatter("basic_t_time_no_millis", + private static final DateFormatter BASIC_DATE_TIME_NO_MILLIS = new JavaDateFormatter("basic_date_time_no_millis", new DateTimeFormatterBuilder().append(BASIC_DATE_T).append(BASIC_TIME_NO_MILLIS_BASE) - .appendZoneOrOffsetId().toFormatter(Locale.ROOT), + .appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(BASIC_DATE_T).append(BASIC_TIME_NO_MILLIS_BASE) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(BASIC_DATE_T).append(BASIC_TIME_NO_MILLIS_BASE) @@ -309,7 +347,7 @@ public class DateFormatters { */ private static final DateFormatter BASIC_ORDINAL_DATE_TIME = new JavaDateFormatter("basic_ordinal_date_time", new DateTimeFormatterBuilder().appendPattern("yyyyDDD").append(BASIC_T_TIME_PRINTER) - .appendZoneOrOffsetId().toFormatter(Locale.ROOT), + .appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendPattern("yyyyDDD").append(BASIC_T_TIME_FORMATTER) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendPattern("yyyyDDD").append(BASIC_T_TIME_FORMATTER) @@ -323,7 +361,7 @@ public class DateFormatters { */ private static final DateFormatter BASIC_ORDINAL_DATE_TIME_NO_MILLIS = new JavaDateFormatter("basic_ordinal_date_time_no_millis", new DateTimeFormatterBuilder().appendPattern("yyyyDDD").appendLiteral("T").append(BASIC_TIME_NO_MILLIS_BASE) - .appendZoneOrOffsetId().toFormatter(Locale.ROOT), + .appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendPattern("yyyyDDD").appendLiteral("T").append(BASIC_TIME_NO_MILLIS_BASE) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendPattern("yyyyDDD").appendLiteral("T").append(BASIC_TIME_NO_MILLIS_BASE) @@ -383,7 +421,7 @@ public class DateFormatters { .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendZoneOrOffsetId() + .appendOffset("+HH:MM", "Z") .toFormatter(Locale.ROOT), new DateTimeFormatterBuilder() .append(STRICT_BASIC_WEEK_DATE_PRINTER) @@ -418,7 +456,7 @@ public class DateFormatters { .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(NANO_OF_SECOND, 3, 9, true) + .appendFraction(NANO_OF_SECOND, 1, 9, true) .appendZoneOrOffsetId() .toFormatter(Locale.ROOT), new DateTimeFormatterBuilder() @@ -427,7 +465,7 @@ public class DateFormatters { .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(NANO_OF_SECOND, 3, 9, true) + .appendFraction(NANO_OF_SECOND, 1, 9, true) .append(TIME_ZONE_FORMATTER_NO_COLON) .toFormatter(Locale.ROOT) ); @@ -479,12 +517,20 @@ public class DateFormatters { private static final DateFormatter STRICT_HOUR_MINUTE_SECOND = new JavaDateFormatter("strict_hour_minute_second", STRICT_HOUR_MINUTE_SECOND_FORMATTER); + private static final DateTimeFormatter STRICT_DATE_PRINTER = new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral('T') + .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) + .appendFraction(NANO_OF_SECOND, 3, 9, true) + .appendOffset("+HH:MM", "Z") + 
.toFormatter(Locale.ROOT); + private static final DateTimeFormatter STRICT_DATE_FORMATTER = new DateTimeFormatterBuilder() .append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral('T') .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) .optionalStart() - .appendFraction(NANO_OF_SECOND, 3, 9, true) + .appendFraction(NANO_OF_SECOND, 1, 9, true) .optionalEnd() .toFormatter(Locale.ROOT); @@ -492,8 +538,7 @@ public class DateFormatters { * Returns a formatter that combines a full date and time, separated by a 'T' * (yyyy-MM-dd'T'HH:mm:ss.SSSZZ). */ - private static final DateFormatter STRICT_DATE_TIME = new JavaDateFormatter("strict_date_time", - new DateTimeFormatterBuilder().append(STRICT_DATE_FORMATTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + private static final DateFormatter STRICT_DATE_TIME = new JavaDateFormatter("strict_date_time", STRICT_DATE_PRINTER, new DateTimeFormatterBuilder().append(STRICT_DATE_FORMATTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(STRICT_DATE_FORMATTER).append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT) ); @@ -512,7 +557,7 @@ public class DateFormatters { */ private static final DateFormatter STRICT_ORDINAL_DATE_TIME_NO_MILLIS = new JavaDateFormatter("strict_ordinal_date_time_no_millis", new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_NO_MILLIS_BASE) - .appendZoneOrOffsetId().toFormatter(Locale.ROOT), + .appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_NO_MILLIS_BASE) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_NO_MILLIS_BASE) @@ -531,7 +576,7 @@ public class DateFormatters { */ private static final DateFormatter STRICT_DATE_TIME_NO_MILLIS = new JavaDateFormatter("strict_date_time_no_millis", new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) - .appendZoneOrOffsetId().toFormatter(Locale.ROOT), + .appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) @@ -615,7 +660,7 @@ public class DateFormatters { private static final DateFormatter STRICT_HOUR_MINUTE = new JavaDateFormatter("strict_hour_minute", DateTimeFormatter.ofPattern("HH:mm", Locale.ROOT)); - private static final DateTimeFormatter STRICT_ORDINAL_DATE_TIME_FORMATTER_BASE = new DateTimeFormatterBuilder() + private static final DateTimeFormatter STRICT_ORDINAL_DATE_TIME_PRINTER = new DateTimeFormatterBuilder() .appendValue(ChronoField.YEAR, 4, 10, SignStyle.EXCEEDS_PAD) .appendLiteral('-') .appendValue(DAY_OF_YEAR, 3, 3, SignStyle.NOT_NEGATIVE) @@ -628,13 +673,26 @@ public class DateFormatters { .optionalEnd() .toFormatter(Locale.ROOT); + private static final DateTimeFormatter STRICT_ORDINAL_DATE_TIME_FORMATTER_BASE = new DateTimeFormatterBuilder() + .appendValue(ChronoField.YEAR, 4, 10, SignStyle.EXCEEDS_PAD) + .appendLiteral('-') + .appendValue(DAY_OF_YEAR, 3, 3, SignStyle.NOT_NEGATIVE) + .appendLiteral('T') + .appendPattern("HH:mm") + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .appendFraction(NANO_OF_SECOND, 1, 9, true) + .optionalEnd() + .toFormatter(Locale.ROOT); + /* * Returns a formatter for a full ordinal date and time, using a four * digit year and three digit dayOfYear (yyyy-DDD'T'HH:mm:ss.SSSZZ). 
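Note on the other recurring change: appendFraction(NANO_OF_SECOND, 1, 9, true) lowers the minimum number of fractional-second digits from three to one. A minimal java.time sketch, independent of the Elasticsearch formatters:

    DateTimeFormatter oneToNineFractionDigits = new DateTimeFormatterBuilder()
        .appendPattern("HH:mm:ss")
        .appendFraction(ChronoField.NANO_OF_SECOND, 1, 9, true)
        .toFormatter(Locale.ROOT);
    LocalTime.parse("12:00:00.5", oneToNineFractionDigits);  // ok: nano-of-second = 500_000_000
    // with a minimum width of 3, strict parsing demands at least three digits after the
    // decimal point, so "12:00:00.5" is rejected and only "12:00:00.500" would parse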
*/ private static final DateFormatter STRICT_ORDINAL_DATE_TIME = new JavaDateFormatter("strict_ordinal_date_time", - new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_FORMATTER_BASE) - .appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_PRINTER) + .appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_FORMATTER_BASE) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_FORMATTER_BASE) @@ -666,7 +724,7 @@ public class DateFormatters { * time zone offset (HH:mm:ss.SSSZZ). */ private static final DateFormatter STRICT_TIME = new JavaDateFormatter("strict_time", - new DateTimeFormatterBuilder().append(STRICT_TIME_PRINTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(STRICT_TIME_PRINTER).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(STRICT_TIME_FORMATTER_BASE).appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(STRICT_TIME_FORMATTER_BASE).append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT) ); @@ -677,7 +735,7 @@ public class DateFormatters { * time zone offset prefixed by 'T' ('T'HH:mm:ss.SSSZZ). */ private static final DateFormatter STRICT_T_TIME = new JavaDateFormatter("strict_t_time", - new DateTimeFormatterBuilder().appendLiteral('T').append(STRICT_TIME_PRINTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().appendLiteral('T').append(STRICT_TIME_PRINTER).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendLiteral('T').append(STRICT_TIME_FORMATTER_BASE) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendLiteral('T').append(STRICT_TIME_FORMATTER_BASE) @@ -697,7 +755,7 @@ public class DateFormatters { * hour, two digit second of minute, and time zone offset (HH:mm:ssZZ). 
*/ private static final DateFormatter STRICT_TIME_NO_MILLIS = new JavaDateFormatter("strict_time_no_millis", - new DateTimeFormatterBuilder().append(STRICT_TIME_NO_MILLIS_BASE).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(STRICT_TIME_NO_MILLIS_BASE).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(STRICT_TIME_NO_MILLIS_BASE).appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(STRICT_TIME_NO_MILLIS_BASE).append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT) ); @@ -709,7 +767,7 @@ public class DateFormatters { */ private static final DateFormatter STRICT_T_TIME_NO_MILLIS = new JavaDateFormatter("strict_t_time_no_millis", new DateTimeFormatterBuilder().appendLiteral("T").append(STRICT_TIME_NO_MILLIS_BASE) - .appendZoneOrOffsetId().toFormatter(Locale.ROOT), + .appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendLiteral("T").append(STRICT_TIME_NO_MILLIS_BASE) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendLiteral("T").append(STRICT_TIME_NO_MILLIS_BASE) @@ -742,7 +800,7 @@ public class DateFormatters { */ private static final DateFormatter STRICT_WEEK_DATE_TIME_NO_MILLIS = new JavaDateFormatter("strict_week_date_time_no_millis", new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T) - .append(STRICT_TIME_NO_MILLIS_BASE).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + .append(STRICT_TIME_NO_MILLIS_BASE).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T) .append(STRICT_TIME_NO_MILLIS_BASE).appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T) @@ -754,7 +812,8 @@ public class DateFormatters { * separated by a 'T' (xxxx-'W'ww-e'T'HH:mm:ss.SSSZZ). 
*/ private static final DateFormatter STRICT_WEEK_DATE_TIME = new JavaDateFormatter("strict_week_date_time", - new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T).append(STRICT_TIME_PRINTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T) + .append(STRICT_TIME_PRINTER).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T).append(STRICT_TIME_FORMATTER_BASE) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T).append(STRICT_TIME_FORMATTER_BASE) @@ -1052,7 +1111,7 @@ public class DateFormatters { */ private static final DateFormatter DATE_TIME = new JavaDateFormatter("date_time", STRICT_DATE_OPTIONAL_TIME_PRINTER, - new DateTimeFormatterBuilder().append(DATE_TIME_FORMATTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(DATE_TIME_FORMATTER).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(DATE_TIME_FORMATTER).append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT) ); @@ -1097,7 +1156,7 @@ public class DateFormatters { */ private static final DateFormatter DATE_TIME_NO_MILLIS = new JavaDateFormatter("date_time_no_millis", DATE_TIME_NO_MILLIS_PRINTER, - new DateTimeFormatterBuilder().append(DATE_TIME_PREFIX).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(DATE_TIME_PREFIX).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(DATE_TIME_PREFIX).append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(DATE_TIME_PREFIX) .optionalStart().appendZoneOrOffsetId().optionalEnd().toFormatter(Locale.ROOT), @@ -1159,8 +1218,8 @@ public class DateFormatters { * digit year and three digit dayOfYear (yyyy-DDD'T'HH:mm:ss.SSSZZ). */ private static final DateFormatter ORDINAL_DATE_TIME = new JavaDateFormatter("ordinal_date_time", - new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_FORMATTER_BASE) - .appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_PRINTER) + .appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(ORDINAL_DATE_TIME_FORMATTER_BASE) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(ORDINAL_DATE_TIME_FORMATTER_BASE) @@ -1179,7 +1238,7 @@ public class DateFormatters { */ private static final DateFormatter ORDINAL_DATE_TIME_NO_MILLIS = new JavaDateFormatter("ordinal_date_time_no_millis", new DateTimeFormatterBuilder().append(STRICT_ORDINAL_DATE_TIME_NO_MILLIS_BASE) - .appendZoneOrOffsetId().toFormatter(Locale.ROOT), + .appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(ORDINAL_DATE_TIME_NO_MILLIS_BASE) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(ORDINAL_DATE_TIME_NO_MILLIS_BASE) @@ -1191,7 +1250,8 @@ public class DateFormatters { * separated by a 'T' (xxxx-'W'ww-e'T'HH:mm:ss.SSSZZ). 
*/ private static final DateFormatter WEEK_DATE_TIME = new JavaDateFormatter("week_date_time", - new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T).append(STRICT_TIME_PRINTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T) + .append(STRICT_TIME_PRINTER).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(WEEK_DATE_FORMATTER).appendLiteral("T").append(TIME_PREFIX) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(WEEK_DATE_FORMATTER).appendLiteral("T").append(TIME_PREFIX) @@ -1204,7 +1264,7 @@ public class DateFormatters { */ private static final DateFormatter WEEK_DATE_TIME_NO_MILLIS = new JavaDateFormatter("week_date_time_no_millis", new DateTimeFormatterBuilder().append(ISO_WEEK_DATE_T) - .append(STRICT_TIME_NO_MILLIS_BASE).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + .append(STRICT_TIME_NO_MILLIS_BASE).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(WEEK_DATE_FORMATTER).append(T_TIME_NO_MILLIS_FORMATTER) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(WEEK_DATE_FORMATTER).append(T_TIME_NO_MILLIS_FORMATTER) @@ -1246,7 +1306,7 @@ public class DateFormatters { * time zone offset (HH:mm:ss.SSSZZ). */ private static final DateFormatter TIME = new JavaDateFormatter("time", - new DateTimeFormatterBuilder().append(STRICT_TIME_PRINTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(STRICT_TIME_PRINTER).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(TIME_PREFIX).appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(TIME_PREFIX).append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT) ); @@ -1256,7 +1316,7 @@ public class DateFormatters { * hour, two digit second of minute, andtime zone offset (HH:mm:ssZZ). */ private static final DateFormatter TIME_NO_MILLIS = new JavaDateFormatter("time_no_millis", - new DateTimeFormatterBuilder().append(STRICT_TIME_NO_MILLIS_BASE).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().append(STRICT_TIME_NO_MILLIS_BASE).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(TIME_NO_MILLIS_FORMATTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(TIME_NO_MILLIS_FORMATTER).append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT) ); @@ -1267,7 +1327,7 @@ public class DateFormatters { * time zone offset prefixed by 'T' ('T'HH:mm:ss.SSSZZ). 
*/ private static final DateFormatter T_TIME = new JavaDateFormatter("t_time", - new DateTimeFormatterBuilder().appendLiteral('T').append(STRICT_TIME_PRINTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder().appendLiteral('T').append(STRICT_TIME_PRINTER).appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendLiteral("T").append(TIME_PREFIX) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendLiteral("T").append(TIME_PREFIX) @@ -1281,7 +1341,7 @@ public class DateFormatters { */ private static final DateFormatter T_TIME_NO_MILLIS = new JavaDateFormatter("t_time_no_millis", new DateTimeFormatterBuilder().appendLiteral("T").append(STRICT_TIME_NO_MILLIS_BASE) - .appendZoneOrOffsetId().toFormatter(Locale.ROOT), + .appendOffset("+HH:MM", "Z").toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(T_TIME_NO_MILLIS_FORMATTER).appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().append(T_TIME_NO_MILLIS_FORMATTER).append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT) ); @@ -1363,7 +1423,9 @@ static DateFormatter forPattern(String input) { throw new IllegalArgumentException("No date pattern provided"); } - if ("basicDate".equals(input) || "basic_date".equals(input)) { + if ("iso8601".equals(input)) { + return ISO_8601; + } else if ("basicDate".equals(input) || "basic_date".equals(input)) { return BASIC_DATE; } else if ("basicDateTime".equals(input) || "basic_date_time".equals(input)) { return BASIC_DATE_TIME; @@ -1543,7 +1605,7 @@ static JavaDateFormatter merge(String pattern, List formatters) { if (printer == null) { printer = javaDateFormatter.getPrinter(); } - dateTimeFormatters.add(javaDateFormatter.getParser()); + dateTimeFormatters.addAll(javaDateFormatter.getParsers()); roundupBuilder.appendOptional(javaDateFormatter.getRoundupParser()); } DateTimeFormatter roundUpParser = roundupBuilder.toFormatter(Locale.ROOT); @@ -1590,7 +1652,7 @@ public static ZonedDateTime from(TemporalAccessor accessor) { if (zoneId == null) { zoneId = ZoneOffset.UTC; } - + LocalDate localDate = accessor.query(TemporalQueries.localDate()); LocalTime localTime = accessor.query(TemporalQueries.localTime()); boolean isLocalDateSet = localDate != null; diff --git a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java index 20646ae14599a..bec8fd927c2cc 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java @@ -23,14 +23,22 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.joda.time.DateTimeZone; +import java.time.Clock; +import java.time.Duration; import java.time.Instant; import java.time.ZoneId; import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; +import static org.elasticsearch.common.time.DateUtilsRounding.getMonthOfYear; +import static org.elasticsearch.common.time.DateUtilsRounding.getTotalMillisByYearMonth; +import static org.elasticsearch.common.time.DateUtilsRounding.getYear; +import static org.elasticsearch.common.time.DateUtilsRounding.utcMillisAtStartOfYear; + public class DateUtils { public static DateTimeZone zoneIdToDateTimeZone(ZoneId zoneId) { if (zoneId == null) { @@ -139,4 +147,95 @@ public static long toMilliSeconds(long nanoSecondsSinceEpoch) { return 
nanoSecondsSinceEpoch / 1_000_000; } + + /** + * Rounds the given UTC milliseconds since the epoch down to the next unit millis + * + * Note: This does not check for correctness of the result, as this only works with units smaller than or equal to a day + * In order to ensure the performance of this method, there are no guards or checks in it + * + * @param utcMillis the milliseconds since the epoch + * @param unitMillis the unit to round to + * @return the rounded milliseconds since the epoch + */ + public static long roundFloor(long utcMillis, final long unitMillis) { + if (utcMillis >= 0) { + return utcMillis - utcMillis % unitMillis; + } else { + utcMillis += 1; + return utcMillis - utcMillis % unitMillis - unitMillis; + } + } + + /** + * Round down to the beginning of the quarter of the year of the specified time + * @param utcMillis the milliseconds since the epoch + * @return The milliseconds since the epoch rounded down to the quarter of the year + */ + public static long roundQuarterOfYear(final long utcMillis) { + int year = getYear(utcMillis); + int month = getMonthOfYear(utcMillis, year); + int firstMonthOfQuarter = (((month-1) / 3) * 3) + 1; + return DateUtils.of(year, firstMonthOfQuarter); + } + + /** + * Round down to the beginning of the month of the year of the specified time + * @param utcMillis the milliseconds since the epoch + * @return The milliseconds since the epoch rounded down to the month of the year + */ + public static long roundMonthOfYear(final long utcMillis) { + int year = getYear(utcMillis); + int month = getMonthOfYear(utcMillis, year); + return DateUtils.of(year, month); + } + + /** + * Round down to the beginning of the year of the specified time + * @param utcMillis the milliseconds since the epoch + * @return The milliseconds since the epoch rounded down to the beginning of the year + */ + public static long roundYear(final long utcMillis) { + int year = getYear(utcMillis); + return utcMillisAtStartOfYear(year); + } + + /** + * Round down to the beginning of the week based on week year of the specified time + * @param utcMillis the milliseconds since the epoch + * @return The milliseconds since the epoch rounded down to the beginning of the week based on week year + */ + public static long roundWeekOfWeekYear(final long utcMillis) { + return roundFloor(utcMillis + 3 * 86400 * 1000L, 604800000) - 3 * 86400 * 1000L; + } + + /** + * Return the first day of the month + * @param year the year to return + * @param month the month to return, ranging from 1-12 + * @return the milliseconds since the epoch of the first day of the month in the year + */ + private static long of(final int year, final int month) { + long millis = utcMillisAtStartOfYear(year); + millis += getTotalMillisByYearMonth(year, month); + return millis; + } + + /** + * Returns the current UTC date-time with milliseconds precision. + * In Java 9+ (as opposed to Java 8) the {@code Clock} implementation uses the system's best clock implementation (which could mean + * that the precision of the clock can be milliseconds, microseconds or nanoseconds), whereas in Java 8 + * {@code System.currentTimeMillis()} is always used. To account for these differences, this method defines a new {@code Clock} + * which will offer a value for {@code ZonedDateTime.now()} set to always have milliseconds precision. 
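Worked values for the rounding helpers above, computed by hand from their definitions (roundFloor and roundWeekOfWeekYear only; the 3-day shift works because the epoch, 1970-01-01, fell on a Thursday and ISO weeks start on Monday):

    long hour = 3_600_000L;                        // millis per hour
    DateUtils.roundFloor(5_400_000L, hour);        //  3_600_000  -> 1970-01-01T01:00Z
    DateUtils.roundFloor(-1L, hour);               // -3_600_000  -> 1969-12-31T23:00Z (negative values still round down, not toward zero)

    // 1970-01-02T00:00Z (86_400_000) is a Friday; its ISO week begins on Monday 1969-12-29
    DateUtils.roundWeekOfWeekYear(86_400_000L);    // -259_200_000 -> 1969-12-29T00:00Z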
+ * + * @return {@link ZonedDateTime} instance for the current date-time with milliseconds precision in UTC + */ + public static ZonedDateTime nowWithMillisResolution() { + return nowWithMillisResolution(Clock.systemUTC()); + } + + public static ZonedDateTime nowWithMillisResolution(Clock clock) { + Clock millisResolutionClock = Clock.tick(clock, Duration.ofMillis(1)); + return ZonedDateTime.now(millisResolutionClock); + } } diff --git a/server/src/main/java/org/elasticsearch/common/time/DateUtilsRounding.java b/server/src/main/java/org/elasticsearch/common/time/DateUtilsRounding.java new file mode 100644 index 0000000000000..d9c0a9597b8a1 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/DateUtilsRounding.java @@ -0,0 +1,182 @@ +/* + * Copyright 2001-2014 Stephen Colebourne + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.elasticsearch.common.time; + +/** + * This class has been copied from different locations within the joda time package, as + * these methods fast when used for rounding, as they do not require conversion to java + * time objects + * + * This code has been copied from jodatime 2.10.1 + * The source can be found at https://github.com/JodaOrg/joda-time/tree/v2.10.1 + * + * See following methods have been copied (along with required helper variables) + * + * - org.joda.time.chrono.GregorianChronology.calculateFirstDayOfYearMillis(int year) + * - org.joda.time.chrono.BasicChronology.getYear(int year) + * - org.joda.time.chrono.BasicGJChronology.getMonthOfYear(long utcMillis, int year) + * - org.joda.time.chrono.BasicGJChronology.getTotalMillisByYearMonth(int year, int month) + */ +class DateUtilsRounding { + + private static final int DAYS_0000_TO_1970 = 719527; + private static final int MILLIS_PER_DAY = 86_400_000; + private static final long MILLIS_PER_YEAR = 31556952000L; + + // see org.joda.time.chrono.BasicGJChronology + private static final long[] MIN_TOTAL_MILLIS_BY_MONTH_ARRAY; + private static final long[] MAX_TOTAL_MILLIS_BY_MONTH_ARRAY; + private static final int[] MIN_DAYS_PER_MONTH_ARRAY = { + 31,28,31,30,31,30,31,31,30,31,30,31 + }; + private static final int[] MAX_DAYS_PER_MONTH_ARRAY = { + 31,29,31,30,31,30,31,31,30,31,30,31 + }; + + static { + MIN_TOTAL_MILLIS_BY_MONTH_ARRAY = new long[12]; + MAX_TOTAL_MILLIS_BY_MONTH_ARRAY = new long[12]; + + long minSum = 0; + long maxSum = 0; + for (int i = 0; i < 11; i++) { + long millis = MIN_DAYS_PER_MONTH_ARRAY[i] + * (long) MILLIS_PER_DAY; + minSum += millis; + MIN_TOTAL_MILLIS_BY_MONTH_ARRAY[i + 1] = minSum; + + millis = MAX_DAYS_PER_MONTH_ARRAY[i] + * (long) MILLIS_PER_DAY; + maxSum += millis; + MAX_TOTAL_MILLIS_BY_MONTH_ARRAY[i + 1] = maxSum; + } + } + + /** + * calculates the first day of a year in milliseconds since the epoch (assuming UTC) + * + * @param year the year + * @return the milliseconds since the epoch of the first of january at midnight of the specified year + */ + // see org.joda.time.chrono.GregorianChronology.calculateFirstDayOfYearMillis + static long 
utcMillisAtStartOfYear(final int year) { + // Initial value is just temporary. + int leapYears = year / 100; + if (year < 0) { + // Add 3 before shifting right since /4 and >>2 behave differently + // on negative numbers. When the expression is written as + // (year / 4) - (year / 100) + (year / 400), + // it works for both positive and negative values, except this optimization + // eliminates two divisions. + leapYears = ((year + 3) >> 2) - leapYears + ((leapYears + 3) >> 2) - 1; + } else { + leapYears = (year >> 2) - leapYears + (leapYears >> 2); + if (isLeapYear(year)) { + leapYears--; + } + } + + return (year * 365L + (leapYears - DAYS_0000_TO_1970)) * MILLIS_PER_DAY; // millis per day + } + + private static boolean isLeapYear(final int year) { + return ((year & 3) == 0) && ((year % 100) != 0 || (year % 400) == 0); + } + + private static final long AVERAGE_MILLIS_PER_YEAR_DIVIDED_BY_TWO = MILLIS_PER_YEAR / 2; + private static final long APPROX_MILLIS_AT_EPOCH_DIVIDED_BY_TWO = (1970L * MILLIS_PER_YEAR) / 2; + + // see org.joda.time.chrono.BasicChronology + static int getYear(final long utcMillis) { + // Get an initial estimate of the year, and the millis value that + // represents the start of that year. Then verify estimate and fix if + // necessary. + + // Initial estimate uses values divided by two to avoid overflow. + long unitMillis = AVERAGE_MILLIS_PER_YEAR_DIVIDED_BY_TWO; + long i2 = (utcMillis >> 1) + APPROX_MILLIS_AT_EPOCH_DIVIDED_BY_TWO; + if (i2 < 0) { + i2 = i2 - unitMillis + 1; + } + int year = (int) (i2 / unitMillis); + + long yearStart = utcMillisAtStartOfYear(year); + long diff = utcMillis - yearStart; + + if (diff < 0) { + year--; + } else if (diff >= MILLIS_PER_DAY * 365L) { + // One year may need to be added to fix estimate. + long oneYear; + if (isLeapYear(year)) { + oneYear = MILLIS_PER_DAY * 366L; + } else { + oneYear = MILLIS_PER_DAY * 365L; + } + + yearStart += oneYear; + + if (yearStart <= utcMillis) { + // Didn't go too far, so actually add one year. + year++; + } + } + + return year; + } + + // see org.joda.time.chrono.BasicGJChronology + static int getMonthOfYear(final long utcMillis, final int year) { + // Perform a binary search to get the month. To make it go even faster, + // compare using ints instead of longs. The number of milliseconds per + // year exceeds the limit of a 32-bit int's capacity, so divide by + // 1024. No precision is lost (except time of day) since the number of + // milliseconds per day contains 1024 as a factor. After the division, + // the instant isn't measured in milliseconds, but in units of + // (128/125)seconds. + + int i = (int)((utcMillis - utcMillisAtStartOfYear(year)) >> 10); + + // There are 86400000 milliseconds per day, but divided by 1024 is + // 84375. There are 84375 (128/125)seconds per day. + + return + (isLeapYear(year)) + ? ((i < 182 * 84375) + ? ((i < 91 * 84375) + ? ((i < 31 * 84375) ? 1 : (i < 60 * 84375) ? 2 : 3) + : ((i < 121 * 84375) ? 4 : (i < 152 * 84375) ? 5 : 6)) + : ((i < 274 * 84375) + ? ((i < 213 * 84375) ? 7 : (i < 244 * 84375) ? 8 : 9) + : ((i < 305 * 84375) ? 10 : (i < 335 * 84375) ? 11 : 12))) + : ((i < 181 * 84375) + ? ((i < 90 * 84375) + ? ((i < 31 * 84375) ? 1 : (i < 59 * 84375) ? 2 : 3) + : ((i < 120 * 84375) ? 4 : (i < 151 * 84375) ? 5 : 6)) + : ((i < 273 * 84375) + ? ((i < 212 * 84375) ? 7 : (i < 243 * 84375) ? 8 : 9) + : ((i < 304 * 84375) ? 10 : (i < 334 * 84375) ? 
11 : 12))); + } + + // see org.joda.time.chrono.BasicGJChronology + static long getTotalMillisByYearMonth(final int year, final int month) { + if (isLeapYear(year)) { + return MAX_TOTAL_MILLIS_BY_MONTH_ARRAY[month - 1]; + } else { + return MIN_TOTAL_MILLIS_BY_MONTH_ARRAY[month - 1]; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java index bcdf9cbdcf674..d0f4200b3bafe 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.Strings; +import java.text.ParsePosition; import java.time.ZoneId; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; @@ -29,7 +30,10 @@ import java.time.temporal.TemporalAccessor; import java.time.temporal.TemporalField; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -39,6 +43,7 @@ class JavaDateFormatter implements DateFormatter { // base fields which should be used for default parsing, when we round up for date math private static final Map ROUND_UP_BASE_FIELDS = new HashMap<>(6); + { ROUND_UP_BASE_FIELDS.put(ChronoField.MONTH_OF_YEAR, 1L); ROUND_UP_BASE_FIELDS.put(ChronoField.DAY_OF_MONTH, 1L); @@ -50,22 +55,15 @@ class JavaDateFormatter implements DateFormatter { private final String format; private final DateTimeFormatter printer; - private final DateTimeFormatter parser; + private final List parsers; private final DateTimeFormatter roundupParser; - private JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter roundupParser, DateTimeFormatter parser) { - this.format = format; - this.printer = printer; - this.roundupParser = roundupParser; - this.parser = parser; - } - JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) { this(format, printer, builder -> ROUND_UP_BASE_FIELDS.forEach(builder::parseDefaulting), parsers); } JavaDateFormatter(String format, DateTimeFormatter printer, Consumer roundupParserConsumer, - DateTimeFormatter... parsers) { + DateTimeFormatter... 
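A quick hand-computed sanity check of utcMillisAtStartOfYear above (the leap-year count copied from Joda's GregorianChronology):

    // year 1970: leapYears = (1970 >> 2) - 19 + (19 >> 2) = 492 - 19 + 4 = 477
    //            (1970 * 365 + 477 - 719527) * 86_400_000 = 0                     -> the epoch itself
    // year 2019: leapYears = (2019 >> 2) - 20 + (20 >> 2) = 504 - 20 + 5 = 489
    //            (2019 * 365 + 489 - 719527) * 86_400_000 = 17_897 days
    //                                                     = 1_546_300_800_000 ms  -> 2019-01-01T00:00Z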
parsers) { if (printer == null) { throw new IllegalArgumentException("printer may not be null"); } @@ -79,26 +77,21 @@ private JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeForm } this.printer = printer; this.format = format; + if (parsers.length == 0) { - this.parser = printer; - } else if (parsers.length == 1) { - this.parser = parsers[0]; + this.parsers = Collections.singletonList(printer); } else { - DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); - for (DateTimeFormatter parser : parsers) { - builder.appendOptional(parser); - } - this.parser = builder.toFormatter(Locale.ROOT); + this.parsers = Arrays.asList(parsers); } DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); if (format.contains("||") == false) { - builder.append(this.parser); + builder.append(this.parsers.get(0)); } roundupParserConsumer.accept(builder); - DateTimeFormatter roundupFormatter = builder.toFormatter(parser.getLocale()); + DateTimeFormatter roundupFormatter = builder.toFormatter(locale()); if (printer.getZone() != null) { - roundupFormatter = roundupFormatter.withZone(printer.getZone()); + roundupFormatter = roundupFormatter.withZone(zone()); } this.roundupParser = roundupFormatter; } @@ -107,10 +100,6 @@ DateTimeFormatter getRoundupParser() { return roundupParser; } - DateTimeFormatter getParser() { - return parser; - } - DateTimeFormatter getPrinter() { return printer; } @@ -122,35 +111,69 @@ public TemporalAccessor parse(String input) { } try { - return parser.parse(input); + return doParse(input); } catch (DateTimeParseException e) { throw new IllegalArgumentException("failed to parse date field [" + input + "] with format [" + format + "]", e); } } + /** + * Attempts to parse the input without throwing an exception. If multiple parsers are provided, + * it will continue iterating if the previous parser failed. The pattern must fully match, meaning the whole input was consumed. + * This also means that this method depends on DateTimeFormatter.ClassicFormat.parseObject + * which does not throw exceptions when parsing fails. + * + * The approach with a collection of parsers was taken because java-time requires ordering of optional (composite) + * patterns. Joda does not suffer from this. + * https://bugs.openjdk.java.net/browse/JDK-8188771 + * + * @param input An arbitrary string resembling the string representation of a date or time + * @return a TemporalAccessor if parsing was successful. 
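A minimal java.time sketch of the whole-input check this javadoc describes; Format.parseObject reports success even when only a prefix of the input matched, which is why the index in the ParsePosition has to be compared with the input length:

    DateTimeFormatter dateOnly = DateTimeFormatter.ofPattern("yyyy-MM-dd", Locale.ROOT);
    ParsePosition pos = new ParsePosition(0);
    Object parsed = dateOnly.toFormat().parseObject("2019-01-01T12:00", pos);  // no exception is thrown
    // parsed != null, but pos.getIndex() == 10 while the input length is 16,
    // so this parser must be treated as a failure and the next parser in the list tried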
+ * @throws DateTimeParseException when unable to parse with any parsers + */ + private TemporalAccessor doParse(String input) { + if (parsers.size() > 1) { + for (DateTimeFormatter formatter : parsers) { + ParsePosition pos = new ParsePosition(0); + Object object = formatter.toFormat().parseObject(input, pos); + if (parsingSucceeded(object, input, pos) == true) { + return (TemporalAccessor) object; + } + } + throw new DateTimeParseException("Failed to parse with all enclosed parsers", input, 0); + } + return this.parsers.get(0).parse(input); + } + + private boolean parsingSucceeded(Object object, String input, ParsePosition pos) { + return object != null && pos.getIndex() == input.length(); + } + @Override public DateFormatter withZone(ZoneId zoneId) { // shortcurt to not create new objects unnecessarily - if (zoneId.equals(parser.getZone())) { + if (zoneId.equals(zone())) { return this; } - return new JavaDateFormatter(format, printer.withZone(zoneId), roundupParser.withZone(zoneId), parser.withZone(zoneId)); + return new JavaDateFormatter(format, printer.withZone(zoneId), + parsers.stream().map(p -> p.withZone(zoneId)).toArray(size -> new DateTimeFormatter[size])); } @Override public DateFormatter withLocale(Locale locale) { // shortcurt to not create new objects unnecessarily - if (locale.equals(parser.getLocale())) { + if (locale.equals(locale())) { return this; } - return new JavaDateFormatter(format, printer.withLocale(locale), roundupParser.withLocale(locale), parser.withLocale(locale)); + return new JavaDateFormatter(format, printer.withLocale(locale), + parsers.stream().map(p -> p.withLocale(locale)).toArray(size -> new DateTimeFormatter[size])); } @Override public String format(TemporalAccessor accessor) { - return printer.format(accessor); + return printer.format(DateFormatters.from(accessor)); } @Override @@ -170,7 +193,7 @@ public ZoneId zone() { @Override public DateMathParser toDateMathParser() { - return new JavaDateMathParser(format, parser, roundupParser); + return new JavaDateMathParser(format, this, getRoundupParser()); } @Override @@ -186,12 +209,16 @@ public boolean equals(Object obj) { JavaDateFormatter other = (JavaDateFormatter) obj; return Objects.equals(format, other.format) && - Objects.equals(locale(), other.locale()) && - Objects.equals(this.printer.getZone(), other.printer.getZone()); + Objects.equals(locale(), other.locale()) && + Objects.equals(this.printer.getZone(), other.printer.getZone()); } @Override public String toString() { return String.format(Locale.ROOT, "format[%s] locale[%s]", format, locale()); } + + Collection getParsers() { + return parsers; + } } diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java index 05e1e75efca39..dc7c195e2fd6c 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java @@ -35,6 +35,7 @@ import java.time.temporal.TemporalAdjusters; import java.time.temporal.TemporalQueries; import java.util.Objects; +import java.util.function.Function; import java.util.function.LongSupplier; /** @@ -46,11 +47,11 @@ */ public class JavaDateMathParser implements DateMathParser { - private final DateTimeFormatter formatter; + private final JavaDateFormatter formatter; private final DateTimeFormatter roundUpFormatter; private final String format; - JavaDateMathParser(String format, DateTimeFormatter formatter, 
DateTimeFormatter roundUpFormatter) { + JavaDateMathParser(String format, JavaDateFormatter formatter, DateTimeFormatter roundUpFormatter) { this.format = format; Objects.requireNonNull(formatter); this.formatter = formatter; @@ -215,12 +216,12 @@ private Instant parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNo throw new ElasticsearchParseException("cannot parse empty date"); } - DateTimeFormatter formatter = roundUpIfNoTime ? this.roundUpFormatter : this.formatter; + Function formatter = roundUpIfNoTime ? this.roundUpFormatter::parse : this.formatter::parse; try { if (timeZone == null) { - return DateFormatters.from(formatter.parse(value)).toInstant(); + return DateFormatters.from(formatter.apply(value)).toInstant(); } else { - TemporalAccessor accessor = formatter.parse(value); + TemporalAccessor accessor = formatter.apply(value); ZoneId zoneId = TemporalQueries.zone().queryFrom(accessor); if (zoneId != null) { timeZone = zoneId; @@ -228,7 +229,7 @@ private Instant parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNo return DateFormatters.from(accessor).withZoneSameLocal(timeZone).toInstant(); } - } catch (DateTimeParseException e) { + } catch (IllegalArgumentException | DateTimeParseException e) { throw new ElasticsearchParseException("failed to parse date field [{}] with format [{}]: [{}]", e, value, format, e.getMessage()); } diff --git a/server/src/main/java/org/elasticsearch/common/unit/Fuzziness.java b/server/src/main/java/org/elasticsearch/common/unit/Fuzziness.java index aee0e9cd02ada..834277b5c7282 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/Fuzziness.java +++ b/server/src/main/java/org/elasticsearch/common/unit/Fuzziness.java @@ -186,7 +186,7 @@ public int asDistance() { } public int asDistance(String text) { - if (this.equals(AUTO)) { //AUTO + if (this.equals(AUTO) || isAutoWithCustomValues()) { //AUTO final int len = termLen(text); if (len < lowDistance) { return 0; diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 10f10e1040d99..d000565c3e2a7 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -36,7 +36,6 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.discovery.single.SingleNodeDiscovery; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.plugins.DiscoveryPlugin; @@ -51,7 +50,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Random; import java.util.Set; import java.util.function.BiConsumer; @@ -70,6 +68,8 @@ public class DiscoveryModule { public static final String ZEN_DISCOVERY_TYPE = "legacy-zen"; public static final String ZEN2_DISCOVERY_TYPE = "zen"; + public static final String SINGLE_NODE_DISCOVERY_TYPE = "single-node"; + public static final Setting DISCOVERY_TYPE_SETTING = new Setting<>("discovery.type", ZEN2_DISCOVERY_TYPE, Function.identity(), Property.NodeScope); public static final Setting> LEGACY_DISCOVERY_HOSTS_PROVIDER_SETTING = @@ -119,6 +119,8 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic List filteredSeedProviders = seedProviderNames.stream() 
.map(hostProviders::get).map(Supplier::get).collect(Collectors.toList()); + String discoveryType = DISCOVERY_TYPE_SETTING.get(settings); + final SeedHostsProvider seedHostsProvider = hostsResolver -> { final List addresses = new ArrayList<>(); for (SeedHostsProvider provider : filteredSeedProviders) { @@ -127,23 +129,20 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic return Collections.unmodifiableList(addresses); }; - Map> discoveryTypes = new HashMap<>(); - discoveryTypes.put(ZEN_DISCOVERY_TYPE, - () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, - clusterSettings, seedHostsProvider, allocationService, joinValidators, gatewayMetaState)); - discoveryTypes.put(ZEN2_DISCOVERY_TYPE, () -> new Coordinator(NODE_NAME_SETTING.get(settings), settings, clusterSettings, - transportService, namedWriteableRegistry, allocationService, masterService, - () -> gatewayMetaState.getPersistedState(settings, (ClusterApplierService) clusterApplier), seedHostsProvider, clusterApplier, - joinValidators, new Random(Randomness.get().nextLong()))); - discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, masterService, clusterApplier, - gatewayMetaState)); - String discoveryType = DISCOVERY_TYPE_SETTING.get(settings); - Supplier discoverySupplier = discoveryTypes.get(discoveryType); - if (discoverySupplier == null) { + if (ZEN2_DISCOVERY_TYPE.equals(discoveryType) || SINGLE_NODE_DISCOVERY_TYPE.equals(discoveryType)) { + discovery = new Coordinator(NODE_NAME_SETTING.get(settings), + settings, clusterSettings, + transportService, namedWriteableRegistry, allocationService, masterService, + () -> gatewayMetaState.getPersistedState(settings, (ClusterApplierService) clusterApplier), seedHostsProvider, + clusterApplier, joinValidators, new Random(Randomness.get().nextLong())); + } else if (ZEN_DISCOVERY_TYPE.equals(discoveryType)) { + discovery = new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, + clusterSettings, seedHostsProvider, allocationService, joinValidators, gatewayMetaState); + } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); } + logger.info("using discovery type [{}] and seed hosts providers {}", discoveryType, seedProviderNames); - discovery = Objects.requireNonNull(discoverySupplier.get()); } private List getSeedProviderNames(Settings settings) { diff --git a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java index 58248b8183d34..c2ec9ae190579 100644 --- a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java @@ -69,7 +69,7 @@ public abstract class PeerFinder { - protected final Logger logger = LogManager.getLogger(getClass()); + private static final Logger logger = LogManager.getLogger(PeerFinder.class); public static final String REQUEST_PEERS_ACTION_NAME = "internal:discovery/request_peers"; @@ -308,7 +308,7 @@ public String toString() { return peersRemoved; } - private void startProbe(TransportAddress transportAddress) { + protected void startProbe(TransportAddress transportAddress) { assert holdsLock() : "PeerFinder mutex not held"; if (active == false) { logger.trace("startProbe({}) not running", transportAddress); diff --git a/server/src/main/java/org/elasticsearch/discovery/SeedHostsResolver.java 
b/server/src/main/java/org/elasticsearch/discovery/SeedHostsResolver.java index b656cc288e37f..926216b9b686e 100644 --- a/server/src/main/java/org/elasticsearch/discovery/SeedHostsResolver.java +++ b/server/src/main/java/org/elasticsearch/discovery/SeedHostsResolver.java @@ -23,24 +23,46 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.discovery.PeerFinder.ConfiguredHostsResolver; -import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; +import java.util.stream.Collectors; public class SeedHostsResolver extends AbstractLifecycleComponent implements ConfiguredHostsResolver { + public static final Setting<Integer> LEGACY_DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = + Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, Setting.Property.NodeScope, + Setting.Property.Deprecated); + public static final Setting<TimeValue> LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT = + Setting.positiveTimeSetting("discovery.zen.ping.unicast.hosts.resolve_timeout", TimeValue.timeValueSeconds(5), + Setting.Property.NodeScope, Setting.Property.Deprecated); + public static final Setting<Integer> DISCOVERY_SEED_RESOLVER_MAX_CONCURRENT_RESOLVERS_SETTING = + Setting.intSetting("discovery.seed_resolver.max_concurrent_resolvers", 10, 0, Setting.Property.NodeScope); + public static final Setting<TimeValue> DISCOVERY_SEED_RESOLVER_TIMEOUT_SETTING = + Setting.positiveTimeSetting("discovery.seed_resolver.timeout", TimeValue.timeValueSeconds(5), Setting.Property.NodeScope); + private static final Logger logger = LogManager.getLogger(SeedHostsResolver.class); private final Settings settings; @@ -58,8 +80,109 @@ public SeedHostsResolver(String nodeName, Settings settings, TransportService tr this.nodeName = nodeName; this.transportService = transportService; this.hostsProvider = seedProvider; - resolveTimeout = UnicastZenPing.getResolveTimeout(settings); - concurrentConnects = UnicastZenPing.getMaxConcurrentResolvers(settings); + resolveTimeout = getResolveTimeout(settings); + concurrentConnects = getMaxConcurrentResolvers(settings); + } + + public static int getMaxConcurrentResolvers(Settings settings) { + if (LEGACY_DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.exists(settings)) { + if (DISCOVERY_SEED_RESOLVER_MAX_CONCURRENT_RESOLVERS_SETTING.exists(settings)) { + throw new IllegalArgumentException("it is forbidden to set both [" + + DISCOVERY_SEED_RESOLVER_MAX_CONCURRENT_RESOLVERS_SETTING.getKey() + "] and [" + +
LEGACY_DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.getKey() + "]"); + } + return LEGACY_DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings); + } + return DISCOVERY_SEED_RESOLVER_MAX_CONCURRENT_RESOLVERS_SETTING.get(settings); + } + + public static TimeValue getResolveTimeout(Settings settings) { + if (LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.exists(settings)) { + if (DISCOVERY_SEED_RESOLVER_TIMEOUT_SETTING.exists(settings)) { + throw new IllegalArgumentException("it is forbidden to set both [" + + DISCOVERY_SEED_RESOLVER_TIMEOUT_SETTING.getKey() + "] and [" + + LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.getKey() + "]"); + } + return LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings); + } + return DISCOVERY_SEED_RESOLVER_TIMEOUT_SETTING.get(settings); + } + + /** + * Resolves a list of hosts to a list of transport addresses. Each host is resolved into a transport address (or a collection of + * addresses if the number of ports is greater than one). Host lookups are done in parallel using specified executor service up + * to the specified resolve timeout. + * + * @param executorService the executor service used to parallelize hostname lookups + * @param logger logger used for logging messages regarding hostname lookups + * @param hosts the hosts to resolve + * @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport) + * @param transportService the transport service + * @param resolveTimeout the timeout before returning from hostname lookups + * @return a list of resolved transport addresses + */ + public static List<TransportAddress> resolveHostsLists( + final ExecutorService executorService, + final Logger logger, + final List<String> hosts, + final int limitPortCounts, + final TransportService transportService, + final TimeValue resolveTimeout) { + Objects.requireNonNull(executorService); + Objects.requireNonNull(logger); + Objects.requireNonNull(hosts); + Objects.requireNonNull(transportService); + Objects.requireNonNull(resolveTimeout); + if (resolveTimeout.nanos() < 0) { + throw new IllegalArgumentException("resolve timeout must be non-negative but was [" + resolveTimeout + "]"); + } + // create tasks to submit to the executor service; we will wait up to resolveTimeout for these tasks to complete + final List<Callable<TransportAddress[]>> callables = + hosts + .stream() + .map(hn -> (Callable<TransportAddress[]>) () -> transportService.addressesFromString(hn, limitPortCounts)) + .collect(Collectors.toList()); + final List<Future<TransportAddress[]>> futures; + try { + futures = executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return Collections.emptyList(); + } + final List<TransportAddress> transportAddresses = new ArrayList<>(); + final Set<TransportAddress> localAddresses = new HashSet<>(); + localAddresses.add(transportService.boundAddress().publishAddress()); + localAddresses.addAll(Arrays.asList(transportService.boundAddress().boundAddresses())); + // ExecutorService#invokeAll guarantees that the futures are returned in the iteration order of the tasks so we can associate the + // hostname with the corresponding task by iterating together + final Iterator<String> it = hosts.iterator(); + for (final Future<TransportAddress[]> future : futures) { + final String hostname = it.next(); + if (!future.isCancelled()) { + assert future.isDone(); + try { + final TransportAddress[] addresses = future.get(); + logger.trace("resolved host [{}] to {}", hostname, addresses); + for (int addressId = 0; addressId < addresses.length; 
addressId++) { + final TransportAddress address = addresses[addressId]; + // no point in pinging ourselves + if (localAddresses.contains(address) == false) { + transportAddresses.add(address); + } + } + } catch (final ExecutionException e) { + assert e.getCause() != null; + final String message = "failed to resolve host [" + hostname + "]"; + logger.warn(message, e.getCause()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // ignore + } + } else { + logger.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname); + } + } + return Collections.unmodifiableList(transportAddresses); } @Override @@ -102,7 +225,7 @@ protected void doRun() { List providedAddresses = hostsProvider.getSeedAddresses((hosts, limitPortCounts) - -> UnicastZenPing.resolveHostsLists(executorService.get(), logger, hosts, limitPortCounts, + -> resolveHostsLists(executorService.get(), logger, hosts, limitPortCounts, transportService, resolveTimeout)); consumer.accept(providedAddresses); diff --git a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java deleted file mode 100644 index 2a415a74cd0cc..0000000000000 --- a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.discovery.single; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterApplier; -import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener; -import org.elasticsearch.cluster.service.ClusterApplierService; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.gateway.GatewayMetaState; -import org.elasticsearch.transport.TransportService; - -import java.util.Objects; - -import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; - -/** - * A discovery implementation where the only member of the cluster is the local node. - */ -public class SingleNodeDiscovery extends AbstractLifecycleComponent implements Discovery { - private static final Logger logger = LogManager.getLogger(SingleNodeDiscovery.class); - - private final ClusterName clusterName; - protected final TransportService transportService; - private final ClusterApplier clusterApplier; - private volatile ClusterState clusterState; - - public SingleNodeDiscovery(final Settings settings, final TransportService transportService, - final MasterService masterService, final ClusterApplier clusterApplier, - final GatewayMetaState gatewayMetaState) { - this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); - this.transportService = Objects.requireNonNull(transportService); - masterService.setClusterStateSupplier(() -> clusterState); - this.clusterApplier = clusterApplier; - - if (clusterApplier instanceof ClusterApplierService) { - ((ClusterApplierService) clusterApplier).addLowPriorityApplier(gatewayMetaState); - } - } - - @Override - public synchronized void publish(final ClusterChangedEvent event, ActionListener publishListener, - final AckListener ackListener) { - clusterState = event.state(); - ackListener.onCommit(TimeValue.ZERO); - - clusterApplier.onNewClusterState("apply-locally-on-node[" + event.source() + "]", () -> clusterState, new ClusterApplyListener() { - @Override - public void onSuccess(String source) { - publishListener.onResponse(null); - ackListener.onNodeAck(transportService.getLocalNode(), null); - } - - @Override - public void onFailure(String source, Exception e) { - publishListener.onFailure(e); - ackListener.onNodeAck(transportService.getLocalNode(), e); - logger.warn(() -> new ParameterizedMessage("failed while applying cluster state locally [{}]", event.source()), e); - } - }); - } - - @Override - public DiscoveryStats stats() { - return new DiscoveryStats(null, null); - } - - @Override - public synchronized void startInitialJoin() { - if (lifecycle.started() == false) { - throw new IllegalStateException("can't start initial join when not started"); - } - // apply a fresh cluster state just so that state recovery gets triggered by 
GatewayService - // TODO: give discovery module control over GatewayService - clusterState = ClusterState.builder(clusterState).build(); - clusterApplier.onNewClusterState("single-node-start-initial-join", () -> clusterState, (source, e) -> {}); - } - - @Override - protected synchronized void doStart() { - // set initial state - DiscoveryNode localNode = transportService.getLocalNode(); - clusterState = createInitialState(localNode); - clusterApplier.setInitialState(clusterState); - } - - protected ClusterState createInitialState(DiscoveryNode localNode) { - ClusterState.Builder builder = ClusterState.builder(clusterName); - return builder.nodes(DiscoveryNodes.builder().add(localNode) - .localNodeId(localNode.getId()) - .masterNodeId(localNode.getId()) - .build()) - .blocks(ClusterBlocks.builder() - .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) - .build(); - } - - @Override - protected void doStop() { - - } - - @Override - protected void doClose() { - - } - -} diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java b/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index 550b25083fb96..9b8d9f14c2e8d 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -22,6 +22,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.coordination.ValidateJoinRequest; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -159,32 +160,6 @@ public void onFailure(Exception e) { } } - public static class ValidateJoinRequest extends TransportRequest { - private ClusterState state; - - public ValidateJoinRequest() {} - - public ValidateJoinRequest(ClusterState state) { - this.state = state; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.state = ClusterState.readFrom(in, null); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - this.state.writeTo(out); - } - - public ClusterState getState() { - return state; - } - } - static class ValidateJoinRequestRequestHandler implements TransportRequestHandler { private final Supplier localNodeSupplier; private final Collection> joinValidators; @@ -199,7 +174,7 @@ static class ValidateJoinRequestRequestHandler implements TransportRequestHandle public void messageReceived(ValidateJoinRequest request, TransportChannel channel, Task task) throws Exception { DiscoveryNode node = localNodeSupplier.get(); assert node != null : "local node is null"; - joinValidators.stream().forEach(action -> action.accept(node, request.state)); + joinValidators.stream().forEach(action -> action.accept(node, request.getState())); channel.sendResponse(TransportResponse.Empty.INSTANCE); } } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/PendingClusterStatesQueue.java b/server/src/main/java/org/elasticsearch/discovery/zen/PendingClusterStatesQueue.java index 018258066de8d..bfe011d28be38 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/PendingClusterStatesQueue.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/PendingClusterStatesQueue.java @@ -22,6 +22,8 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; 
import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.coordination.CoordinationMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import java.util.ArrayList; @@ -246,7 +248,16 @@ public synchronized ClusterState getNextClusterStateToProcess() { } } assert stateToProcess.committed() : "should only return committed cluster state. found " + stateToProcess.state; - return stateToProcess.state; + + final ClusterState committedState = stateToProcess.state; + final CoordinationMetaData coordinationMetaData = committedState.coordinationMetaData(); + if (coordinationMetaData.getLastAcceptedConfiguration().equals(coordinationMetaData.getLastCommittedConfiguration())) { + return committedState; + } else { + return ClusterState.builder(committedState).metaData(MetaData.builder(committedState.metaData()) + .coordinationMetaData(CoordinationMetaData.builder(coordinationMetaData) + .lastCommittedConfiguration(committedState.getLastAcceptedConfiguration()).build())).build(); + } } /** returns all pending states, committed or not */ diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index 59b8b9513d6aa..43f726e61727c 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -34,8 +34,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -46,6 +44,7 @@ import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.discovery.SeedHostsProvider; +import org.elasticsearch.discovery.SeedHostsResolver; import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -68,18 +67,11 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Objects; import java.util.Queue; import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -98,17 +90,6 @@ public class UnicastZenPing implements ZenPing { public static final String ACTION_NAME = "internal:discovery/zen/unicast"; - public static final Setting LEGACY_DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = - Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, Property.NodeScope, Property.Deprecated); - public static final Setting LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT = - Setting.positiveTimeSetting("discovery.zen.ping.unicast.hosts.resolve_timeout", TimeValue.timeValueSeconds(5), - Property.NodeScope, Property.Deprecated); - - public static final Setting 
DISCOVERY_SEED_RESOLVER_MAX_CONCURRENT_RESOLVERS_SETTING = - Setting.intSetting("discovery.seed_resolver.max_concurrent_resolvers", 10, 0, Property.NodeScope); - public static final Setting DISCOVERY_SEED_RESOLVER_TIMEOUT_SETTING = - Setting.positiveTimeSetting("discovery.seed_resolver.timeout", TimeValue.timeValueSeconds(5), Property.NodeScope); - private final ThreadPool threadPool; private final TransportService transportService; private final ClusterName clusterName; @@ -140,8 +121,8 @@ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService this.hostsProvider = seedHostsProvider; this.contextProvider = contextProvider; - final int concurrentConnects = getMaxConcurrentResolvers(settings); - resolveTimeout = getResolveTimeout(settings); + final int concurrentConnects = SeedHostsResolver.getMaxConcurrentResolvers(settings); + resolveTimeout = SeedHostsResolver.getResolveTimeout(settings); nodeName = Node.NODE_NAME_SETTING.get(settings); logger.debug( "using max_concurrent_resolvers [{}], resolver timeout [{}]", @@ -162,85 +143,8 @@ public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService threadPool.getThreadContext()); } - /** - * Resolves a list of hosts to a list of transport addresses. Each host is resolved into a transport address (or a collection of - * addresses if the number of ports is greater than one). Host lookups are done in parallel using specified executor service up - * to the specified resolve timeout. - * - * @param executorService the executor service used to parallelize hostname lookups - * @param logger logger used for logging messages regarding hostname lookups - * @param hosts the hosts to resolve - * @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport) - * @param transportService the transport service - * @param resolveTimeout the timeout before returning from hostname lookups - * @return a list of resolved transport addresses - */ - public static List resolveHostsLists( - final ExecutorService executorService, - final Logger logger, - final List hosts, - final int limitPortCounts, - final TransportService transportService, - final TimeValue resolveTimeout) { - Objects.requireNonNull(executorService); - Objects.requireNonNull(logger); - Objects.requireNonNull(hosts); - Objects.requireNonNull(transportService); - Objects.requireNonNull(resolveTimeout); - if (resolveTimeout.nanos() < 0) { - throw new IllegalArgumentException("resolve timeout must be non-negative but was [" + resolveTimeout + "]"); - } - // create tasks to submit to the executor service; we will wait up to resolveTimeout for these tasks to complete - final List> callables = - hosts - .stream() - .map(hn -> (Callable) () -> transportService.addressesFromString(hn, limitPortCounts)) - .collect(Collectors.toList()); - final List> futures; - try { - futures = executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - return Collections.emptyList(); - } - final List transportAddresses = new ArrayList<>(); - final Set localAddresses = new HashSet<>(); - localAddresses.add(transportService.boundAddress().publishAddress()); - localAddresses.addAll(Arrays.asList(transportService.boundAddress().boundAddresses())); - // ExecutorService#invokeAll guarantees that the futures are returned in the iteration order of the tasks so we can associate the - // hostname with the corresponding task by iterating together - final Iterator 
it = hosts.iterator(); - for (final Future future : futures) { - final String hostname = it.next(); - if (!future.isCancelled()) { - assert future.isDone(); - try { - final TransportAddress[] addresses = future.get(); - logger.trace("resolved host [{}] to {}", hostname, addresses); - for (int addressId = 0; addressId < addresses.length; addressId++) { - final TransportAddress address = addresses[addressId]; - // no point in pinging ourselves - if (localAddresses.contains(address) == false) { - transportAddresses.add(address); - } - } - } catch (final ExecutionException e) { - assert e.getCause() != null; - final String message = "failed to resolve host [" + hostname + "]"; - logger.warn(message, e.getCause()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - // ignore - } - } else { - logger.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname); - } - } - return Collections.unmodifiableList(transportAddresses); - } - private SeedHostsProvider.HostsResolver createHostsResolver() { - return (hosts, limitPortCounts) -> resolveHostsLists(unicastZenPingExecutorService, logger, hosts, + return (hosts, limitPortCounts) -> SeedHostsResolver.resolveHostsLists(unicastZenPingExecutorService, logger, hosts, limitPortCounts, transportService, resolveTimeout); } @@ -671,27 +575,4 @@ protected Version getVersion() { return Version.CURRENT; // for tests } - public static int getMaxConcurrentResolvers(Settings settings) { - if (LEGACY_DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.exists(settings)) { - if (DISCOVERY_SEED_RESOLVER_MAX_CONCURRENT_RESOLVERS_SETTING.exists(settings)) { - throw new IllegalArgumentException("it is forbidden to set both [" - + DISCOVERY_SEED_RESOLVER_MAX_CONCURRENT_RESOLVERS_SETTING.getKey() + "] and [" - + LEGACY_DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.getKey() + "]"); - } - return LEGACY_DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings); - } - return DISCOVERY_SEED_RESOLVER_MAX_CONCURRENT_RESOLVERS_SETTING.get(settings); - } - - public static TimeValue getResolveTimeout(Settings settings) { - if (LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.exists(settings)) { - if (DISCOVERY_SEED_RESOLVER_TIMEOUT_SETTING.exists(settings)) { - throw new IllegalArgumentException("it is forbidden to set both [" - + DISCOVERY_SEED_RESOLVER_TIMEOUT_SETTING.getKey() + "] and [" - + LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.getKey() + "]"); - } - return LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings); - } - return DISCOVERY_SEED_RESOLVER_TIMEOUT_SETTING.get(settings); - } } diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 397d1ee1763dd..f7e6f8e949b37 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -19,12 +19,8 @@ package org.elasticsearch.env; -import java.io.UncheckedIOException; -import java.util.Iterator; -import java.util.stream.Collectors; -import java.util.stream.Stream; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentInfos; @@ -34,22 +30,22 @@ import org.apache.lucene.store.LockObtainFailedException; import 
org.apache.lucene.store.NativeFSLockFactory; import org.apache.lucene.store.SimpleFSDirectory; -import org.elasticsearch.common.CheckedFunction; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -63,6 +59,7 @@ import java.io.Closeable; import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.file.AtomicMoveNotSupportedException; import java.nio.file.DirectoryStream; import java.nio.file.FileStore; @@ -74,6 +71,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -84,6 +82,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.unmodifiableSet; @@ -128,7 +128,11 @@ public Path resolve(ShardId shardId) { * ${data.paths}/nodes/{node.id}/indices/{index.uuid} */ public Path resolve(Index index) { - return indicesPath.resolve(index.getUUID()); + return resolve(index.getUUID()); + } + + Path resolve(String uuid) { + return indicesPath.resolve(uuid); } @Override @@ -440,7 +444,7 @@ private static String toString(Collection items) { public void deleteShardDirectorySafe(ShardId shardId, IndexSettings indexSettings) throws IOException, ShardLockObtainFailedException { final Path[] paths = availableShardPaths(shardId); logger.trace("deleting shard {} directory, paths: [{}]", shardId, paths); - try (ShardLock lock = shardLock(shardId)) { + try (ShardLock lock = shardLock(shardId, "shard deletion under lock")) { deleteShardDirectoryUnderLock(lock, indexSettings); } } @@ -532,7 +536,7 @@ private static boolean assertPathsDoNotExist(final Path[] paths) { private boolean isShardLocked(ShardId id) { try { - shardLock(id, 0).close(); + shardLock(id, "checking if shard is locked").close(); return false; } catch (ShardLockObtainFailedException ex) { return true; @@ -551,7 +555,7 @@ private boolean isShardLocked(ShardId id) { */ public void deleteIndexDirectorySafe(Index index, long lockTimeoutMS, IndexSettings indexSettings) throws IOException, ShardLockObtainFailedException { - final List locks = lockAllForIndex(index, indexSettings, lockTimeoutMS); + final List locks = lockAllForIndex(index, indexSettings, "deleting index directory", lockTimeoutMS); try { deleteIndexDirectoryUnderLock(index, indexSettings); } finally { @@ -586,7 +590,8 @@ public void 
deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettin * @param lockTimeoutMS how long to wait for acquiring the indices shard locks * @return the {@link ShardLock} instances for this index. */ - public List lockAllForIndex(Index index, IndexSettings settings, long lockTimeoutMS) throws ShardLockObtainFailedException { + public List lockAllForIndex(final Index index, final IndexSettings settings, + final String lockDetails, final long lockTimeoutMS) throws ShardLockObtainFailedException { final int numShards = settings.getNumberOfShards(); if (numShards <= 0) { throw new IllegalArgumentException("settings must contain a non-null > 0 number of shards"); @@ -598,7 +603,7 @@ public List lockAllForIndex(Index index, IndexSettings settings, long try { for (int i = 0; i < numShards; i++) { long timeoutLeftMS = Math.max(0, lockTimeoutMS - TimeValue.nsecToMSec((System.nanoTime() - startTimeNS))); - allLocks.add(shardLock(new ShardId(index, i), timeoutLeftMS)); + allLocks.add(shardLock(new ShardId(index, i), lockDetails, timeoutLeftMS)); } success = true; } finally { @@ -619,10 +624,11 @@ public List lockAllForIndex(Index index, IndexSettings settings, long * Note: this method will return immediately if the lock can't be acquired. * * @param id the shard ID to lock + * @param details information about why the shard is being locked * @return the shard lock. Call {@link ShardLock#close()} to release the lock */ - public ShardLock shardLock(ShardId id) throws ShardLockObtainFailedException { - return shardLock(id, 0); + public ShardLock shardLock(ShardId id, final String details) throws ShardLockObtainFailedException { + return shardLock(id, details, 0); } /** @@ -631,11 +637,13 @@ public ShardLock shardLock(ShardId id) throws ShardLockObtainFailedException { * or recover from a different shard instance into it. If the shard lock can not be acquired * a {@link ShardLockObtainFailedException} is thrown * @param shardId the shard ID to lock + * @param details information about why the shard is being locked * @param lockTimeoutMS the lock timeout in milliseconds * @return the shard lock. 
Call {@link ShardLock#close()} to release the lock */ - public ShardLock shardLock(final ShardId shardId, long lockTimeoutMS) throws ShardLockObtainFailedException { - logger.trace("acquiring node shardlock on [{}], timeout [{}]", shardId, lockTimeoutMS); + public ShardLock shardLock(final ShardId shardId, final String details, + final long lockTimeoutMS) throws ShardLockObtainFailedException { + logger.trace("acquiring node shardlock on [{}], timeout [{}], details [{}]", shardId, lockTimeoutMS, details); final InternalShardLock shardLock; final boolean acquired; synchronized (shardLocks) { @@ -644,7 +652,7 @@ public ShardLock shardLock(final ShardId shardId, long lockTimeoutMS) throws Sha shardLock.incWaitCount(); acquired = false; } else { - shardLock = new InternalShardLock(shardId); + shardLock = new InternalShardLock(shardId, details); shardLocks.put(shardId, shardLock); acquired = true; } @@ -652,7 +660,7 @@ public ShardLock shardLock(final ShardId shardId, long lockTimeoutMS) throws Sha if (acquired == false) { boolean success = false; try { - shardLock.acquire(lockTimeoutMS); + shardLock.acquire(lockTimeoutMS, details); success = true; } finally { if (success == false) { @@ -671,11 +679,11 @@ protected void closeInternal() { } /** - * A functional interface that people can use to reference {@link #shardLock(ShardId, long)} + * A functional interface that people can use to reference {@link #shardLock(ShardId, String, long)} */ @FunctionalInterface public interface ShardLocker { - ShardLock lock(ShardId shardId, long lockTimeoutMS) throws ShardLockObtainFailedException; + ShardLock lock(ShardId shardId, String lockDetails, long lockTimeoutMS) throws ShardLockObtainFailedException; } /** @@ -698,11 +706,13 @@ private final class InternalShardLock { */ private final Semaphore mutex = new Semaphore(1); private int waitCount = 1; // guarded by shardLocks + private String lockDetails; private final ShardId shardId; - InternalShardLock(ShardId shardId) { + InternalShardLock(final ShardId shardId, final String details) { this.shardId = shardId; mutex.acquireUninterruptibly(); + lockDetails = details; } protected void release() { @@ -730,11 +740,14 @@ private void decWaitCount() { } } - void acquire(long timeoutInMillis) throws ShardLockObtainFailedException { + void acquire(long timeoutInMillis, final String details) throws ShardLockObtainFailedException { try { - if (mutex.tryAcquire(timeoutInMillis, TimeUnit.MILLISECONDS) == false) { + if (mutex.tryAcquire(timeoutInMillis, TimeUnit.MILLISECONDS)) { + lockDetails = details; + } else { throw new ShardLockObtainFailedException(shardId, - "obtaining shard lock timed out after " + timeoutInMillis + "ms"); + "obtaining shard lock timed out after " + timeoutInMillis + "ms, previous lock details: [" + lockDetails + + "] trying to lock for [" + details + "]"); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); @@ -1041,28 +1054,48 @@ private static void ensureAtomicMoveSupported(final NodePath[] nodePaths) throws } private void ensureNoShardData(final NodePath[] nodePaths) throws IOException { - List shardDataPaths = collectIndexSubPaths(nodePaths, this::isShardPath); + List shardDataPaths = collectShardDataPaths(nodePaths); if (shardDataPaths.isEmpty() == false) { throw new IllegalStateException("Node is started with " + Node.NODE_DATA_SETTING.getKey() + "=false, but has shard data: " - + shardDataPaths); + + shardDataPaths + + ". 
Use 'elasticsearch-node repurpose' tool to clean up" + ); } } private void ensureNoIndexMetaData(final NodePath[] nodePaths) throws IOException { - List indexMetaDataPaths = collectIndexSubPaths(nodePaths, this::isIndexMetaDataPath); + List indexMetaDataPaths = collectIndexMetaDataPaths(nodePaths); if (indexMetaDataPaths.isEmpty() == false) { throw new IllegalStateException("Node is started with " + Node.NODE_DATA_SETTING.getKey() + "=false and " + Node.NODE_MASTER_SETTING.getKey() + "=false, but has index metadata: " - + indexMetaDataPaths); + + indexMetaDataPaths + + ". Use 'elasticsearch-node repurpose' tool to clean up" + ); } } - private List collectIndexSubPaths(NodePath[] nodePaths, Predicate subPathPredicate) throws IOException { + /** + * Collect the paths containing shard data in the indicated node paths. The returned paths will point to the shard data folder. + */ + static List collectShardDataPaths(NodePath[] nodePaths) throws IOException { + return collectIndexSubPaths(nodePaths, NodeEnvironment::isShardPath); + } + + + /** + * Collect the paths containing index meta data in the indicated node paths. The returned paths will point to the + * {@link MetaDataStateFormat#STATE_DIR_NAME} folder + */ + static List collectIndexMetaDataPaths(NodePath[] nodePaths) throws IOException { + return collectIndexSubPaths(nodePaths, NodeEnvironment::isIndexMetaDataPath); + } + + private static List collectIndexSubPaths(NodePath[] nodePaths, Predicate subPathPredicate) throws IOException { List indexSubPaths = new ArrayList<>(); for (NodePath nodePath : nodePaths) { Path indicesPath = nodePath.indicesPath; @@ -1084,12 +1117,12 @@ private List collectIndexSubPaths(NodePath[] nodePaths, Predicate su return indexSubPaths; } - private boolean isShardPath(Path path) { + private static boolean isShardPath(Path path) { return Files.isDirectory(path) && path.getFileName().toString().chars().allMatch(Character::isDigit); } - private boolean isIndexMetaDataPath(Path path) { + private static boolean isIndexMetaDataPath(Path path) { return Files.isDirectory(path) && path.getFileName().toString().equals(MetaDataStateFormat.STATE_DIR_NAME); } diff --git a/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java new file mode 100644 index 0000000000000..7331d8528fc64 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java @@ -0,0 +1,241 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.env; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.coordination.ElasticsearchNodeCommand; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.gateway.WriteStateException; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +public class NodeRepurposeCommand extends ElasticsearchNodeCommand { + + private static final Logger logger = LogManager.getLogger(NodeRepurposeCommand.class); + + static final String ABORTED_BY_USER_MSG = ElasticsearchNodeCommand.ABORTED_BY_USER_MSG; + static final String FAILED_TO_OBTAIN_NODE_LOCK_MSG = ElasticsearchNodeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG; + static final String NO_CLEANUP = "Node has node.data=true -> no clean up necessary"; + static final String NO_DATA_TO_CLEAN_UP_FOUND = "No data to clean-up found"; + static final String NO_SHARD_DATA_TO_CLEAN_UP_FOUND = "No shard data to clean-up found"; + static final String PRE_V7_MESSAGE = + "No manifest file found. If you were previously running this node on Elasticsearch version 6, please proceed.\n" + + "If this node was ever started on Elasticsearch version 7 or higher, it might mean metadata corruption, please abort."; + + public NodeRepurposeCommand() { + super("Repurpose this node to another master/data role, cleaning up any excess persisted data"); + } + + void testExecute(Terminal terminal, OptionSet options, Environment env) throws Exception { + execute(terminal, options, env); + } + + @Override + protected boolean validateBeforeLock(Terminal terminal, Environment env) { + Settings settings = env.settings(); + if (DiscoveryNode.isDataNode(settings)) { + terminal.println(Terminal.Verbosity.NORMAL, NO_CLEANUP); + return false; + } + + return true; + } + + @Override + protected void processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { + assert DiscoveryNode.isDataNode(env.settings()) == false; + + if (DiscoveryNode.isMasterNode(env.settings()) == false) { + processNoMasterNoDataNode(terminal, dataPaths); + } else { + processMasterNoDataNode(terminal, dataPaths); + } + } + + private void processNoMasterNoDataNode(Terminal terminal, Path[] dataPaths) throws IOException { + NodeEnvironment.NodePath[] nodePaths = toNodePaths(dataPaths); + + terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths"); + List shardDataPaths = NodeEnvironment.collectShardDataPaths(nodePaths); + + terminal.println(Terminal.Verbosity.VERBOSE, "Collecting index metadata paths"); + List indexMetaDataPaths = NodeEnvironment.collectIndexMetaDataPaths(nodePaths); + + Set indexPaths = uniqueParentPaths(shardDataPaths, indexMetaDataPaths); + if (indexPaths.isEmpty()) { + terminal.println(Terminal.Verbosity.NORMAL, NO_DATA_TO_CLEAN_UP_FOUND); + return; + } + + Set indexUUIDs = indexUUIDsFor(indexPaths); + outputVerboseInformation(terminal, nodePaths, indexPaths, indexUUIDs); + + 
terminal.println(noMasterMessage(indexUUIDs.size(), shardDataPaths.size(), indexMetaDataPaths.size())); + outputHowToSeeVerboseInformation(terminal); + + final Manifest manifest = loadManifest(terminal, dataPaths); + + terminal.println("Node is being re-purposed as no-master and no-data. Clean-up of index data will be performed."); + confirm(terminal, "Do you want to proceed?"); + + if (manifest != null) { + rewriteManifest(terminal, manifest, dataPaths); + } + + removePaths(terminal, indexPaths); + + terminal.println("Node successfully repurposed to no-master and no-data."); + } + + private void processMasterNoDataNode(Terminal terminal, Path[] dataPaths) throws IOException { + NodeEnvironment.NodePath[] nodePaths = toNodePaths(dataPaths); + + terminal.println(Terminal.Verbosity.VERBOSE, "Collecting shard data paths"); + List shardDataPaths = NodeEnvironment.collectShardDataPaths(nodePaths); + if (shardDataPaths.isEmpty()) { + terminal.println(NO_SHARD_DATA_TO_CLEAN_UP_FOUND); + return; + } + + Set indexPaths = uniqueParentPaths(shardDataPaths); + Set indexUUIDs = indexUUIDsFor(indexPaths); + outputVerboseInformation(terminal, nodePaths, shardDataPaths, indexUUIDs); + + terminal.println(shardMessage(shardDataPaths.size(), indexUUIDs.size())); + outputHowToSeeVerboseInformation(terminal); + + terminal.println("Node is being re-purposed as master and no-data. Clean-up of shard data will be performed."); + confirm(terminal, "Do you want to proceed?"); + + removePaths(terminal, shardDataPaths); + + terminal.println("Node successfully repurposed to master and no-data."); + } + + private void outputVerboseInformation(Terminal terminal, NodeEnvironment.NodePath[] nodePaths, + Collection pathsToCleanup, Set indexUUIDs) { + if (terminal.isPrintable(Terminal.Verbosity.VERBOSE)) { + terminal.println(Terminal.Verbosity.VERBOSE, "Paths to clean up:"); + pathsToCleanup.forEach(p -> terminal.println(Terminal.Verbosity.VERBOSE, " " + p.toString())); + terminal.println(Terminal.Verbosity.VERBOSE, "Indices affected:"); + indexUUIDs.forEach(uuid -> terminal.println(Terminal.Verbosity.VERBOSE, " " + toIndexName(nodePaths, uuid))); + } + } + + private void outputHowToSeeVerboseInformation(Terminal terminal) { + if (terminal.isPrintable(Terminal.Verbosity.VERBOSE) == false) { + terminal.println("Use -v to see list of paths and indices affected"); + } + } + private String toIndexName(NodeEnvironment.NodePath[] nodePaths, String uuid) { + Path[] indexPaths = new Path[nodePaths.length]; + for (int i = 0; i < nodePaths.length; i++) { + indexPaths[i] = nodePaths[i].resolve(uuid); + } + try { + IndexMetaData metaData = IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, indexPaths); + return metaData.getIndex().getName(); + } catch (Exception e) { + return "no name for uuid: " + uuid + ": " + e; + } + } + + private NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) { + return Arrays.stream(dataPaths).map(NodeRepurposeCommand::createNodePath).toArray(NodeEnvironment.NodePath[]::new); + } + + private Set indexUUIDsFor(Set indexPaths) { + return indexPaths.stream().map(Path::getFileName).map(Path::toString).collect(Collectors.toSet()); + } + + static String noMasterMessage(int indexes, int shards, int indexMetaData) { + return "Found " + indexes + " indices (" + + shards + " shards and " + indexMetaData + " index meta data) to clean up"; + } + + static String shardMessage(int shards, int indices) { + return "Found " + shards + " shards in " + indices + " indices to clean up"; + } + + private void 
rewriteManifest(Terminal terminal, Manifest manifest, Path[] dataPaths) throws WriteStateException { + terminal.println(Terminal.Verbosity.VERBOSE, "Re-writing manifest"); + Manifest newManifest = new Manifest(manifest.getCurrentTerm(), manifest.getClusterStateVersion(), manifest.getGlobalGeneration(), + new HashMap<>()); + Manifest.FORMAT.writeAndCleanup(newManifest, dataPaths); + } + + private Manifest loadManifest(Terminal terminal, Path[] dataPaths) throws IOException { + terminal.println(Terminal.Verbosity.VERBOSE, "Loading manifest"); + final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, dataPaths); + + if (manifest == null) { + terminal.println(Terminal.Verbosity.SILENT, PRE_V7_MESSAGE); + } + return manifest; + } + + private void removePaths(Terminal terminal, Collection paths) { + terminal.println(Terminal.Verbosity.VERBOSE, "Removing data"); + paths.forEach(this::removePath); + } + + private void removePath(Path path) { + try { + IOUtils.rm(path); + } catch (IOException e) { + throw new ElasticsearchException("Unable to clean up path: " + path + ": " + e.getMessage()); + } + } + + @SafeVarargs + @SuppressWarnings("varargs") + private final Set uniqueParentPaths(Collection... paths) { + // equals on Path is good enough here due to the way these are collected. + return Arrays.stream(paths).flatMap(Collection::stream).map(Path::getParent).collect(Collectors.toSet()); + } + + private static NodeEnvironment.NodePath createNodePath(Path path) { + try { + return new NodeEnvironment.NodePath(path); + } catch (IOException e) { + throw new ElasticsearchException("Unable to investigate path: " + path + ": " + e.getMessage()); + } + } + + //package-private for testing + OptionParser getParser() { + return parser; + } +} diff --git a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java index 056919ab1c4be..74eb3e08f002f 100644 --- a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java +++ b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java @@ -31,8 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.index.Index; -import org.elasticsearch.indices.IndicesService; import java.util.Map; @@ -92,26 +90,6 @@ static ClusterState recoverClusterBlocks(final ClusterState state) { return ClusterState.builder(state).blocks(blocks).build(); } - static ClusterState closeBadIndices(final ClusterState clusterState, final IndicesService indicesService) { - final MetaData.Builder builder = MetaData.builder(clusterState.metaData()).removeAllIndices(); - - for (IndexMetaData metaData : clusterState.metaData()) { - try { - if (metaData.getState() == IndexMetaData.State.OPEN) { - // verify that we can actually create this index - if not we recover it as closed with lots of warn logs - indicesService.verifyIndexMetadata(metaData, metaData); - } - } catch (final Exception e) { - final Index electedIndex = metaData.getIndex(); - logger.warn(() -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e); - metaData = IndexMetaData.builder(metaData).state(IndexMetaData.State.CLOSE).build(); - } - builder.put(metaData, false); - } - - return ClusterState.builder(clusterState).metaData(builder).build(); - } - static ClusterState updateRoutingTable(final 
ClusterState state) { // initialize all index routing tables as empty final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(state.routingTable()); diff --git a/server/src/main/java/org/elasticsearch/gateway/Gateway.java b/server/src/main/java/org/elasticsearch/gateway/Gateway.java index cffb672f0cfda..317bf63a4a651 100644 --- a/server/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/server/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -126,7 +126,6 @@ public void performStateRecovery(final GatewayStateRecoveredListener listener) t } ClusterState recoveredState = Function.identity() .andThen(state -> ClusterStateUpdaters.upgradeAndArchiveUnknownOrInvalidSettings(state, clusterService.getClusterSettings())) - .andThen(state -> ClusterStateUpdaters.closeBadIndices(state, indicesService)) .apply(ClusterState.builder(clusterService.getClusterName()).metaData(metaDataBuilder).build()); listener.onSuccess(recoveredState); diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index 67d9ab9a5bf88..82627cfdc0b82 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -23,14 +23,12 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.action.support.nodes.BaseNodesResponse; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.elasticsearch.cluster.routing.allocation.FailedShard; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -55,28 +53,19 @@ public class GatewayAllocator { asyncFetchStore = ConcurrentCollections.newConcurrentMap(); @Inject - public GatewayAllocator(ClusterService clusterService, RoutingService routingService, - TransportNodesListGatewayStartedShards startedAction, TransportNodesListShardStoreMetaData storeAction) { + public GatewayAllocator(RoutingService routingService, + TransportNodesListGatewayStartedShards startedAction, + TransportNodesListShardStoreMetaData storeAction) { this.routingService = routingService; this.primaryShardAllocator = new InternalPrimaryShardAllocator(startedAction); this.replicaShardAllocator = new InternalReplicaShardAllocator(storeAction); - clusterService.addStateApplier(event -> { - boolean cleanCache = false; - DiscoveryNode localNode = event.state().nodes().getLocalNode(); - if (localNode != null) { - if (localNode.isMasterNode() && event.localNodeMaster() == false) { - cleanCache = true; - } - } else { - cleanCache = true; - } - if (cleanCache) { - Releasables.close(asyncFetchStarted.values()); - asyncFetchStarted.clear(); - Releasables.close(asyncFetchStore.values()); - asyncFetchStore.clear(); - } - }); + } + + public void cleanCaches() { + Releasables.close(asyncFetchStarted.values()); + asyncFetchStarted.clear(); + Releasables.close(asyncFetchStore.values()); + asyncFetchStore.clear(); } // for tests diff --git 
a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index f14d86c7602bf..30361fa70ee6b 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -137,7 +137,6 @@ public void applyClusterStateUpdaters() { .andThen(ClusterStateUpdaters::addStateNotRecoveredBlock) .andThen(state -> ClusterStateUpdaters.setLocalNode(state, transportService.getLocalNode())) .andThen(state -> ClusterStateUpdaters.upgradeAndArchiveUnknownOrInvalidSettings(state, clusterService.getClusterSettings())) - .andThen(state -> ClusterStateUpdaters.closeBadIndices(state, indicesService)) .andThen(ClusterStateUpdaters::recoverClusterBlocks) .apply(previousClusterState); } @@ -321,7 +320,14 @@ long writeManifestAndCleanup(String reason, Manifest manifest) throws WriteState finished = true; return generation; } catch (WriteStateException e) { - rollback(); + // if Manifest write results in dirty WriteStateException it's not safe to remove + // new metadata files, because if Manifest was actually written to disk and its deletion + // fails it will reference these new metadata files. + // In the future, we might decide to add more fine grained check to understand if after + // WriteStateException Manifest deletion has actually failed. + if (e.isDirty() == false) { + rollback(); + } throw e; } } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index 04eb14669e7a2..a8960387e6f58 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -206,10 +206,19 @@ private void performStateRecovery(final boolean enforceRecoverAfterTime, final S if (enforceRecoverAfterTime && recoverAfterTime != null) { if (scheduledRecovery.compareAndSet(false, true)) { logger.info("delaying initial state recovery for [{}]. {}", recoverAfterTime, reason); - threadPool.schedule(() -> { - if (recovered.compareAndSet(false, true)) { - logger.info("recover_after_time [{}] elapsed. performing state recovery...", recoverAfterTime); - recoveryRunnable.run(); + threadPool.schedule(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + logger.warn("delayed state recovery failed", e); + resetRecoveredFlags(); + } + + @Override + protected void doRun() { + if (recovered.compareAndSet(false, true)) { + logger.info("recover_after_time [{}] elapsed. performing state recovery...", recoverAfterTime); + recoveryRunnable.run(); + } } }, recoverAfterTime, ThreadPool.Names.GENERIC); } @@ -218,10 +227,8 @@ private void performStateRecovery(final boolean enforceRecoverAfterTime, final S threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(final Exception e) { - logger.warn("Recovery failed", e); - // we reset `recovered` in the listener don't reset it here otherwise there might be a race - // that resets it to false while a new recover is already running? 
- GatewayService.this.onFailure("state recovery failed: " + e.getMessage()); + logger.warn("state recovery failed", e); + resetRecoveredFlags(); } @Override @@ -233,11 +240,9 @@ protected void doRun() { } } - private void onFailure(final String message) { + private void resetRecoveredFlags() { recovered.set(false); scheduledRecovery.set(false); - // don't remove the block here, we don't want to allow anything in such a case - logger.info("metadata state not restored, reason: {}", message); } class RecoverStateUpdateTask extends ClusterStateUpdateTask { @@ -257,10 +262,16 @@ public void clusterStateProcessed(final String source, final ClusterState oldSta logger.info("recovered [{}] indices into cluster_state", newState.metaData().indices().size()); } + @Override + public void onNoLongerMaster(String source) { + logger.debug("stepped down as master before recovering state [{}]", source); + resetRecoveredFlags(); + } + @Override public void onFailure(final String source, final Exception e) { logger.info(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); - GatewayService.this.onFailure("failed to update cluster state"); + resetRecoveredFlags(); } } @@ -280,7 +291,8 @@ public ClusterState execute(final ClusterState currentState) { @Override public void onFailure(final String msg) { - GatewayService.this.onFailure(msg); + logger.info("state recovery failed: {}", msg); + resetRecoveredFlags(); } } diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index 543ec9be75d17..d4e6362085238 100644 --- a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -248,6 +248,18 @@ public void beforeIndexAddedToCluster(Index index, Settings indexSettings) { } } + @Override + public void onStoreCreated(ShardId shardId) { + for (IndexEventListener listener : listeners) { + try { + listener.onStoreCreated(shardId); + } catch (Exception e) { + logger.warn("failed to invoke on store created", e); + throw e; + } + } + } + @Override public void onStoreClosed(ShardId shardId) { for (IndexEventListener listener : listeners) { diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 1b1784495e685..9493d2e174968 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -334,7 +334,7 @@ public synchronized IndexShard createShard( IndexShard indexShard = null; ShardLock lock = null; try { - lock = nodeEnv.shardLock(shardId, TimeUnit.SECONDS.toMillis(5)); + lock = nodeEnv.shardLock(shardId, "shard creation", TimeUnit.SECONDS.toMillis(5)); eventListener.beforeIndexShardCreated(shardId, indexSettings); ShardPath path; try { @@ -388,6 +388,7 @@ public synchronized IndexShard createShard( DirectoryService directoryService = indexStore.newDirectoryService(path); store = new Store(shardId, this.indexSettings, directoryService.newDirectory(), lock, new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId))); + eventListener.onStoreCreated(shardId); indexShard = new IndexShard( routing, this.indexSettings, @@ -789,7 +790,9 @@ private void maybeSyncGlobalCheckpoints() { } private void syncRetentionLeases() { - sync(IndexShard::syncRetentionLeases, "retention lease"); + if 
(indexSettings.isSoftDeleteEnabled()) { + sync(IndexShard::syncRetentionLeases, "retention lease"); + } } private void sync(final Consumer sync, final String source) { @@ -806,7 +809,11 @@ private void sync(final Consumer sync, final String source) { case STARTED: try { shard.runUnderPrimaryPermit( - () -> sync.accept(shard), + () -> { + if (shard.isRelocatedPrimary() == false) { + sync.accept(shard); + } + }, e -> { if (e instanceof AlreadyClosedException == false && e instanceof IndexShardClosedException == false) { @@ -926,7 +933,7 @@ public String toString() { public static final Setting RETENTION_LEASE_SYNC_INTERVAL_SETTING = Setting.timeSetting( "index.soft_deletes.retention_lease.sync_interval", - new TimeValue(5, TimeUnit.MINUTES), + new TimeValue(30, TimeUnit.SECONDS), new TimeValue(0, TimeUnit.MILLISECONDS), Property.Dynamic, Property.IndexScope); diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 97b499f9bd309..332155643055c 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -243,9 +243,11 @@ public final class IndexSettings { /** * Specifies if the index should use soft-delete instead of hard-delete for update/delete operations. + * Soft-deletes is enabled by default for 7.0+ indices. */ - public static final Setting INDEX_SOFT_DELETES_SETTING = - Setting.boolSetting("index.soft_deletes.enabled", true, Property.IndexScope, Property.Final); + public static final Setting INDEX_SOFT_DELETES_SETTING = Setting.boolSetting("index.soft_deletes.enabled", + settings -> Boolean.toString(IndexMetaData.SETTING_INDEX_VERSION_CREATED.get(settings).onOrAfter(Version.V_7_0_0)), + Property.IndexScope, Property.Final); /** * Controls how many soft-deleted documents will be kept around before being merged away. Keeping more deleted @@ -259,9 +261,9 @@ public final class IndexSettings { /** * Controls the maximum length of time since a retention lease is created or renewed before it is considered expired. 
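As a small usage sketch of the settings changes above, the two keys with their new defaults spelled out explicitly (setting them here is only for illustration; an index created on 7.0+ gets these values without any configuration):

    Settings indexSettings = Settings.builder()
        .put("index.soft_deletes.enabled", true)                          // default for indices created on or after 7.0.0
        .put("index.soft_deletes.retention_lease.sync_interval", "30s")   // new default, previously 5m
        .build();
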
*/ - public static final Setting INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING = + public static final Setting INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING = Setting.timeSetting( - "index.soft_deletes.retention.lease", + "index.soft_deletes.retention_lease.period", TimeValue.timeValueHours(12), TimeValue.ZERO, Property.Dynamic, @@ -458,7 +460,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); softDeleteEnabled = version.onOrAfter(Version.V_6_5_0) && scopedSettings.get(INDEX_SOFT_DELETES_SETTING); softDeleteRetentionOperations = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); - retentionLeaseMillis = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING).millis(); + retentionLeaseMillis = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING).millis(); warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING); maxResultWindow = scopedSettings.get(MAX_RESULT_WINDOW_SETTING); maxInnerResultWindow = scopedSettings.get(MAX_INNER_RESULT_WINDOW_SETTING); @@ -527,7 +529,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(DEFAULT_PIPELINE, this::setDefaultPipeline); scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, this::setSoftDeleteRetentionOperations); scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_THROTTLED, this::setSearchThrottled); - scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING, this::setRetentionLeaseMillis); + scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING, this::setRetentionLeaseMillis); } private void setSearchIdleAfter(TimeValue searchIdleAfter) { this.searchIdleAfter = searchIdleAfter; } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java index 123802c951097..5776edd69fc83 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java @@ -19,9 +19,11 @@ package org.elasticsearch.index.analysis; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.elasticsearch.Version; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.indices.analysis.PreBuiltCacheFactory; import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; @@ -32,12 +34,16 @@ * Provides pre-configured, shared {@link TokenFilter}s. */ public final class PreConfiguredTokenFilter extends PreConfiguredAnalysisComponent { + + private static final DeprecationLogger DEPRECATION_LOGGER + = new DeprecationLogger(LogManager.getLogger(PreConfiguredTokenFilter.class)); + /** * Create a pre-configured token filter that may not vary at all. 
*/ public static PreConfiguredTokenFilter singleton(String name, boolean useFilterForMultitermQueries, Function create) { - return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, false, CachingStrategy.ONE, + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, true, CachingStrategy.ONE, (tokenStream, version) -> create.apply(tokenStream)); } @@ -45,27 +51,37 @@ public static PreConfiguredTokenFilter singleton(String name, boolean useFilterF * Create a pre-configured token filter that may not vary at all. */ public static PreConfiguredTokenFilter singleton(String name, boolean useFilterForMultitermQueries, - boolean useFilterForParsingSynonyms, + boolean allowForSynonymParsing, Function create) { - return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, useFilterForParsingSynonyms, CachingStrategy.ONE, + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, allowForSynonymParsing, CachingStrategy.ONE, (tokenStream, version) -> create.apply(tokenStream)); } /** - * Create a pre-configured token filter that may not vary at all. + * Create a pre-configured token filter that may vary based on the Elasticsearch version. */ public static PreConfiguredTokenFilter singletonWithVersion(String name, boolean useFilterForMultitermQueries, BiFunction create) { - return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, false, CachingStrategy.ONE, + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, true, CachingStrategy.ONE, (tokenStream, version) -> create.apply(tokenStream, version)); } + /** + * Create a pre-configured token filter that may vary based on the Elasticsearch version. + */ + public static PreConfiguredTokenFilter singletonWithVersion(String name, boolean useFilterForMultitermQueries, + boolean useFilterForParsingSynonyms, + BiFunction create) { + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, useFilterForParsingSynonyms, CachingStrategy.ONE, + (tokenStream, version) -> create.apply(tokenStream, version)); + } + /** * Create a pre-configured token filter that may vary based on the Lucene version. 
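A hypothetical registration using the new `singletonWithVersion` overload added above; the filter name and the Lucene `LowerCaseFilter` are stand-ins, not part of the change:

    // Registers a version-aware pre-configured filter that declares it cannot be used while
    // parsing synonyms, so the 7.0 gate shown below will apply to it.
    PreConfiguredTokenFilter example = PreConfiguredTokenFilter.singletonWithVersion(
        "example_lowercase",
        false,   // useFilterForMultitermQueries
        false,   // useFilterForParsingSynonyms
        (tokenStream, version) -> new org.apache.lucene.analysis.LowerCaseFilter(tokenStream));
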
*/ public static PreConfiguredTokenFilter luceneVersion(String name, boolean useFilterForMultitermQueries, BiFunction create) { - return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, false, CachingStrategy.LUCENE, + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, true, CachingStrategy.LUCENE, (tokenStream, version) -> create.apply(tokenStream, version.luceneVersion)); } @@ -74,18 +90,18 @@ public static PreConfiguredTokenFilter luceneVersion(String name, boolean useFil */ public static PreConfiguredTokenFilter elasticsearchVersion(String name, boolean useFilterForMultitermQueries, BiFunction create) { - return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, false, CachingStrategy.ELASTICSEARCH, create); + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, true, CachingStrategy.ELASTICSEARCH, create); } private final boolean useFilterForMultitermQueries; - private final boolean useFilterForParsingSynonyms; + private final boolean allowForSynonymParsing; private final BiFunction create; - private PreConfiguredTokenFilter(String name, boolean useFilterForMultitermQueries, boolean useFilterForParsingSynonyms, + private PreConfiguredTokenFilter(String name, boolean useFilterForMultitermQueries, boolean allowForSynonymParsing, PreBuiltCacheFactory.CachingStrategy cache, BiFunction create) { super(name, cache); this.useFilterForMultitermQueries = useFilterForMultitermQueries; - this.useFilterForParsingSynonyms = useFilterForParsingSynonyms; + this.allowForSynonymParsing = allowForSynonymParsing; this.create = create; } @@ -118,10 +134,17 @@ public TokenStream create(TokenStream tokenStream) { @Override public TokenFilterFactory getSynonymFilter() { - if (useFilterForParsingSynonyms) { + if (allowForSynonymParsing) { + return this; + } + if (version.onOrAfter(Version.V_7_0_0)) { + throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms"); + } + else { + DEPRECATION_LOGGER.deprecatedAndMaybeLog(name(), "Token filter [" + name() + + "] will not be usable to parse synonyms after v7.0"); return this; } - return IDENTITY_FILTER; } }; } @@ -138,10 +161,17 @@ public TokenStream create(TokenStream tokenStream) { @Override public TokenFilterFactory getSynonymFilter() { - if (useFilterForParsingSynonyms) { + if (allowForSynonymParsing) { + return this; + } + if (version.onOrAfter(Version.V_7_0_0)) { + throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms"); + } + else { + DEPRECATION_LOGGER.deprecatedAndMaybeLog(name(), "Token filter [" + name() + + "] will not be usable to parse synonyms after v7.0"); return this; } - return IDENTITY_FILTER; } }; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index d10690379eddd..addb16d58d031 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -191,6 +191,13 @@ private static int indexOfKeptCommits(List commits, long return 0; } + /** + * Checks whether the deletion policy is holding on to snapshotted commits + */ + synchronized boolean hasSnapshottedCommits() { + return snapshottedCommits.isEmpty() == false; + } + /** * Checks if the deletion policy can release some index commits with the latest global checkpoint. 
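The two getSynonymFilter() bodies above implement the same gate; condensed here into one hedged sketch for readability (names mirror the hunk, `self` stands for the enclosing factory):

    TokenFilterFactory getSynonymFilter(boolean allowForSynonymParsing, Version version, TokenFilterFactory self) {
        if (allowForSynonymParsing) {
            return self;                               // filter explicitly supports synonym parsing
        }
        if (version.onOrAfter(Version.V_7_0_0)) {      // 7.0+ indices now fail hard
            throw new IllegalArgumentException("Token filter [" + self.name() + "] cannot be used to parse synonyms");
        }
        // older indices keep the previous lenient behaviour but emit a deprecation warning
        DEPRECATION_LOGGER.deprecatedAndMaybeLog(self.name(),
            "Token filter [" + self.name() + "] will not be usable to parse synonyms after v7.0");
        return self;
    }
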
*/ diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index e450e93e9d397..e4c9d6156d197 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -114,7 +114,6 @@ public abstract class Engine implements Closeable { public static final String SYNC_COMMIT_ID = "sync_id"; public static final String HISTORY_UUID_KEY = "history_uuid"; public static final String MIN_RETAINED_SEQNO = "min_retained_seq_no"; - public static final String RETENTION_LEASES = "retention_leases"; public static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID = "max_unsafe_auto_id_timestamp"; protected final ShardId shardId; @@ -265,6 +264,20 @@ protected final DocsStats docsStats(IndexReader indexReader) { return new DocsStats(numDocs, numDeletedDocs, sizeInBytes); } + /** + * Performs the pre-closing checks on the {@link Engine}. + * + * @throws IllegalStateException if the sanity checks failed + */ + public void verifyEngineBeforeIndexClosing() throws IllegalStateException { + final long globalCheckpoint = engineConfig.getGlobalCheckpointSupplier().getAsLong(); + final long maxSeqNo = getSeqNoStats(globalCheckpoint).getMaxSeqNo(); + if (globalCheckpoint != maxSeqNo) { + throw new IllegalStateException("Global checkpoint [" + globalCheckpoint + + "] mismatches maximum sequence number [" + maxSeqNo + "] on index shard " + shardId); + } + } + /** * A throttling class that can be activated, causing the * {@code acquireThrottle} method to block on a lock when throttling @@ -387,7 +400,7 @@ public Condition newCondition() { */ public abstract DeleteResult delete(Delete delete) throws IOException; - public abstract NoOpResult noOp(NoOp noOp); + public abstract NoOpResult noOp(NoOp noOp) throws IOException; /** * Base class for index and delete operation results @@ -620,14 +633,14 @@ protected final GetResult getFromSearcher(Get get, BiFunction segments = new HashMap<>(); // first, go over and compute the search ones... @@ -960,8 +972,8 @@ protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boole segment = new Segment(info.info.name); segment.search = false; segment.committed = true; - segment.docCount = info.info.maxDoc(); - segment.delDocCount = info.getDelCount(); + segment.delDocCount = info.getDelCount() + info.getSoftDelCount(); + segment.docCount = info.info.maxDoc() - segment.delDocCount; segment.version = info.info.getVersion(); segment.compound = info.info.getUseCompoundFile(); try { @@ -1940,7 +1952,7 @@ public interface TranslogRecoveryRunner { * Moreover, operations that are optimized using the MSU optimization must not be processed twice as this will create duplicates * in Lucene. To avoid this we check the local checkpoint tracker to see if an operation was already processed. * - * @see #initializeMaxSeqNoOfUpdatesOrDeletes() + * @see #reinitializeMaxSeqNoOfUpdatesOrDeletes() * @see #advanceMaxSeqNoOfUpdatesOrDeletes(long) */ public final long getMaxSeqNoOfUpdatesOrDeletes() { @@ -1948,10 +1960,10 @@ public final long getMaxSeqNoOfUpdatesOrDeletes() { } /** - * A primary shard calls this method once to initialize the max_seq_no_of_updates marker using the + * A primary shard calls this method to re-initialize the max_seq_no_of_updates marker using the * max_seq_no from Lucene index and translog before replaying the local translog in its local recovery. 
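For concreteness, the invariant that verifyEngineBeforeIndexClosing() (added earlier in the Engine.java hunk above) enforces, with made-up numbers:

    // A shard may only be closed once its global checkpoint has caught up with its max seq_no,
    // i.e. every acknowledged operation has been processed on all in-sync copies.
    long globalCheckpoint = 41;   // illustrative values, not taken from the change
    long maxSeqNo = 42;
    if (globalCheckpoint != maxSeqNo) {
        throw new IllegalStateException("Global checkpoint [" + globalCheckpoint
            + "] mismatches maximum sequence number [" + maxSeqNo + "]");
    }
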
*/ - public abstract void initializeMaxSeqNoOfUpdatesOrDeletes(); + public abstract void reinitializeMaxSeqNoOfUpdatesOrDeletes(); /** * A replica shard receives a new max_seq_no_of_updates from its primary shard, then calls this method diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index acedd8356ea9e..bc2552310f916 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -51,7 +51,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lucene.LoggerInfoStream; import org.elasticsearch.common.lucene.Lucene; @@ -75,7 +74,6 @@ import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; import org.elasticsearch.index.seqno.LocalCheckpointTracker; -import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ElasticsearchMergePolicy; @@ -476,6 +474,11 @@ Translog getTranslog() { return translog; } + // Package private for testing purposes only + boolean hasSnapshottedCommits() { + return combinedDeletionPolicy.hasSnapshottedCommits(); + } + @Override public boolean isTranslogSyncNeeded() { return getTranslog().syncNeeded(); @@ -497,16 +500,11 @@ public void syncTranslog() throws IOException { } /** - * Creates a new history snapshot for reading operations since the provided seqno. - * The returned snapshot can be retrieved from either Lucene index or translog files. + * Creates a new history snapshot for reading operations since the provided seqno from the translog. 
*/ @Override public Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { - if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { - return newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false); - } else { - return getTranslog().newSnapshotFromMinSeqNo(startingSeqNo); - } + return getTranslog().newSnapshotFromMinSeqNo(startingSeqNo); } /** @@ -621,13 +619,13 @@ public GetResult get(Get get, BiFunction search return GetResult.NOT_EXISTS; } if (get.versionType().isVersionConflictForReads(versionValue.version, get.version())) { - throw new VersionConflictEngineException(shardId, get.type(), get.id(), + throw new VersionConflictEngineException(shardId, get.id(), get.versionType().explainConflictForReads(versionValue.version, get.version())); } if (get.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( get.getIfSeqNo() != versionValue.seqNo || get.getIfPrimaryTerm() != versionValue.term )) { - throw new VersionConflictEngineException(shardId, get.type(), get.id(), + throw new VersionConflictEngineException(shardId, get.id(), get.getIfSeqNo(), get.getIfPrimaryTerm(), versionValue.seqNo, versionValue.term); } if (get.isReadFromTranslog()) { @@ -948,6 +946,7 @@ protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IO } } } + markSeqNoAsSeen(index.seqNo()); return plan; } @@ -987,13 +986,13 @@ protected final IndexingStrategy planIndexingAsPrimary(Index index) throws IOExc currentNotFoundOrDeleted = versionValue.isDelete(); } if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) { - final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.type(), index.id(), + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.id(), index.getIfSeqNo(), index.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0); plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm()); } else if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( versionValue.seqNo != index.getIfSeqNo() || versionValue.term != index.getIfPrimaryTerm() )) { - final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.type(), index.id(), + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.id(), index.getIfSeqNo(), index.getIfPrimaryTerm(), versionValue.seqNo, versionValue.term); plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm()); } else if (index.versionType().isVersionConflictForWrites( @@ -1301,6 +1300,7 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws delete.seqNo(), delete.version()); } } + markSeqNoAsSeen(delete.seqNo()); return plan; } @@ -1326,13 +1326,13 @@ protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOE } final DeletionStrategy plan; if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) { - final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.type(), delete.id(), + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.id(), delete.getIfSeqNo(), delete.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0); plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted); } else if 
(delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( versionValue.seqNo != delete.getIfSeqNo() || versionValue.term != delete.getIfPrimaryTerm() )) { - final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.type(), delete.id(), + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.id(), delete.getIfSeqNo(), delete.getIfPrimaryTerm(), versionValue.seqNo, versionValue.term); plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted); } else if (delete.versionType().isVersionConflictForWrites(currentVersion, delete.version(), currentlyDeleted)) { @@ -1452,12 +1452,19 @@ public void maybePruneDeletes() { } @Override - public NoOpResult noOp(final NoOp noOp) { - NoOpResult noOpResult; + public NoOpResult noOp(final NoOp noOp) throws IOException { + final NoOpResult noOpResult; try (ReleasableLock ignored = readLock.acquire()) { + ensureOpen(); + markSeqNoAsSeen(noOp.seqNo()); noOpResult = innerNoOp(noOp); } catch (final Exception e) { - noOpResult = new NoOpResult(getPrimaryTerm(), noOp.seqNo(), e); + try { + maybeFailEngine("noop", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw e; } return noOpResult; } @@ -2344,13 +2351,7 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get())); commitData.put(HISTORY_UUID_KEY, historyUUID); if (softDeleteEnabled) { - /* - * We sample these from the policy (which occurs under a lock) to ensure that we have a consistent view of the minimum - * retained sequence number, and the retention leases. - */ - final Tuple retentionPolicy = softDeletesPolicy.getRetentionPolicy(); - commitData.put(Engine.MIN_RETAINED_SEQNO, Long.toString(retentionPolicy.v1())); - commitData.put(Engine.RETENTION_LEASES, RetentionLeases.encodeRetentionLeases(retentionPolicy.v2())); + commitData.put(Engine.MIN_RETAINED_SEQNO, Long.toString(softDeletesPolicy.getMinRetainedSeqNo())); } logger.trace("committing writer with commit data [{}]", commitData); return commitData.entrySet().iterator(); @@ -2429,9 +2430,11 @@ public long getLocalCheckpoint() { return localCheckpointTracker.getCheckpoint(); } - @Override - public void waitForOpsToComplete(long seqNo) throws InterruptedException { - localCheckpointTracker.waitForOpsToComplete(seqNo); + /** + * Marks the given seq_no as seen and advances the max_seq_no of this engine to at least that value. 
+ */ + protected final void markSeqNoAsSeen(long seqNo) { + localCheckpointTracker.advanceMaxSeqNo(seqNo); } /** @@ -2541,36 +2544,40 @@ public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperS @Override public boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException { - if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { - return getMinRetainedSeqNo() <= startingSeqNo; - } else { - final long currentLocalCheckpoint = getLocalCheckpointTracker().getCheckpoint(); - final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1); - try (Translog.Snapshot snapshot = getTranslog().newSnapshotFromMinSeqNo(startingSeqNo)) { - Translog.Operation operation; - while ((operation = snapshot.next()) != null) { - if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - tracker.markSeqNoAsCompleted(operation.seqNo()); - } + final long currentLocalCheckpoint = getLocalCheckpointTracker().getCheckpoint(); + final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1); + try (Translog.Snapshot snapshot = getTranslog().newSnapshotFromMinSeqNo(startingSeqNo)) { + Translog.Operation operation; + while ((operation = snapshot.next()) != null) { + if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { + tracker.markSeqNoAsCompleted(operation.seqNo()); } } - return tracker.getCheckpoint() >= currentLocalCheckpoint; } + return tracker.getCheckpoint() >= currentLocalCheckpoint; } /** * Returns the minimum seqno that is retained in the Lucene index. * Operations whose seq# are at least this value should exist in the Lucene index. */ - final long getMinRetainedSeqNo() { + public final long getMinRetainedSeqNo() { assert softDeleteEnabled : Thread.currentThread().getName(); return softDeletesPolicy.getMinRetainedSeqNo(); } @Override - public Closeable acquireRetentionLockForPeerRecovery() { + public Closeable acquireRetentionLock() { if (softDeleteEnabled) { - return softDeletesPolicy.acquireRetentionLock(); + final Releasable softDeletesRetentionLock = softDeletesPolicy.acquireRetentionLock(); + final Closeable translogRetentionLock; + try { + translogRetentionLock = translog.acquireRetentionLock(); + } catch (Exception e) { + softDeletesRetentionLock.close(); + throw e; + } + return () -> IOUtils.close(translogRetentionLock, softDeletesRetentionLock); } else { return translog.acquireRetentionLock(); } @@ -2723,9 +2730,7 @@ private boolean assertMaxSeqNoOfUpdatesIsAdvanced(Term id, long seqNo, boolean a } @Override - public void initializeMaxSeqNoOfUpdatesOrDeletes() { - assert getMaxSeqNoOfUpdatesOrDeletes() == SequenceNumbers.UNASSIGNED_SEQ_NO : - "max_seq_no_of_updates is already initialized [" + getMaxSeqNoOfUpdatesOrDeletes() + "]"; + public void reinitializeMaxSeqNoOfUpdatesOrDeletes() { final long maxSeqNo = SequenceNumbers.max(localCheckpointTracker.getMaxSeqNo(), translog.getMaxSeqNo()); advanceMaxSeqNoOfUpdatesOrDeletes(maxSeqNo); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index 5c09708b62cae..5b10db69e94c7 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -34,6 +34,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.lucene.Lucene; import 
org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.SeqNoStats; @@ -142,6 +143,16 @@ protected void assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final } } + @Override + public void verifyEngineBeforeIndexClosing() throws IllegalStateException { + // the value of the global checkpoint is verified when the read-only engine is opened, + // and it is not expected to change during the lifecycle of the engine. We could also + // check this value before closing the read-only engine but if something went wrong + // and the global checkpoint is not in-sync with the max. sequence number anymore, + // checking the value here again would prevent the read-only engine to be closed and + // reopened as an internal engine, which would be the path to fix the issue. + } + protected final DirectoryReader wrapReader(DirectoryReader reader, Function readerWrapperFunction) throws IOException { reader = ElasticsearchDirectoryReader.wrap(reader, engineConfig.getShardId()); @@ -262,7 +273,7 @@ public void syncTranslog() { } @Override - public Closeable acquireRetentionLockForPeerRecovery() { + public Closeable acquireRetentionLock() { return () -> {}; } @@ -277,18 +288,7 @@ public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperS @Override public Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { - return new Translog.Snapshot() { - @Override - public void close() { } - @Override - public int totalOperations() { - return 0; - } - @Override - public Translog.Operation next() { - return null; - } - }; + return newEmptySnapshot(); } @Override @@ -301,6 +301,11 @@ public boolean hasCompleteOperationHistory(String source, MapperService mapperSe return false; } + @Override + public long getMinRetainedSeqNo() { + throw new UnsupportedOperationException(); + } + @Override public TranslogStats getTranslogStats() { return translogStats; @@ -316,10 +321,6 @@ public long getLocalCheckpoint() { return seqNoStats.getLocalCheckpoint(); } - @Override - public void waitForOpsToComplete(long seqNo) { - } - @Override public SeqNoStats getSeqNoStats(long globalCheckpoint) { return new SeqNoStats(seqNoStats.getMaxSeqNo(), seqNoStats.getLocalCheckpoint(), globalCheckpoint); @@ -414,7 +415,15 @@ public int fillSeqNoGaps(long primaryTerm) { } @Override - public Engine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) { + public Engine recoverFromTranslog(final TranslogRecoveryRunner translogRecoveryRunner, final long recoverUpToSeqNo) { + try (ReleasableLock lock = readLock.acquire()) { + ensureOpen(); + try (Translog.Snapshot snapshot = newEmptySnapshot()) { + translogRecoveryRunner.run(this, snapshot); + } catch (final Exception e) { + throw new EngineException(shardId, "failed to recover from empty translog snapshot", e); + } + } return this; } @@ -441,7 +450,7 @@ public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) { } @Override - public void initializeMaxSeqNoOfUpdatesOrDeletes() { + public void reinitializeMaxSeqNoOfUpdatesOrDeletes() { advanceMaxSeqNoOfUpdatesOrDeletes(seqNoStats.getMaxSeqNo()); } @@ -453,4 +462,22 @@ protected void processReaders(IndexReader reader, IndexReader previousReader) { public boolean refreshNeeded() { return false; } + + 
private Translog.Snapshot newEmptySnapshot() { + return new Translog.Snapshot() { + @Override + public void close() { + } + + @Override + public int totalOperations() { + return 0; + } + + @Override + public Translog.Operation next() { + return null; + } + }; + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java index 49b8f9d3483f2..2d9fc0077324d 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java @@ -21,7 +21,6 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.search.Query; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.seqno.RetentionLease; @@ -107,10 +106,6 @@ private synchronized void releaseRetentionLock() { * Operations whose seq# is least this value should exist in the Lucene index. */ synchronized long getMinRetainedSeqNo() { - return getRetentionPolicy().v1(); - } - - public synchronized Tuple getRetentionPolicy() { /* * When an engine is flushed, we need to provide it the latest collection of retention leases even when the soft deletes policy is * locked for peer recovery. @@ -125,7 +120,7 @@ public synchronized Tuple getRetentionPolicy() { * localCheckpointOfSafeCommit. * - Changes APIs are driven by a combination of the global checkpoint, retention operations, and retention leases. Here we * prefer using the global checkpoint instead of the maximum sequence number because only operations up to the global - * checkpoint are exposed in the the changes APIs. + * checkpoint are exposed in the changes APIs. */ // calculate the minimum sequence number to retain based on retention leases @@ -137,11 +132,13 @@ public synchronized Tuple getRetentionPolicy() { .orElse(Long.MAX_VALUE); /* * The minimum sequence number to retain is the minimum of the minimum based on retention leases, and the number of operations - * below the global checkpoint to retain (index.soft_deletes.retention.operations). + * below the global checkpoint to retain (index.soft_deletes.retention.operations). The additional increments on the global + * checkpoint and the local checkpoint of the safe commit are due to the fact that we want to retain all operations above + * those checkpoints. 
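A worked example of the retention computation described in the comment above (the numbers are made up; the real method additionally prevents minRetainedSeqNo from going backwards via the max() that follows):

    long globalCheckpoint = 100;                  // from the global checkpoint supplier
    long retentionOperations = 10;                // index.soft_deletes.retention.operations
    long minimumRetainingSequenceNumber = 95;     // smallest retaining seq_no across retention leases
    long localCheckpointOfSafeCommit = 90;

    // keep enough history for the changes API and for every retention lease ...
    long minSeqNoForQueryingChanges =
        Math.min(1 + globalCheckpoint - retentionOperations, minimumRetainingSequenceNumber);      // 91
    // ... but never discard operations above the safe commit's local checkpoint
    long minSeqNoToRetain = Math.min(minSeqNoForQueryingChanges, 1 + localCheckpointOfSafeCommit); // 91
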
*/ final long minSeqNoForQueryingChanges = - Math.min(globalCheckpointSupplier.getAsLong() - retentionOperations, minimumRetainingSequenceNumber); - final long minSeqNoToRetain = Math.min(minSeqNoForQueryingChanges, localCheckpointOfSafeCommit) + 1; + Math.min(1 + globalCheckpointSupplier.getAsLong() - retentionOperations, minimumRetainingSequenceNumber); + final long minSeqNoToRetain = Math.min(minSeqNoForQueryingChanges, 1 + localCheckpointOfSafeCommit); /* * We take the maximum as minSeqNoToRetain can go backward as the retention operations value can be changed in settings, or from @@ -149,7 +146,7 @@ public synchronized Tuple getRetentionPolicy() { */ minRetainedSeqNo = Math.max(minRetainedSeqNo, minSeqNoToRetain); } - return Tuple.tuple(minRetainedSeqNo, retentionLeases); + return minRetainedSeqNo; } /** diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java index 357c9c107836e..0f6c217409c30 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java +++ b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java @@ -28,25 +28,25 @@ public class VersionConflictEngineException extends EngineException { public VersionConflictEngineException(ShardId shardId, Engine.Operation op, long currentVersion, boolean deleted) { - this(shardId, op.type(), op.id(), op.versionType().explainConflictForWrites(currentVersion, op.version(), deleted)); + this(shardId, op.id(), op.versionType().explainConflictForWrites(currentVersion, op.version(), deleted)); } - public VersionConflictEngineException(ShardId shardId, String type, String id, + public VersionConflictEngineException(ShardId shardId, String id, long compareAndWriteSeqNo, long compareAndWriteTerm, long currentSeqNo, long currentTerm) { - this(shardId, type, id, "required seqNo [" + compareAndWriteSeqNo + "], primary term [" + compareAndWriteTerm +"]." + + this(shardId, id, "required seqNo [" + compareAndWriteSeqNo + "], primary term [" + compareAndWriteTerm +"]." + (currentSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO ? " but no document was found" : " current document has seqNo [" + currentSeqNo + "] and primary term ["+ currentTerm + "]" )); } - public VersionConflictEngineException(ShardId shardId, String type, String id, String explanation) { - this(shardId, null, type, id, explanation); + public VersionConflictEngineException(ShardId shardId, String id, String explanation) { + this(shardId, null, id, explanation); } - public VersionConflictEngineException(ShardId shardId, Throwable cause, String type, String id, String explanation) { - this(shardId, "[{}][{}]: version conflict, {}", cause, type, id, explanation); + public VersionConflictEngineException(ShardId shardId, Throwable cause, String id, String explanation) { + this(shardId, "[{}]: version conflict, {}", cause, id, explanation); } public VersionConflictEngineException(ShardId shardId, String msg, Throwable cause, Object... 
params) { diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index 6aad80c4421e4..afd1d9e368480 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -112,15 +112,15 @@ protected void resize(int newSize) { } public long getValue() { - if (count == 0) { - throw new IllegalStateException("A document doesn't have a value for a field! " + - "Use doc[].size()==0 to check if a document is missing a field!"); - } - return values[0]; + return get(0); } @Override public Long get(int index) { + if (count == 0) { + throw new IllegalStateException("A document doesn't have a value for a field! " + + "Use doc[].size()==0 to check if a document is missing a field!"); + } return values[index]; } @@ -151,15 +151,15 @@ public Dates(SortedNumericDocValues in, boolean isNanos) { * in. */ public JodaCompatibleZonedDateTime getValue() { - if (count == 0) { - throw new IllegalStateException("A document doesn't have a value for a field! " + - "Use doc[].size()==0 to check if a document is missing a field!"); - } return get(0); } @Override public JodaCompatibleZonedDateTime get(int index) { + if (count == 0) { + throw new IllegalStateException("A document doesn't have a value for a field! " + + "Use doc[].size()==0 to check if a document is missing a field!"); + } if (index >= count) { throw new IndexOutOfBoundsException( "attempted to fetch the [" + index + "] date when there are only [" @@ -240,15 +240,15 @@ public SortedNumericDoubleValues getInternalValues() { } public double getValue() { - if (count == 0) { - throw new IllegalStateException("A document doesn't have a value for a field! " + - "Use doc[].size()==0 to check if a document is missing a field!"); - } - return values[0]; + return get(0); } @Override public Double get(int index) { + if (count == 0) { + throw new IllegalStateException("A document doesn't have a value for a field! " + + "Use doc[].size()==0 to check if a document is missing a field!"); + } return values[index]; } @@ -297,11 +297,7 @@ protected void resize(int newSize) { } public GeoPoint getValue() { - if (count == 0) { - throw new IllegalStateException("A document doesn't have a value for a field! " + - "Use doc[].size()==0 to check if a document is missing a field!"); - } - return values[0]; + return get(0); } public double getLat() { @@ -330,6 +326,10 @@ public double getLon() { @Override public GeoPoint get(int index) { + if (count == 0) { + throw new IllegalStateException("A document doesn't have a value for a field! " + + "Use doc[].size()==0 to check if a document is missing a field!"); + } final GeoPoint point = values[index]; return new GeoPoint(point.lat(), point.lon()); } @@ -409,15 +409,15 @@ protected void resize(int newSize) { } public boolean getValue() { - if (count == 0) { - throw new IllegalStateException("A document doesn't have a value for a field! " + - "Use doc[].size()==0 to check if a document is missing a field!"); - } - return values[0]; + return get(0); } @Override public Boolean get(int index) { + if (count == 0) { + throw new IllegalStateException("A document doesn't have a value for a field! 
" + + "Use doc[].size()==0 to check if a document is missing a field!"); + } return values[index]; } @@ -492,14 +492,14 @@ public Strings(SortedBinaryDocValues in) { @Override public String get(int index) { + if (count == 0) { + throw new IllegalStateException("A document doesn't have a value for a field! " + + "Use doc[].size()==0 to check if a document is missing a field!"); + } return values[index].get().utf8ToString(); } public String getValue() { - if (count == 0) { - throw new IllegalStateException("A document doesn't have a value for a field! " + - "Use doc[].size()==0 to check if a document is missing a field!"); - } return get(0); } } @@ -512,6 +512,10 @@ public BytesRefs(SortedBinaryDocValues in) { @Override public BytesRef get(int index) { + if (count == 0) { + throw new IllegalStateException("A document doesn't have a value for a field! " + + "Use doc[].size()==0 to check if a document is missing a field!"); + } /** * We need to make a copy here because {@link BinaryScriptDocValues} might reuse the * returned value and the same instance might be used to @@ -521,10 +525,6 @@ public BytesRef get(int index) { } public BytesRef getValue() { - if (count == 0) { - throw new IllegalStateException("A document doesn't have a value for a field! " + - "Use doc[].size()==0 to check if a document is missing a field!"); - } return get(0); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java index 4781a88cecd0c..52adcfe7d1e9f 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java @@ -177,6 +177,11 @@ public long nextValue() throws IOException { public int docValueCount() { return dv.docValueCount(); } + + @Override + public int nextDoc() throws IOException { + return dv.nextDoc(); + } }; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BaseGeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BaseGeoShapeFieldMapper.java index ea30f1c5c2312..74892bf7d516c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BaseGeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BaseGeoShapeFieldMapper.java @@ -190,7 +190,7 @@ public Mapper.Builder parse(String name, Map node, ParserContext } } final Builder builder; - if (parsedDeprecatedParams || parserContext.indexVersionCreated().before(Version.V_7_0_0)) { + if (parsedDeprecatedParams || parserContext.indexVersionCreated().before(Version.V_6_6_0)) { // Legacy index-based shape builder = new LegacyGeoShapeFieldMapper.Builder(name, deprecatedParameters); } else { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java index c0b931225b8e6..9c8726a498a3c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java @@ -201,13 +201,15 @@ public GeoShapeFieldType fieldType() { return (GeoShapeFieldType)fieldType; } - private void setupFieldTypeDeprecatedParameters() { + private void setupFieldTypeDeprecatedParameters(BuilderContext context) { GeoShapeFieldType ft = fieldType(); if (deprecatedParameters.strategy != null) { 
ft.setStrategy(deprecatedParameters.strategy); } if (deprecatedParameters.tree != null) { ft.setTree(deprecatedParameters.tree); + } else if (context.indexCreatedVersion().before(Version.V_6_6_0)) { + ft.setTree(DeprecatedParameters.PrefixTrees.GEOHASH); } if (deprecatedParameters.treeLevels != null) { ft.setTreeLevels(deprecatedParameters.treeLevels); @@ -275,7 +277,7 @@ protected void setupFieldType(BuilderContext context) { } // setup the deprecated parameters and the prefix tree configuration - setupFieldTypeDeprecatedParameters(); + setupFieldTypeDeprecatedParameters(context); setupPrefixTrees(); } @@ -524,7 +526,9 @@ private void indexShape(ParseContext context, Shape shape) { protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { super.doXContentBody(builder, includeDefaults, params); - if (includeDefaults || fieldType().tree().equals(DeprecatedParameters.Defaults.TREE) == false) { + if (includeDefaults + || (fieldType().tree().equals(indexCreatedVersion.onOrAfter(Version.V_6_6_0) ? + DeprecatedParameters.Defaults.TREE : DeprecatedParameters.PrefixTrees.GEOHASH)) == false) { builder.field(DeprecatedParameters.Names.TREE.getPreferredName(), fieldType().tree()); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 4d17afae614b8..6b0819f09cf6c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -174,7 +174,7 @@ public final String simpleName() { public abstract String name(); /** - * Returns a name representing the the type of this mapper. + * Returns a name representing the type of this mapper. 
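The LegacyGeoShapeFieldMapper hunks above make the prefix-tree default depend on the index-created version; a condensed sketch of the resulting rule (the helper name is hypothetical):

    // With no explicit `tree` in the mapping, indices created before 6.6 keep the old geohash
    // prefix tree; later indices fall back to the mapper's current default tree.
    String effectiveTree(String explicitTree, Version indexCreatedVersion) {
        if (explicitTree != null) {
            return explicitTree;
        }
        return indexCreatedVersion.before(Version.V_6_6_0)
            ? DeprecatedParameters.PrefixTrees.GEOHASH
            : DeprecatedParameters.Defaults.TREE;
    }
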
*/ public abstract String typeName(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 06e12ca8b5e4c..927bce5d9d6dd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.mapper; +import com.fasterxml.jackson.core.JsonParseException; + import org.apache.lucene.document.DoublePoint; import org.apache.lucene.document.Field; import org.apache.lucene.document.FloatPoint; @@ -1042,8 +1044,8 @@ protected void parseCreateField(ParseContext context, List field } else { try { numericValue = fieldType().type.parse(parser, coerce.value()); - } catch (IllegalArgumentException e) { - if (ignoreMalformed.value()) { + } catch (IllegalArgumentException | JsonParseException e) { + if (ignoreMalformed.value() && parser.currentToken().isValue()) { context.addIgnoredField(fieldType.name()); return; } else { diff --git a/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java index 0e2a19e2b0754..0fbd4cd4fc566 100644 --- a/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java @@ -190,6 +190,27 @@ protected Query doToQuery(QueryShardContext context) throws IOException { return new DisjunctionMaxQuery(luceneQueries, tieBreaker); } + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { + DisMaxQueryBuilder newBuilder = new DisMaxQueryBuilder(); + boolean changed = false; + for (QueryBuilder query : queries) { + QueryBuilder result = query.rewrite(queryShardContext); + if (result != query) { + changed = true; + } + newBuilder.add(result); + } + if (changed) { + newBuilder.queryName(queryName); + newBuilder.boost(boost); + newBuilder.tieBreaker(tieBreaker); + return newBuilder; + } else { + return this; + } + } + @Override protected int doHashCode() { return Objects.hash(queries, tieBreaker); diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java index b39f2ab5a91e6..e174b3fd49eee 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java @@ -143,38 +143,50 @@ protected static IntervalsSource combineSources(List sources, i protected List analyzeTerms(TokenStream ts) throws IOException { List terms = new ArrayList<>(); TermToBytesRefAttribute bytesAtt = ts.addAttribute(TermToBytesRefAttribute.class); + PositionIncrementAttribute posAtt = ts.addAttribute(PositionIncrementAttribute.class); ts.reset(); while (ts.incrementToken()) { BytesRef term = bytesAtt.getBytesRef(); - terms.add(Intervals.term(BytesRef.deepCopyOf(term))); + int precedingSpaces = posAtt.getPositionIncrement() - 1; + terms.add(extend(Intervals.term(BytesRef.deepCopyOf(term)), precedingSpaces)); } ts.end(); return terms; } + public static IntervalsSource extend(IntervalsSource source, int precedingSpaces) { + if (precedingSpaces == 0) { + return source; + } + return Intervals.extend(source, precedingSpaces, 0); + } + protected IntervalsSource analyzeSynonyms(TokenStream ts, int maxGaps, boolean ordered) throws IOException 
{ List terms = new ArrayList<>(); List synonyms = new ArrayList<>(); TermToBytesRefAttribute bytesAtt = ts.addAttribute(TermToBytesRefAttribute.class); PositionIncrementAttribute posAtt = ts.addAttribute(PositionIncrementAttribute.class); ts.reset(); + int spaces = 0; while (ts.incrementToken()) { - if (posAtt.getPositionIncrement() == 1) { + int posInc = posAtt.getPositionIncrement(); + if (posInc > 0) { if (synonyms.size() == 1) { - terms.add(synonyms.get(0)); + terms.add(extend(synonyms.get(0), spaces)); } else if (synonyms.size() > 1) { - terms.add(Intervals.or(synonyms.toArray(new IntervalsSource[0]))); + terms.add(extend(Intervals.or(synonyms.toArray(new IntervalsSource[0])), spaces)); } synonyms.clear(); + spaces = posInc - 1; } synonyms.add(Intervals.term(BytesRef.deepCopyOf(bytesAtt.getBytesRef()))); } if (synonyms.size() == 1) { - terms.add(synonyms.get(0)); + terms.add(extend(synonyms.get(0), spaces)); } else { - terms.add(Intervals.or(synonyms.toArray(new IntervalsSource[0]))); + terms.add(extend(Intervals.or(synonyms.toArray(new IntervalsSource[0])), spaces)); } return combineSources(terms, maxGaps, ordered); } diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 88fd5293392b5..80d2d3f94c6ac 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -59,6 +59,12 @@ public void setTieBreaker(float tieBreaker) { public Query parse(MultiMatchQueryBuilder.Type type, Map fieldNames, Object value, String minimumShouldMatch) throws IOException { + boolean hasMappedField = fieldNames.keySet().stream() + .anyMatch(k -> context.fieldMapper(k) != null); + if (hasMappedField == false) { + // all query fields are unmapped + return Queries.newUnmappedFieldsQuery(fieldNames.keySet()); + } final float tieBreaker = groupTieBreaker == null ? type.tieBreaker() : groupTieBreaker; final List queries; switch (type) { @@ -90,7 +96,7 @@ private Query combineGrouped(List groupQuery, float tieBreaker) { } private List buildFieldQueries(MultiMatchQueryBuilder.Type type, Map fieldNames, - Object value, String minimumShouldMatch) throws IOException{ + Object value, String minimumShouldMatch) throws IOException { List queries = new ArrayList<>(); for (String fieldName : fieldNames.keySet()) { if (context.fieldMapper(fieldName) == null) { diff --git a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java index 8249e2600ad55..a19d9ac4abb94 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java @@ -81,6 +81,15 @@ public synchronized long generateSeqNo() { return nextSeqNo++; } + /** + * Marks the provided sequence number as seen and updates the max_seq_no if needed. + */ + public synchronized void advanceMaxSeqNo(long seqNo) { + if (seqNo >= nextSeqNo) { + nextSeqNo = seqNo + 1; + } + } + /** * Marks the processing of the provided sequence number as completed as updates the checkpoint if possible. 
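The new LocalCheckpointTracker#advanceMaxSeqNo shown above only ever moves max_seq_no forward and never marks anything as completed; a small usage sketch:

    LocalCheckpointTracker tracker =
        new LocalCheckpointTracker(SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED);
    tracker.advanceMaxSeqNo(9);                    // max_seq_no is now 9, local checkpoint untouched
    assert tracker.getMaxSeqNo() == 9;
    tracker.advanceMaxSeqNo(3);                    // no-op: 3 is not beyond the current max
    assert tracker.getMaxSeqNo() == 9;
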
* @@ -157,11 +166,11 @@ public boolean contains(final long seqNo) { return true; } final long bitSetKey = getBitSetKey(seqNo); - final CountedBitSet bitSet; + final int bitSetOffset = seqNoToBitSetOffset(seqNo); synchronized (this) { - bitSet = processedSeqNo.get(bitSetKey); + final CountedBitSet bitSet = processedSeqNo.get(bitSetKey); + return bitSet != null && bitSet.get(bitSetOffset); } - return bitSet != null && bitSet.get(seqNoToBitSetOffset(seqNo)); } /** diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 31f491d24cf9d..5c59007f9f27c 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -32,6 +32,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; @@ -39,6 +41,7 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.nio.file.Path; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -156,10 +159,10 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L private final LongSupplier currentTimeMillisSupplier; /** - * A callback when a new retention lease is created. In practice, this callback invokes the retention lease sync action, to sync - * retention leases to replicas. + * A callback when a new retention lease is created or an existing retention lease is removed. In practice, this callback invokes the + * retention lease sync action, to sync retention leases to replicas. */ - private final BiConsumer> onAddRetentionLease; + private final BiConsumer> onSyncRetentionLeases; /** * This set contains allocation IDs for which there is a thread actively waiting for the local checkpoint to advance to at least the @@ -177,6 +180,18 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L */ private RetentionLeases retentionLeases = RetentionLeases.EMPTY; + /** + * The primary term of the most-recently persisted retention leases. This is used to check if we need to persist the current retention + * leases. + */ + private long persistedRetentionLeasesPrimaryTerm; + + /** + * The version of the most-recently persisted retention leases. This is used to check if we need to persist the current retention + * leases. + */ + private long persistedRetentionLeasesVersion; + /** * Get all retention leases tracked on this shard. 
* @@ -237,7 +252,7 @@ public RetentionLease addRetentionLease( synchronized (this) { assert primaryMode; if (retentionLeases.contains(id)) { - throw new IllegalArgumentException("retention lease with ID [" + id + "] already exists"); + throw new RetentionLeaseAlreadyExistsException(id); } retentionLease = new RetentionLease(id, retainingSequenceNumber, currentTimeMillisSupplier.getAsLong(), source); retentionLeases = new RetentionLeases( @@ -246,7 +261,7 @@ public RetentionLease addRetentionLease( Stream.concat(retentionLeases.leases().stream(), Stream.of(retentionLease)).collect(Collectors.toList())); currentRetentionLeases = retentionLeases; } - onAddRetentionLease.accept(currentRetentionLeases, listener); + onSyncRetentionLeases.accept(currentRetentionLeases, listener); return retentionLease; } @@ -262,7 +277,7 @@ public RetentionLease addRetentionLease( public synchronized RetentionLease renewRetentionLease(final String id, final long retainingSequenceNumber, final String source) { assert primaryMode; if (retentionLeases.contains(id) == false) { - throw new IllegalArgumentException("retention lease with ID [" + id + "] does not exist"); + throw new RetentionLeaseNotFoundException(id); } final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, currentTimeMillisSupplier.getAsLong(), source); @@ -283,6 +298,29 @@ public synchronized RetentionLease renewRetentionLease(final String id, final lo return retentionLease; } + /** + * Removes an existing retention lease. + * + * @param id the identifier of the retention lease + * @param listener the callback when the retention lease is successfully removed and synced to replicas + */ + public void removeRetentionLease(final String id, final ActionListener listener) { + Objects.requireNonNull(listener); + final RetentionLeases currentRetentionLeases; + synchronized (this) { + assert primaryMode; + if (retentionLeases.contains(id) == false) { + throw new RetentionLeaseNotFoundException(id); + } + retentionLeases = new RetentionLeases( + operationPrimaryTerm, + retentionLeases.version() + 1, + retentionLeases.leases().stream().filter(lease -> lease.id().equals(id) == false).collect(Collectors.toList())); + currentRetentionLeases = retentionLeases; + } + onSyncRetentionLeases.accept(currentRetentionLeases, listener); + } + /** * Updates retention leases on a replica. * @@ -295,6 +333,55 @@ public synchronized void updateRetentionLeasesOnReplica(final RetentionLeases re } } + /** + * Loads the latest retention leases from their dedicated state file. + * + * @param path the path to the directory containing the state file + * @return the retention leases + * @throws IOException if an I/O exception occurs reading the retention leases + */ + public RetentionLeases loadRetentionLeases(final Path path) throws IOException { + final RetentionLeases retentionLeases = RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); + + // TODO after backporting we expect this never to happen in 8.x, so adjust this to throw an exception instead. + assert Version.CURRENT.major <= 8 : "throw an exception instead of returning EMPTY on null"; + if (retentionLeases == null) { + return RetentionLeases.EMPTY; + } + return retentionLeases; + } + + private final Object retentionLeasePersistenceLock = new Object(); + + /** + * Persists the current retention leases to their dedicated state file. If this version of the retention leases are already persisted + * then persistence is skipped. 
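Taken together, the ReplicationTracker additions above give retention leases a full primary-side lifecycle; a hedged usage sketch follows (the `tracker` and `shardStatePath` variables, lease id, and sequence number are assumed to exist in the caller):

    // add or remove a lease; conflicts throw the new dedicated exceptions, and both calls
    // invoke the onSyncRetentionLeases callback so replicas are brought up to date
    tracker.addRetentionLease("peer-recovery/node-1", 42L, "ccr", ActionListener.wrap(r -> {}, e -> {}));
    tracker.removeRetentionLease("peer-recovery/node-1", ActionListener.wrap(r -> {}, e -> {}));

    // persist the current leases to the shard's state directory; the write is skipped when this
    // {primary term, version} pair has already been persisted
    tracker.persistRetentionLeases(shardStatePath);
    RetentionLeases onDisk = tracker.loadRetentionLeases(shardStatePath);
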
+ * + * @param path the path to the directory containing the state file + * @throws WriteStateException if an exception occurs writing the state file + */ + public void persistRetentionLeases(final Path path) throws WriteStateException { + synchronized (retentionLeasePersistenceLock) { + final RetentionLeases currentRetentionLeases; + synchronized (this) { + if (retentionLeases.supersedes(persistedRetentionLeasesPrimaryTerm, persistedRetentionLeasesVersion) == false) { + logger.trace("skipping persisting retention leases [{}], already persisted", retentionLeases); + return; + } + currentRetentionLeases = retentionLeases; + } + logger.trace("persisting retention leases [{}]", currentRetentionLeases); + RetentionLeases.FORMAT.writeAndCleanup(currentRetentionLeases, path); + persistedRetentionLeasesPrimaryTerm = currentRetentionLeases.primaryTerm(); + persistedRetentionLeasesVersion = currentRetentionLeases.version(); + } + } + + public boolean assertRetentionLeasesPersisted(final Path path) throws IOException { + assert RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path) != null; + return true; + } + public static class CheckpointState implements Writeable { /** @@ -563,7 +650,7 @@ private static long inSyncCheckpointStates( * @param indexSettings the index settings * @param operationPrimaryTerm the current primary term * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} - * @param onAddRetentionLease a callback when a new retention lease is created or an existing retention lease expires + * @param onSyncRetentionLeases a callback when a new retention lease is created or an existing retention lease expires */ public ReplicationTracker( final ShardId shardId, @@ -573,7 +660,7 @@ public ReplicationTracker( final long globalCheckpoint, final LongConsumer onGlobalCheckpointUpdated, final LongSupplier currentTimeMillisSupplier, - final BiConsumer> onAddRetentionLease) { + final BiConsumer> onSyncRetentionLeases) { super(shardId, indexSettings); assert globalCheckpoint >= SequenceNumbers.UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint; this.shardAllocationId = allocationId; @@ -585,7 +672,7 @@ public ReplicationTracker( checkpoints.put(allocationId, new CheckpointState(SequenceNumbers.UNASSIGNED_SEQ_NO, globalCheckpoint, false, false)); this.onGlobalCheckpointUpdated = Objects.requireNonNull(onGlobalCheckpointUpdated); this.currentTimeMillisSupplier = Objects.requireNonNull(currentTimeMillisSupplier); - this.onAddRetentionLease = Objects.requireNonNull(onAddRetentionLease); + this.onSyncRetentionLeases = Objects.requireNonNull(onSyncRetentionLeases); this.pendingInSync = new HashSet<>(); this.routingTable = null; this.replicationGroup = null; diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java index e1d362d98764a..e6d6ed3fe825f 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java @@ -19,13 +19,16 @@ package org.elasticsearch.index.seqno; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; 
+import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; -import java.util.Locale; import java.util.Objects; /** @@ -34,7 +37,7 @@ * otherwise merge away operations that have been soft deleted). Each retention lease contains a unique identifier, the retaining sequence * number, the timestamp of when the lease was created or renewed, and the source of the retention lease (e.g., "ccr"). */ -public final class RetentionLease implements Writeable { +public final class RetentionLease implements ToXContent, Writeable { private final String id; @@ -94,10 +97,6 @@ public RetentionLease(final String id, final long retainingSequenceNumber, final if (id.isEmpty()) { throw new IllegalArgumentException("retention lease ID can not be empty"); } - if (id.contains(":") || id.contains(";") || id.contains(",")) { - // retention lease IDs can not contain these characters because they are used in encoding retention leases - throw new IllegalArgumentException("retention lease ID can not contain any of [:;,] but was [" + id + "]"); - } if (retainingSequenceNumber < 0) { throw new IllegalArgumentException("retention lease retaining sequence number [" + retainingSequenceNumber + "] out of range"); } @@ -108,10 +107,6 @@ public RetentionLease(final String id, final long retainingSequenceNumber, final if (source.isEmpty()) { throw new IllegalArgumentException("retention lease source can not be empty"); } - if (source.contains(":") || source.contains(";") || source.contains(",")) { - // retention lease sources can not contain these characters because they are used in encoding retention leases - throw new IllegalArgumentException("retention lease source can not contain any of [:;,] but was [" + source + "]"); - } this.id = id; this.retainingSequenceNumber = retainingSequenceNumber; this.timestamp = timestamp; @@ -145,43 +140,49 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeString(source); } - /** - * Encodes a retention lease as a string. This encoding can be decoded by {@link #decodeRetentionLease(String)}. The retention lease is - * encoded in the format id:{id};retaining_seq_no:{retainingSequenecNumber};timestamp:{timestamp};source:{source}. 
- * - * @param retentionLease the retention lease - * @return the encoding of the retention lease - */ - static String encodeRetentionLease(final RetentionLease retentionLease) { - Objects.requireNonNull(retentionLease); - return String.format( - Locale.ROOT, - "id:%s;retaining_seq_no:%d;timestamp:%d;source:%s", - retentionLease.id, - retentionLease.retainingSequenceNumber, - retentionLease.timestamp, - retentionLease.source); + private static final ParseField ID_FIELD = new ParseField("id"); + private static final ParseField RETAINING_SEQUENCE_NUMBER_FIELD = new ParseField("retaining_sequence_number"); + private static final ParseField TIMESTAMP_FIELD = new ParseField("timestamp"); + private static final ParseField SOURCE_FIELD = new ParseField("source"); + + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "retention_leases", + (a) -> new RetentionLease((String) a[0], (Long) a[1], (Long) a[2], (String) a[3])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ID_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETAINING_SEQUENCE_NUMBER_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIMESTAMP_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), SOURCE_FIELD); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + builder.field(ID_FIELD.getPreferredName(), id); + builder.field(RETAINING_SEQUENCE_NUMBER_FIELD.getPreferredName(), retainingSequenceNumber); + builder.field(TIMESTAMP_FIELD.getPreferredName(), timestamp); + builder.field(SOURCE_FIELD.getPreferredName(), source); + } + builder.endObject(); + return builder; + } + + @Override + public boolean isFragment() { + return false; } /** - * Decodes a retention lease encoded by {@link #encodeRetentionLease(RetentionLease)}. + * Parses a retention lease from {@link org.elasticsearch.common.xcontent.XContent}. This method assumes that the retention lease was + * converted to {@link org.elasticsearch.common.xcontent.XContent} via {@link #toXContent(XContentBuilder, Params)}. 
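With the ad-hoc string encoding gone, a RetentionLease round-trips through XContent via the parser declared above, which is also why the old ban on ':', ';' and ',' in IDs and sources could be dropped. A rough sketch of that round trip, assuming the usual org.elasticsearch.common.Strings and org.elasticsearch.common.xcontent imports are in scope; the lease values themselves are made up:

    void roundTripRetentionLease() throws IOException {
        final RetentionLease lease =
                new RetentionLease("ccr:follower-shard-0", 42L, System.currentTimeMillis(), "ccr");

        final XContentBuilder builder = XContentFactory.jsonBuilder();
        lease.toXContent(builder, ToXContent.EMPTY_PARAMS);
        final String json = Strings.toString(builder);
        // e.g. {"id":"ccr:follower-shard-0","retaining_sequence_number":42,"timestamp":...,"source":"ccr"}

        try (XContentParser parser = XContentType.JSON.xContent()
                .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            final RetentionLease roundTripped = RetentionLease.fromXContent(parser);
            assert lease.equals(roundTripped);
        }
    }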
* - * @param encodedRetentionLease an encoded retention lease - * @return the decoded retention lease + * @param parser the parser + * @return a retention lease */ - static RetentionLease decodeRetentionLease(final String encodedRetentionLease) { - Objects.requireNonNull(encodedRetentionLease); - final String[] fields = encodedRetentionLease.split(";"); - assert fields.length == 4 : Arrays.toString(fields); - assert fields[0].matches("id:[^:;,]+") : fields[0]; - final String id = fields[0].substring("id:".length()); - assert fields[1].matches("retaining_seq_no:\\d+") : fields[1]; - final long retainingSequenceNumber = Long.parseLong(fields[1].substring("retaining_seq_no:".length())); - assert fields[2].matches("timestamp:\\d+") : fields[2]; - final long timestamp = Long.parseLong(fields[2].substring("timestamp:".length())); - assert fields[3].matches("source:[^:;,]+") : fields[3]; - final String source = fields[3].substring("source:".length()); - return new RetentionLease(id, retainingSequenceNumber, timestamp, source); + public static RetentionLease fromXContent(final XContentParser parser) { + return PARSER.apply(parser, null); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java new file mode 100644 index 0000000000000..6fa1fd7fb3f3e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java @@ -0,0 +1,404 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.single.shard.SingleShardRequest; +import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.Supplier; + +/** + * This class holds all actions related to retention leases. Note carefully that these actions are executed under a primary permit. Care is + * taken to thread the listener through the invocations so that for the sync APIs we do not notify the listener until these APIs have + * responded with success. Additionally, note the use of + * {@link TransportSingleShardAction#asyncShardOperation(SingleShardRequest, ShardId, ActionListener)} to handle the case when acquiring + * permits goes asynchronous because acquiring permits is blocked + */ +public class RetentionLeaseActions { + + public static final long RETAIN_ALL = -1; + + abstract static class TransportRetentionLeaseAction> extends TransportSingleShardAction { + + private final IndicesService indicesService; + + @Inject + TransportRetentionLeaseAction( + final String name, + final ThreadPool threadPool, + final ClusterService clusterService, + final TransportService transportService, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final IndicesService indicesService, + final Supplier requestSupplier) { + super( + name, + threadPool, + clusterService, + transportService, + actionFilters, + indexNameExpressionResolver, + requestSupplier, + ThreadPool.Names.MANAGEMENT); + this.indicesService = Objects.requireNonNull(indicesService); + } + + @Override + protected ShardsIterator shards(final ClusterState state, final InternalRequest request) { + return state + .routingTable() + .shardRoutingTable(request.concreteIndex(), request.request().getShardId().id()) + .primaryShardIt(); + } + + @Override + protected void asyncShardOperation(T request, ShardId shardId, final ActionListener listener) { + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShard(shardId.id()); + indexShard.acquirePrimaryOperationPermit( + new ActionListener() { + + @Override + public void onResponse(final Releasable releasable) { + try (Releasable ignore = releasable) { + doRetentionLeaseAction(indexShard, request, listener); + } + } + + @Override + public void onFailure(final Exception e) { + listener.onFailure(e); + } + + }, + 
ThreadPool.Names.SAME, + request); + } + + @Override + protected Response shardOperation(final T request, final ShardId shardId) { + throw new UnsupportedOperationException(); + } + + abstract void doRetentionLeaseAction(IndexShard indexShard, T request, ActionListener listener); + + @Override + protected Response newResponse() { + return new Response(); + } + + @Override + protected boolean resolveIndex(final T request) { + return false; + } + + } + + public static class Add extends Action { + + public static final Add INSTANCE = new Add(); + public static final String ACTION_NAME = "indices:admin/seq_no/add_retention_lease"; + + private Add() { + super(ACTION_NAME); + } + + public static class TransportAction extends TransportRetentionLeaseAction { + + @Inject + public TransportAction( + final ThreadPool threadPool, + final ClusterService clusterService, + final TransportService transportService, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final IndicesService indicesService) { + super( + ACTION_NAME, + threadPool, + clusterService, + transportService, + actionFilters, + indexNameExpressionResolver, + indicesService, + AddRequest::new); + } + + @Override + void doRetentionLeaseAction(final IndexShard indexShard, final AddRequest request, final ActionListener listener) { + indexShard.addRetentionLease( + request.getId(), + request.getRetainingSequenceNumber(), + request.getSource(), + ActionListener.wrap( + r -> listener.onResponse(new Response()), + listener::onFailure)); + } + + } + + @Override + public Response newResponse() { + return new Response(); + } + + } + + public static class Renew extends Action { + + public static final Renew INSTANCE = new Renew(); + public static final String ACTION_NAME = "indices:admin/seq_no/renew_retention_lease"; + + private Renew() { + super(ACTION_NAME); + } + + public static class TransportAction extends TransportRetentionLeaseAction { + + @Inject + public TransportAction( + final ThreadPool threadPool, + final ClusterService clusterService, + final TransportService transportService, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final IndicesService indicesService) { + super( + ACTION_NAME, + threadPool, + clusterService, + transportService, + actionFilters, + indexNameExpressionResolver, + indicesService, + RenewRequest::new); + } + + + @Override + void doRetentionLeaseAction(final IndexShard indexShard, final RenewRequest request, final ActionListener listener) { + indexShard.renewRetentionLease(request.getId(), request.getRetainingSequenceNumber(), request.getSource()); + listener.onResponse(new Response()); + } + + } + + @Override + public Response newResponse() { + return new Response(); + } + + } + + public static class Remove extends Action { + + public static final Remove INSTANCE = new Remove(); + public static final String ACTION_NAME = "indices:admin/seq_no/remove_retention_lease"; + + private Remove() { + super(ACTION_NAME); + } + + public static class TransportAction extends TransportRetentionLeaseAction { + + @Inject + public TransportAction( + final ThreadPool threadPool, + final ClusterService clusterService, + final TransportService transportService, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final IndicesService indicesService) { + super( + ACTION_NAME, + threadPool, + clusterService, + transportService, + actionFilters, + indexNameExpressionResolver, + 
indicesService, + RemoveRequest::new); + } + + + @Override + void doRetentionLeaseAction(final IndexShard indexShard, final RemoveRequest request, final ActionListener listener) { + indexShard.removeRetentionLease( + request.getId(), + ActionListener.wrap( + r -> listener.onResponse(new Response()), + listener::onFailure)); + } + + } + + @Override + public Response newResponse() { + return new Response(); + } + + } + + private abstract static class Request> extends SingleShardRequest { + + private ShardId shardId; + + public ShardId getShardId() { + return shardId; + } + + private String id; + + public String getId() { + return id; + } + + Request() { + } + + Request(final ShardId shardId, final String id) { + super(Objects.requireNonNull(shardId).getIndexName()); + this.shardId = shardId; + this.id = Objects.requireNonNull(id); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + shardId = ShardId.readShardId(in); + id = in.readString(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + shardId.writeTo(out); + out.writeString(id); + } + + } + + private abstract static class AddOrRenewRequest> extends Request { + + private long retainingSequenceNumber; + + public long getRetainingSequenceNumber() { + return retainingSequenceNumber; + } + + private String source; + + public String getSource() { + return source; + } + + AddOrRenewRequest() { + } + + AddOrRenewRequest(final ShardId shardId, final String id, final long retainingSequenceNumber, final String source) { + super(shardId, id); + if (retainingSequenceNumber < 0 && retainingSequenceNumber != RETAIN_ALL) { + throw new IllegalArgumentException("retaining sequence number [" + retainingSequenceNumber + "] out of range"); + } + this.retainingSequenceNumber = retainingSequenceNumber; + this.source = Objects.requireNonNull(source); + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + retainingSequenceNumber = in.readZLong(); + source = in.readString(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeZLong(retainingSequenceNumber); + out.writeString(source); + } + + } + + public static class AddRequest extends AddOrRenewRequest { + + public AddRequest() { + } + + public AddRequest(final ShardId shardId, final String id, final long retainingSequenceNumber, final String source) { + super(shardId, id, retainingSequenceNumber, source); + } + + } + + public static class RenewRequest extends AddOrRenewRequest { + + public RenewRequest() { + } + + public RenewRequest(final ShardId shardId, final String id, final long retainingSequenceNumber, final String source) { + super(shardId, id, retainingSequenceNumber, source); + } + + } + + public static class RemoveRequest extends Request { + + public RemoveRequest() { + } + + public RemoveRequest(final ShardId shardId, final String id) { + super(shardId, id); + } + + } + + public static class Response extends ActionResponse { + + } + +} \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseAlreadyExistsException.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseAlreadyExistsException.java new file mode 100644 index 0000000000000..ffd5e96e6a526 --- /dev/null +++ 
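The three actions above are intended to be invoked through the normal client plumbing. A hypothetical caller, assuming a node Client, a resolved ShardId, and a logger are available; the lease ID and source strings are made up:

    final ShardId shardId = new ShardId("follower-index", "_na_", 0); // normally taken from cluster state

    // add a lease; RETAIN_ALL is resolved on the primary to its minimum retained sequence number
    client.execute(
            RetentionLeaseActions.Add.INSTANCE,
            new RetentionLeaseActions.AddRequest(shardId, "my-lease", RetentionLeaseActions.RETAIN_ALL, "ccr"),
            ActionListener.wrap(r -> {}, e -> logger.warn("add retention lease failed", e)));

    // later, advance the retaining sequence number
    client.execute(
            RetentionLeaseActions.Renew.INSTANCE,
            new RetentionLeaseActions.RenewRequest(shardId, "my-lease", 128, "ccr"),
            ActionListener.wrap(r -> {}, e -> logger.warn("renew retention lease failed", e)));

    // and eventually release it
    client.execute(
            RetentionLeaseActions.Remove.INSTANCE,
            new RetentionLeaseActions.RemoveRequest(shardId, "my-lease"),
            ActionListener.wrap(r -> {}, e -> logger.warn("remove retention lease failed", e)));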
b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseAlreadyExistsException.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; +import java.util.Objects; + +public class RetentionLeaseAlreadyExistsException extends ResourceAlreadyExistsException { + + public RetentionLeaseAlreadyExistsException(final String id) { + super("retention lease with ID [" + Objects.requireNonNull(id) + "] already exists"); + } + + public RetentionLeaseAlreadyExistsException(final StreamInput in) throws IOException { + super(in); + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index 906b505dad7e3..122db5799e9ac 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; @@ -37,6 +38,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.ShardId; @@ -111,27 +113,36 @@ public void backgroundSync( ActionListener.wrap( r -> {}, e -> { - if (ExceptionsHelper.unwrap(e, AlreadyClosedException.class, IndexShardClosedException.class) == null) { - getLogger().warn(new ParameterizedMessage("{} retention lease background sync failed", shardId), e); + if (ExceptionsHelper.isTransportStoppedForAction(e, ACTION_NAME + "[p]")) { + // we are likely shutting down + return; } + if (ExceptionsHelper.unwrap(e, AlreadyClosedException.class, IndexShardClosedException.class) != null) { + // the shard is closed + return; + } + getLogger().warn(new ParameterizedMessage("{} retention lease background sync failed", shardId), e); })); } } @Override - protected PrimaryResult shardOperationOnPrimary(final Request request, final 
IndexShard primary) { + protected PrimaryResult shardOperationOnPrimary( + final Request request, + final IndexShard primary) throws WriteStateException { + assert request.waitForActiveShards().equals(ActiveShardCount.NONE) : request.waitForActiveShards(); Objects.requireNonNull(request); Objects.requireNonNull(primary); - primary.afterWriteOperation(); + primary.persistRetentionLeases(); return new PrimaryResult<>(request, new ReplicationResponse()); } @Override - protected ReplicaResult shardOperationOnReplica(final Request request, final IndexShard replica){ + protected ReplicaResult shardOperationOnReplica(final Request request, final IndexShard replica) throws WriteStateException { Objects.requireNonNull(request); Objects.requireNonNull(replica); replica.updateRetentionLeasesOnReplica(request.getRetentionLeases()); - replica.afterWriteOperation(); + replica.persistRetentionLeases(); return new ReplicaResult(); } @@ -150,6 +161,7 @@ public Request() { public Request(final ShardId shardId, final RetentionLeases retentionLeases) { super(Objects.requireNonNull(shardId)); this.retentionLeases = Objects.requireNonNull(retentionLeases); + waitForActiveShards(ActiveShardCount.NONE); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseNotFoundException.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseNotFoundException.java new file mode 100644 index 0000000000000..2b13ae6b448e0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseNotFoundException.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; +import java.util.Objects; + +public class RetentionLeaseNotFoundException extends ResourceNotFoundException { + + public RetentionLeaseNotFoundException(final String id) { + super("retention lease with ID [" + Objects.requireNonNull(id) + "] not found"); + } + + public RetentionLeaseNotFoundException(final StreamInput in) throws IOException { + super(in); + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index 9be7ab046eb8b..d4845d92a3a6f 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -25,13 +25,14 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -39,6 +40,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.ShardId; @@ -121,29 +123,30 @@ public void sync( } @Override - protected WritePrimaryResult shardOperationOnPrimary(final Request request, final IndexShard primary) { + protected WritePrimaryResult shardOperationOnPrimary( + final Request request, + final IndexShard primary) throws WriteStateException { + assert request.waitForActiveShards().equals(ActiveShardCount.NONE) : request.waitForActiveShards(); Objects.requireNonNull(request); Objects.requireNonNull(primary); - // we flush to ensure that retention leases are committed - flush(primary); - return new WritePrimaryResult<>(request, new Response(), null, null, primary, logger); + primary.persistRetentionLeases(); + return new WritePrimaryResult<>(request, new Response(), null, null, primary, getLogger()); } @Override - protected WriteReplicaResult shardOperationOnReplica(final Request request, final IndexShard replica) { + protected WriteReplicaResult shardOperationOnReplica( + final Request request, + final IndexShard replica) throws WriteStateException { Objects.requireNonNull(request); Objects.requireNonNull(replica); replica.updateRetentionLeasesOnReplica(request.getRetentionLeases()); - // we flush to ensure that retention leases are committed - flush(replica); - return new WriteReplicaResult<>(request, null, 
null, replica, logger); + replica.persistRetentionLeases(); + return new WriteReplicaResult<>(request, null, null, replica, getLogger()); } - private void flush(final IndexShard indexShard) { - final FlushRequest flushRequest = new FlushRequest(); - flushRequest.force(true); - flushRequest.waitIfOngoing(true); - indexShard.flush(flushRequest); + @Override + public ClusterBlockLevel indexBlockLevel() { + return null; } public static final class Request extends ReplicatedWriteRequest { @@ -161,6 +164,7 @@ public Request() { public Request(final ShardId shardId, final RetentionLeases retentionLeases) { super(Objects.requireNonNull(shardId)); this.retentionLeases = Objects.requireNonNull(retentionLeases); + waitForActiveShards(ActiveShardCount.NONE); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java index 5a9d9e333b27b..0db32814f120c 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeases.java @@ -19,15 +19,20 @@ package org.elasticsearch.index.seqno; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.gateway.MetaDataStateFormat; import java.io.IOException; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.Locale; +import java.util.LinkedHashMap; import java.util.Map; import java.util.Objects; import java.util.function.Function; @@ -37,7 +42,7 @@ * Represents a versioned collection of retention leases. We version the collection of retention leases to ensure that sync requests that * arrive out of order on the replica, using the version to ensure that older sync requests are rejected. */ -public class RetentionLeases implements Writeable { +public class RetentionLeases implements ToXContent, Writeable { private final long primaryTerm; @@ -64,13 +69,27 @@ public long version() { /** * Checks if this retention leases collection supersedes the specified retention leases collection. A retention leases collection - * supersedes another retention leases collection if its primary term is higher, or if for equal primary terms its version is higher + * supersedes another retention leases collection if its primary term is higher, or if for equal primary terms its version is higher. * * @param that the retention leases collection to test against * @return true if this retention leases collection supercedes the specified retention lease collection, otherwise false */ - public boolean supersedes(final RetentionLeases that) { - return primaryTerm > that.primaryTerm || primaryTerm == that.primaryTerm && version > that.version; + boolean supersedes(final RetentionLeases that) { + return supersedes(that.primaryTerm, that.version); + } + + /** + * Checks if this retention leases collection would supersede a retention leases collection with the specified primary term and version. 
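The primary-term/version pair on the collection is what lets a replica drop syncs that arrive out of order, as the class javadoc notes. The replica-side guard amounts to something like the following sketch; this is the shape of the check, not the actual tracker method:

    // illustrative only: apply an incoming collection only if it supersedes the current one
    synchronized void applyRetentionLeasesOnReplica(final RetentionLeases incoming) {
        if (incoming.supersedes(this.retentionLeases)) {
            this.retentionLeases = incoming;
        }
        // otherwise the sync is stale (lower term, or same term and lower version) and is ignored
    }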
+ * A retention leases collection supersedes another retention leases collection if its primary term is higher, or if for equal primary + * terms its version is higher. + * + * @param primaryTerm the primary term + * @param version the version + * @return true if this retention leases collection would supercedes a retention lease collection with the specified primary term and + * version + */ + boolean supersedes(final long primaryTerm, final long version) { + return this.primaryTerm > primaryTerm || this.primaryTerm == primaryTerm && this.version > version; } private final Map leases; @@ -157,54 +176,59 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeCollection(leases.values()); } - /** - * Encodes a retention lease collection as a string. This encoding can be decoded by - * {@link RetentionLeases#decodeRetentionLeases(String)}. The encoding is a comma-separated encoding of each retention lease as encoded - * by {@link RetentionLease#encodeRetentionLease(RetentionLease)}, prefixed by the version of the retention lease collection. - * - * @param retentionLeases the retention lease collection - * @return the encoding of the retention lease collection - */ - public static String encodeRetentionLeases(final RetentionLeases retentionLeases) { - Objects.requireNonNull(retentionLeases); - return String.format( - Locale.ROOT, - "primary_term:%d;version:%d;%s", - retentionLeases.primaryTerm, - retentionLeases.version, - retentionLeases.leases.values().stream().map(RetentionLease::encodeRetentionLease).collect(Collectors.joining(","))); + private static final ParseField PRIMARY_TERM_FIELD = new ParseField("primary_term"); + private static final ParseField VERSION_FIELD = new ParseField("version"); + private static final ParseField LEASES_FIELD = new ParseField("leases"); + + @SuppressWarnings("unchecked") + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "retention_leases", + (a) -> new RetentionLeases((Long) a[0], (Long) a[1], (Collection) a[2])); + + static { + PARSER.declareLong(ConstructingObjectParser.constructorArg(), PRIMARY_TERM_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION_FIELD); + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) -> RetentionLease.fromXContent(p), LEASES_FIELD); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.field(PRIMARY_TERM_FIELD.getPreferredName(), primaryTerm); + builder.field(VERSION_FIELD.getPreferredName(), version); + builder.startArray(LEASES_FIELD.getPreferredName()); + { + for (final RetentionLease retentionLease : leases.values()) { + retentionLease.toXContent(builder, params); + } + } + builder.endArray(); + return builder; } /** - * Decodes retention leases encoded by {@link #encodeRetentionLeases(RetentionLeases)}. + * Parses a retention leases collection from {@link org.elasticsearch.common.xcontent.XContent}. This method assumes that the retention + * leases were converted to {@link org.elasticsearch.common.xcontent.XContent} via {@link #toXContent(XContentBuilder, Params)}. 
* - * @param encodedRetentionLeases an encoded retention lease collection - * @return the decoded retention lease collection + * @param parser the parser + * @return a retention leases collection */ - public static RetentionLeases decodeRetentionLeases(final String encodedRetentionLeases) { - Objects.requireNonNull(encodedRetentionLeases); - if (encodedRetentionLeases.isEmpty()) { - return EMPTY; + public static RetentionLeases fromXContent(final XContentParser parser) { + return PARSER.apply(parser, null); + } + + static final MetaDataStateFormat FORMAT = new MetaDataStateFormat("retention-leases-") { + + @Override + public void toXContent(final XContentBuilder builder, final RetentionLeases retentionLeases) throws IOException { + retentionLeases.toXContent(builder, ToXContent.EMPTY_PARAMS); } - assert encodedRetentionLeases.matches("primary_term:\\d+;version:\\d+;.*") : encodedRetentionLeases; - final int firstSemicolon = encodedRetentionLeases.indexOf(";"); - final long primaryTerm = Long.parseLong(encodedRetentionLeases.substring("primary_term:".length(), firstSemicolon)); - final int secondSemicolon = encodedRetentionLeases.indexOf(";", firstSemicolon + 1); - final long version = Long.parseLong(encodedRetentionLeases.substring(firstSemicolon + 1 + "version:".length(), secondSemicolon)); - final Collection leases; - if (secondSemicolon + 1 == encodedRetentionLeases.length()) { - leases = Collections.emptyList(); - } else { - assert Arrays.stream(encodedRetentionLeases.substring(secondSemicolon + 1).split(",")) - .allMatch(s -> s.matches("id:[^:;,]+;retaining_seq_no:\\d+;timestamp:\\d+;source:[^:;,]+")) - : encodedRetentionLeases; - leases = Arrays.stream(encodedRetentionLeases.substring(secondSemicolon + 1).split(",")) - .map(RetentionLease::decodeRetentionLease) - .collect(Collectors.toList()); + + @Override + public RetentionLeases fromXContent(final XContentParser parser) { + return RetentionLeases.fromXContent(parser); } - return new RetentionLeases(primaryTerm, version, leases); - } + }; @Override public boolean equals(Object o) { @@ -237,7 +261,16 @@ public String toString() { * @return the map from retention lease ID to retention lease */ private static Map toMap(final Collection leases) { - return leases.stream().collect(Collectors.toMap(RetentionLease::id, Function.identity())); + // use a linked hash map to preserve order + return leases.stream() + .collect(Collectors.toMap( + RetentionLease::id, + Function.identity(), + (left, right) -> { + assert left.id().equals(right.id()) : "expected [" + left.id() + "] to equal [" + right.id() + "]"; + throw new IllegalStateException("duplicate retention lease ID [" + left.id() + "]"); + }, + LinkedHashMap::new)); } /** diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java b/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java index c0a89e7cf006c..982b42b2c3f66 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java @@ -160,6 +160,13 @@ default void afterIndexShardDeleted(ShardId shardId, Settings indexSettings) { default void beforeIndexAddedToCluster(Index index, Settings indexSettings) { } + /** + * Called when the given shards store is created. The shard store is created before the shard is created. + * + * @param shardId the shard ID the store belongs to + */ + default void onStoreCreated(ShardId shardId) {} + /** * Called when the given shards store is closed. 
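RetentionLeases.FORMAT above is a MetaDataStateFormat with the "retention-leases-" prefix, so the dedicated state file is handled by the same generation-based machinery used for other on-disk shard state. The replication tracker's persist and load paths reduce to this pair of calls, with shardStatePath and logger assumed to be in scope:

    // write a new retention-leases-N generation next to the shard state and clean up older generations
    RetentionLeases.FORMAT.writeAndCleanup(retentionLeases, shardStatePath);

    // read the latest generation back; returns null if nothing has ever been written
    final RetentionLeases onDisk =
            RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, shardStatePath);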
The store is closed once all resource have been released on the store. * This implies that all index readers are closed and no recoveries are running. diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 22976af581be6..ab3958acfc2c2 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.ObjectLongMap; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.SegmentInfos; @@ -66,6 +67,7 @@ import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexNotFoundException; @@ -167,6 +169,7 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; +import static org.elasticsearch.index.seqno.RetentionLeaseActions.RETAIN_ALL; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard { @@ -533,6 +536,8 @@ public void updateShardState(final ShardRouting newRouting, assert indexSettings.getIndexVersionCreated().before(Version.V_6_5_0); engine.advanceMaxSeqNoOfUpdatesOrDeletes(seqNoStats().getMaxSeqNo()); } + // in case we previously reset engine, we need to forward MSU before replaying translog. 
+ engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.restoreLocalHistoryFromTranslog((resettingEngine, snapshot) -> runTranslogRecovery(resettingEngine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> {})); /* Rolling the translog generation is not strictly needed here (as we will never have collisions between @@ -778,7 +783,8 @@ private Engine.IndexResult index(Engine engine, Engine.Index index) throws IOExc try { if (logger.isTraceEnabled()) { // don't use index.source().utf8ToString() here source might not be valid UTF-8 - logger.trace("index [{}][{}] (seq# [{}])", index.type(), index.id(), index.seqNo()); + logger.trace("index [{}][{}] seq# [{}] allocation-id {}", + index.type(), index.id(), index.seqNo(), routingEntry().allocationId()); } result = engine.index(index); } catch (Exception e) { @@ -803,7 +809,7 @@ private Engine.NoOpResult markSeqNoAsNoop(Engine engine, long seqNo, long opPrim return noOp(engine, noOp); } - private Engine.NoOpResult noOp(Engine engine, Engine.NoOp noOp) { + private Engine.NoOpResult noOp(Engine engine, Engine.NoOp noOp) throws IOException { active.set(true); if (logger.isTraceEnabled()) { logger.trace("noop (seq# [{}])", noOp.seqNo()); @@ -1209,6 +1215,7 @@ private void markSearcherAccessed() { private Engine.Searcher acquireSearcher(String source, Engine.SearcherScope scope) { readAllowed(); + markSearcherAccessed(); final Engine engine = getEngine(); final Engine.Searcher searcher = engine.acquireSearcher(source, scope); assert ElasticsearchDirectoryReader.unwrap(searcher.getDirectoryReader()) @@ -1390,7 +1397,7 @@ public void openEngineAndRecoverFromTranslog() throws IOException { }; innerOpenEngineAndTranslog(); final Engine engine = getEngine(); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogRecoveryRunner, Long.MAX_VALUE); } @@ -1429,7 +1436,10 @@ private void innerOpenEngineAndTranslog() throws IOException { final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint"); - replicationTracker.updateRetentionLeasesOnReplica(getRetentionLeases(store.readLastCommittedSegmentsInfo())); + updateRetentionLeasesOnReplica(loadRetentionLeases()); + assert recoveryState.getRecoverySource().expectEmptyRetentionLeases() == false || getRetentionLeases().leases().isEmpty() + : "expected empty set of retention leases with recovery source [" + recoveryState.getRecoverySource() + + "] but got " + getRetentionLeases(); trimUnsafeCommits(); synchronized (mutex) { verifyNotClosed(); @@ -1449,14 +1459,6 @@ private void innerOpenEngineAndTranslog() throws IOException { assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage(); } - static RetentionLeases getRetentionLeases(final SegmentInfos segmentInfos) { - final String committedRetentionLeases = segmentInfos.getUserData().get(Engine.RETENTION_LEASES); - if (committedRetentionLeases == null) { - return RetentionLeases.EMPTY; - } - return RetentionLeases.decodeRetentionLeases(committedRetentionLeases); - } - private void trimUnsafeCommits() throws IOException { assert currentEngineReference.get() == null || currentEngineReference.get() instanceof ReadOnlyEngine : "a write engine is running"; final 
String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); @@ -1739,8 +1741,8 @@ public void onSettingsChanged() { /** * Acquires a lock on the translog files and Lucene soft-deleted documents to prevent them from being trimmed */ - public Closeable acquireRetentionLockForPeerRecovery() { - return getEngine().acquireRetentionLockForPeerRecovery(); + public Closeable acquireRetentionLock() { + return getEngine().acquireRetentionLock(); } /** @@ -1760,12 +1762,21 @@ public Translog.Snapshot getHistoryOperations(String source, long startingSeqNo) /** * Checks if we have a completed history of operations since the given starting seqno (inclusive). - * This method should be called after acquiring the retention lock; See {@link #acquireRetentionLockForPeerRecovery()} + * This method should be called after acquiring the retention lock; See {@link #acquireRetentionLock()} */ public boolean hasCompleteHistoryOperations(String source, long startingSeqNo) throws IOException { return getEngine().hasCompleteOperationHistory(source, mapperService, startingSeqNo); } + /** + * Gets the minimum retained sequence number for this engine. + * + * @return the minimum retained sequence number + */ + public long getMinRetainedSeqNo() { + return getEngine().getMinRetainedSeqNo(); + } + /** * Creates a new changes snapshot for reading operations whose seq_no are between {@code fromSeqNo}(inclusive) * and {@code toSeqNo}(inclusive). The caller has to close the returned snapshot after finishing the reading. @@ -1892,6 +1903,14 @@ public void addGlobalCheckpointListener( this.globalCheckpointListeners.add(waitingForGlobalCheckpoint, listener, timeout); } + private void ensureSoftDeletesEnabled(String feature) { + if (indexSettings.isSoftDeleteEnabled() == false) { + String message = feature + " requires soft deletes but " + indexSettings.getIndex() + " does not have soft deletes enabled"; + assert false : message; + throw new IllegalStateException(message); + } + } + /** * Get all retention leases tracked on this shard. * @@ -1938,7 +1957,14 @@ public RetentionLease addRetentionLease( Objects.requireNonNull(listener); assert assertPrimaryMode(); verifyNotClosed(); - return replicationTracker.addRetentionLease(id, retainingSequenceNumber, source, listener); + ensureSoftDeletesEnabled("retention leases"); + try (Closeable ignore = acquireRetentionLock()) { + final long actualRetainingSequenceNumber = + retainingSequenceNumber == RETAIN_ALL ? getMinRetainedSeqNo() : retainingSequenceNumber; + return replicationTracker.addRetentionLease(id, actualRetainingSequenceNumber, source, listener); + } catch (final IOException e) { + throw new AssertionError(e); + } } /** @@ -1953,7 +1979,28 @@ public RetentionLease addRetentionLease( public RetentionLease renewRetentionLease(final String id, final long retainingSequenceNumber, final String source) { assert assertPrimaryMode(); verifyNotClosed(); - return replicationTracker.renewRetentionLease(id, retainingSequenceNumber, source); + ensureSoftDeletesEnabled("retention leases"); + try (Closeable ignore = acquireRetentionLock()) { + final long actualRetainingSequenceNumber = + retainingSequenceNumber == RETAIN_ALL ? getMinRetainedSeqNo() : retainingSequenceNumber; + return replicationTracker.renewRetentionLease(id, actualRetainingSequenceNumber, source); + } catch (final IOException e) { + throw new AssertionError(e); + } + } + + /** + * Removes an existing retention lease. 
+ * + * @param id the identifier of the retention lease + * @param listener the callback when the retention lease is successfully removed and synced to replicas + */ + public void removeRetentionLease(final String id, final ActionListener listener) { + Objects.requireNonNull(listener); + assert assertPrimaryMode(); + verifyNotClosed(); + ensureSoftDeletesEnabled("retention leases"); + replicationTracker.removeRetentionLease(id, listener); } /** @@ -1967,30 +2014,56 @@ public void updateRetentionLeasesOnReplica(final RetentionLeases retentionLeases replicationTracker.updateRetentionLeasesOnReplica(retentionLeases); } + /** + * Loads the latest retention leases from their dedicated state file. + * + * @return the retention leases + * @throws IOException if an I/O exception occurs reading the retention leases + */ + public RetentionLeases loadRetentionLeases() throws IOException { + verifyNotClosed(); + return replicationTracker.loadRetentionLeases(path.getShardStatePath()); + } + + /** + * Persists the current retention leases to their dedicated state file. + * + * @throws WriteStateException if an exception occurs writing the state file + */ + public void persistRetentionLeases() throws WriteStateException { + verifyNotClosed(); + replicationTracker.persistRetentionLeases(path.getShardStatePath()); + } + + public boolean assertRetentionLeasesPersisted() throws IOException { + return replicationTracker.assertRetentionLeasesPersisted(path.getShardStatePath()); + } + /** * Syncs the current retention leases to all replicas. */ public void syncRetentionLeases() { assert assertPrimaryMode(); verifyNotClosed(); + ensureSoftDeletesEnabled("retention leases"); final Tuple retentionLeases = getRetentionLeases(true); if (retentionLeases.v1()) { - retentionLeaseSyncer.sync(shardId, retentionLeases.v2(), ActionListener.wrap(() -> {})); + logger.trace("syncing retention leases [{}] after expiration check", retentionLeases.v2()); + retentionLeaseSyncer.sync( + shardId, + retentionLeases.v2(), + ActionListener.wrap( + r -> {}, + e -> logger.warn(new ParameterizedMessage( + "failed to sync retention leases [{}] after expiration check", + retentionLeases), + e))); } else { + logger.trace("background syncing retention leases [{}] after expiration check", retentionLeases.v2()); retentionLeaseSyncer.backgroundSync(shardId, retentionLeases.v2()); } } - /** - * Waits for all operations up to the provided sequence number to complete. - * - * @param seqNo the sequence number that the checkpoint must advance to before this method returns - * @throws InterruptedException if the thread was interrupted while blocking on the condition - */ - public void waitForOpsToComplete(final long seqNo) throws InterruptedException { - getEngine().waitForOpsToComplete(seqNo); - } - /** * Called when the recovery process for a shard has opened the engine on the target shard. Ensures that the right data structures * have been set up locally to track local checkpoint information for the shard and that the shard is added to the replication group. 
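At the shard level these are the methods the transport actions ultimately delegate to, and they require soft deletes to be enabled (see ensureSoftDeletesEnabled). A hypothetical direct call on a primary IndexShard, using RETAIN_ALL so the lease starts at the minimum retained sequence number; the lease ID and source are made up:

    indexShard.addRetentionLease(
            "my-lease",                        // made-up lease ID
            RetentionLeaseActions.RETAIN_ALL,  // resolved under the retention lock to getMinRetainedSeqNo()
            "test",                            // source tag
            ActionListener.wrap(
                    r -> logger.trace("retention lease synced to replicas"),
                    e -> logger.warn("retention lease sync failed", e)));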
@@ -2437,7 +2510,7 @@ public void acquirePrimaryOperationPermit(ActionListener onPermitAcq verifyNotClosed(); assert shardRouting.primary() : "acquirePrimaryOperationPermit should only be called on primary shard: " + shardRouting; - indexShardOperationPermits.acquire(onPermitAcquired, executorOnDelay, false, debugInfo); + indexShardOperationPermits.acquire(wrapPrimaryOperationPermitListener(onPermitAcquired), executorOnDelay, false, debugInfo); } /** @@ -2448,7 +2521,27 @@ public void acquireAllPrimaryOperationsPermits(final ActionListener verifyNotClosed(); assert shardRouting.primary() : "acquireAllPrimaryOperationsPermits should only be called on primary shard: " + shardRouting; - asyncBlockOperations(onPermitAcquired, timeout.duration(), timeout.timeUnit()); + asyncBlockOperations(wrapPrimaryOperationPermitListener(onPermitAcquired), timeout.duration(), timeout.timeUnit()); + } + + /** + * Wraps the action to run on a primary after acquiring permit. This wrapping is used to check if the shard is in primary mode before + * executing the action. + * + * @param listener the listener to wrap + * @return the wrapped listener + */ + private ActionListener wrapPrimaryOperationPermitListener(final ActionListener listener) { + return ActionListener.wrap( + r -> { + if (replicationTracker.isPrimaryMode()) { + listener.onResponse(r); + } else { + r.close(); + listener.onFailure(new ShardNotInPrimaryModeException(shardId, state)); + } + }, + listener::onFailure); } private void asyncBlockOperations(ActionListener onPermitAcquired, long timeout, TimeUnit timeUnit) { @@ -2920,9 +3013,7 @@ private void setRefreshPending(Engine engine) { * true if the listener was registered to wait for a refresh. */ public final void awaitShardSearchActive(Consumer listener) { - if (isSearchIdle()) { - markSearcherAccessed(); // move the shard into non-search idle - } + markSearcherAccessed(); // move the shard into non-search idle final Translog.Location location = pendingRefreshLocation.get(); if (location != null) { addRefreshListener(location, (b) -> { @@ -3092,4 +3183,17 @@ public void advanceMaxSeqNoOfUpdatesOrDeletes(long seqNo) { getEngine().advanceMaxSeqNoOfUpdatesOrDeletes(seqNo); assert seqNo <= getMaxSeqNoOfUpdatesOrDeletes() : getMaxSeqNoOfUpdatesOrDeletes() + " < " + seqNo; } + + /** + * Performs the pre-closing checks on the {@link IndexShard}. 
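Because primary-permit listeners are now wrapped, a caller can observe a ShardNotInPrimaryModeException instead of silently running against a shard that was demoted after the permit was requested. A sketch of how a caller might handle that; the executor name and debug object are arbitrary:

    indexShard.acquirePrimaryOperationPermit(
            new ActionListener<Releasable>() {

                @Override
                public void onResponse(final Releasable releasable) {
                    try (Releasable ignore = releasable) {
                        // still in primary mode: safe to run the primary-only operation here
                    }
                }

                @Override
                public void onFailure(final Exception e) {
                    if (e instanceof ShardNotInPrimaryModeException) {
                        // the shard lost primary mode (e.g., relocation hand-off); fail or route elsewhere
                    }
                }

            },
            ThreadPool.Names.SAME,
            "primary-only-operation");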
+ * + * @throws IllegalStateException if the sanity checks failed + */ + public void verifyShardBeforeIndexClosing() throws IllegalStateException { + getEngine().verifyEngineBeforeIndexClosing(); + } + + RetentionLeaseSyncer getRetentionLeaseSyncer() { + return retentionLeaseSyncer; + } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java index cc9ac40c2744d..72b99f4d4868f 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java @@ -33,6 +33,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.Collection; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; final class LocalShardSnapshot implements Closeable { @@ -116,6 +117,12 @@ public Lock obtainLock(String name) throws IOException { public void close() throws IOException { throw new UnsupportedOperationException("nobody should close this directory wrapper"); } + + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } }; } diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index f3e631f8bf6e0..07aade952923b 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -246,11 +246,11 @@ protected void doRun() throws Exception { Translog.Operation operation; while ((operation = snapshot.next()) != null) { final long seqNo = operation.seqNo(); - if (startingSeqNo >= 0 && - (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || seqNo < startingSeqNo)) { + if (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || seqNo < startingSeqNo) { totalSkippedOps.incrementAndGet(); continue; } + assert operation.seqNo() >= 0 : "sending operation with unassigned sequence number [" + operation + "]"; operations.add(operation); size += operation.estimateSize(); totalSentOps.incrementAndGet(); @@ -260,7 +260,6 @@ protected void doRun() throws Exception { break; } } - final long trimmedAboveSeqNo = firstMessage.get() ? maxSeqNo : SequenceNumbers.UNASSIGNED_SEQ_NO; // have to send sync request even in case of there are no operations to sync - have to sync trimmedAboveSeqNo at least if (!operations.isEmpty() || trimmedAboveSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java b/server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java new file mode 100644 index 0000000000000..8bc23dcdd00f7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardNotInPrimaryModeException.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.shard; + +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +public class ShardNotInPrimaryModeException extends IllegalIndexShardStateException { + + public ShardNotInPrimaryModeException(final ShardId shardId, final IndexShardState currentState) { + super(shardId, currentState, "shard is not in primary mode"); + } + + public ShardNotInPrimaryModeException(final StreamInput in) throws IOException { + super(in); + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index d15de54c54e99..06b6fa557983e 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -259,6 +259,12 @@ public void readBytes(byte[] b, int offset, int len) throws IOException { assert index.getFileDetails(dest).recovered() == l : index.getFileDetails(dest).toString(); } } + + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } } /** @@ -401,9 +407,11 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe final String translogUUID = Translog.createEmptyTranslog( indexShard.shardPath().resolveTranslog(), localCheckpoint, shardId, indexShard.getPendingPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); + writeEmptyRetentionLeasesFile(indexShard); } else if (indexShouldExists) { if (recoveryState.getRecoverySource().shouldBootstrapNewHistoryUUID()) { store.bootstrapNewHistory(); + writeEmptyRetentionLeasesFile(indexShard); } // since we recover from local, just fill the files and size try { @@ -420,6 +428,7 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe indexShard.shardPath().resolveTranslog(), SequenceNumbers.NO_OPS_PERFORMED, shardId, indexShard.getPendingPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); + writeEmptyRetentionLeasesFile(indexShard); } indexShard.openEngineAndRecoverFromTranslog(); indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); @@ -432,6 +441,12 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe } } + private static void writeEmptyRetentionLeasesFile(IndexShard indexShard) throws IOException { + assert indexShard.getRetentionLeases().leases().isEmpty() : indexShard.getRetentionLeases(); // not loaded yet + indexShard.persistRetentionLeases(); + assert indexShard.loadRetentionLeases().leases().isEmpty(); + } + private void addRecoveredFileDetails(SegmentInfos si, Store store, RecoveryState.Index index) throws IOException { final Directory directory = store.directory(); for (String name : Lucene.files(si)) { @@ -471,6 +486,7 @@ private void restore(final IndexShard indexShard, final Repository repository, f indexShard.shardPath().resolveTranslog(), localCheckpoint, shardId, indexShard.getPendingPrimaryTerm()); 
store.associateIndexWithNewTranslog(translogUUID); assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; + writeEmptyRetentionLeasesFile(indexShard); indexShard.openEngineAndRecoverFromTranslog(); indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java index 34b07932e48ff..565716799065e 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java @@ -37,10 +37,10 @@ import static java.util.Collections.unmodifiableMap; /** - * Contains information about all snapshot for the given shard in repository + * Contains information about all snapshots for the given shard in repository *

* This class is used to find files that were already snapshotted and clear out files that no longer referenced by any - * snapshots + * snapshots. */ public class BlobStoreIndexShardSnapshots implements Iterable, ToXContentFragment { diff --git a/server/src/main/java/org/elasticsearch/index/store/ByteSizeCachingDirectory.java b/server/src/main/java/org/elasticsearch/index/store/ByteSizeCachingDirectory.java index 3b0a912c2df79..9a202a9b4cd31 100644 --- a/server/src/main/java/org/elasticsearch/index/store/ByteSizeCachingDirectory.java +++ b/server/src/main/java/org/elasticsearch/index/store/ByteSizeCachingDirectory.java @@ -32,6 +32,7 @@ import java.io.UncheckedIOException; import java.nio.file.AccessDeniedException; import java.nio.file.NoSuchFileException; +import java.util.Set; final class ByteSizeCachingDirectory extends FilterDirectory { @@ -180,4 +181,9 @@ public void deleteFile(String name) throws IOException { } } + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } } diff --git a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index badf981803603..a8b50fcc53895 100644 --- a/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/server/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -144,6 +144,7 @@ public IndexInput openInput(String name, IOContext context) throws IOException { case "nvd": case "dvd": case "tim": + case "cfs": // we need to do these checks on the outer directory since the inner doesn't know about pending deletes ensureOpen(); ensureCanRead(name); diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 73ac8a65d3007..0d03c93d81f3b 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -38,6 +38,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.BufferedChecksum; import org.apache.lucene.store.ByteArrayDataInput; +import org.apache.lucene.store.ByteBufferIndexInput; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; @@ -45,6 +46,7 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.Lock; +import org.apache.lucene.store.RandomAccessInput; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; @@ -95,6 +97,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -126,6 +129,14 @@ * */ public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted { + /** + * This is an escape hatch for lucenes internal optimization that checks if the IndexInput is an instance of ByteBufferIndexInput + * and if that's the case doesn't load the term dictionary into ram but loads it off disk iff the fields is not an ID like field. 
+ * Since this optimization has been added very late in the release processes we add this setting to allow users to opt-out of + * this by exploiting lucene internals and wrapping the IndexInput in a simple delegate. + */ + public static final Setting FORCE_RAM_TERM_DICT = Setting.boolSetting("index.force_memory_term_dictionary", false, + Property.IndexScope); static final String CODEC = "store"; static final int VERSION_WRITE_THROWABLE= 2; // we write throwable since 2.0 static final int VERSION_STACK_TRACE = 1; // we write the stack trace too since 1.4.0 @@ -160,7 +171,8 @@ public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING); logger.debug("store stats are refreshed with refresh_interval [{}]", refreshInterval); ByteSizeCachingDirectory sizeCachingDir = new ByteSizeCachingDirectory(directory, refreshInterval); - this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", shardId)); + this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", shardId), + indexSettings.getValue(FORCE_RAM_TERM_DICT)); this.shardLock = shardLock; this.onClose = onClose; @@ -436,7 +448,7 @@ private void closeInternal() { */ public static MetadataSnapshot readMetadataSnapshot(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, Logger logger) throws IOException { - try (ShardLock lock = shardLocker.lock(shardId, TimeUnit.SECONDS.toMillis(5)); + try (ShardLock lock = shardLocker.lock(shardId, "read metadata snapshot", TimeUnit.SECONDS.toMillis(5)); Directory dir = new SimpleFSDirectory(indexLocation)) { failIfCorrupted(dir, shardId); return new MetadataSnapshot(null, dir, logger); @@ -457,7 +469,7 @@ public static MetadataSnapshot readMetadataSnapshot(Path indexLocation, ShardId */ public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, Logger logger) throws IOException, ShardLockObtainFailedException { - try (ShardLock lock = shardLocker.lock(shardId, TimeUnit.SECONDS.toMillis(5)); + try (ShardLock lock = shardLocker.lock(shardId, "open index", TimeUnit.SECONDS.toMillis(5)); Directory dir = new SimpleFSDirectory(indexLocation)) { failIfCorrupted(dir, shardId); SegmentInfos segInfo = Lucene.readSegmentInfos(dir); @@ -700,10 +712,12 @@ public int refCount() { static final class StoreDirectory extends FilterDirectory { private final Logger deletesLogger; + private final boolean forceRamTermDict; - StoreDirectory(ByteSizeCachingDirectory delegateDirectory, Logger deletesLogger) { + StoreDirectory(ByteSizeCachingDirectory delegateDirectory, Logger deletesLogger, boolean forceRamTermDict) { super(delegateDirectory); this.deletesLogger = deletesLogger; + this.forceRamTermDict = forceRamTermDict; } /** Estimate the cumulative size of all files in this directory in bytes. */ @@ -730,10 +744,29 @@ private void innerClose() throws IOException { super.close(); } + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + IndexInput input = super.openInput(name, context); + if (name.endsWith(".tip") || name.endsWith(".cfs")) { + // only do this if we are reading cfs or tip file - all other files don't need this. 
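+                // wrapping the input in a plain delegate hides the ByteBufferIndexInput type from Lucene, so the term dictionary is loaded into RAM as before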
+ if (forceRamTermDict && input instanceof ByteBufferIndexInput) { + return new DeoptimizingIndexInput(input.toString(), input); + } + } + return input; + } + @Override public String toString() { return "store(" + in.toString() + ")"; } + + @Override + public Set getPendingDeletions() throws IOException { + // FilterDirectory.getPendingDeletions does not delegate, working around it here. + // to be removed once fixed in FilterDirectory. + return unwrap(this).getPendingDeletions(); + } } /** @@ -1521,7 +1554,8 @@ public void trimUnsafeCommits(final long lastSyncedGlobalCheckpoint, final long if (existingCommits.isEmpty()) { throw new IllegalArgumentException("No index found to trim"); } - final String translogUUID = existingCommits.get(existingCommits.size() - 1).getUserData().get(Translog.TRANSLOG_UUID_KEY); + final IndexCommit lastIndexCommitCommit = existingCommits.get(existingCommits.size() - 1); + final String translogUUID = lastIndexCommitCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY); final IndexCommit startingIndexCommit; // We may not have a safe commit if an index was create before v6.2; and if there is a snapshotted commit whose translog // are not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. @@ -1546,7 +1580,7 @@ public void trimUnsafeCommits(final long lastSyncedGlobalCheckpoint, final long + startingIndexCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY) + "] is not equal to last commit's translog uuid [" + translogUUID + "]"); } - if (startingIndexCommit.equals(existingCommits.get(existingCommits.size() - 1)) == false) { + if (startingIndexCommit.equals(lastIndexCommitCommit) == false) { try (IndexWriter writer = newAppendingIndexWriter(directory, startingIndexCommit)) { // this achieves two things: // - by committing a new commit based on the starting commit, it make sure the starting commit will be opened @@ -1603,4 +1637,126 @@ private static IndexWriterConfig newIndexWriterConfig() { .setMergePolicy(NoMergePolicy.INSTANCE); } + /** + * see {@link #FORCE_RAM_TERM_DICT} for details + */ + private static final class DeoptimizingIndexInput extends IndexInput { + + private final IndexInput in; + + private DeoptimizingIndexInput(String resourceDescription, IndexInput in) { + super(resourceDescription); + this.in = in; + } + + @Override + public IndexInput clone() { + return new DeoptimizingIndexInput(toString(), in.clone()); + } + + @Override + public void close() throws IOException { + in.close(); + } + + @Override + public long getFilePointer() { + return in.getFilePointer(); + } + + @Override + public void seek(long pos) throws IOException { + in.seek(pos); + } + + @Override + public long length() { + return in.length(); + } + + @Override + public String toString() { + return in.toString(); + } + + @Override + public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + return new DeoptimizingIndexInput(sliceDescription, in.slice(sliceDescription, offset, length)); + } + + @Override + public RandomAccessInput randomAccessSlice(long offset, long length) throws IOException { + return in.randomAccessSlice(offset, length); + } + + @Override + public byte readByte() throws IOException { + return in.readByte(); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + in.readBytes(b, offset, len); + } + + @Override + public void readBytes(byte[] b, int offset, int len, boolean useBuffer) throws IOException { + in.readBytes(b, offset, len, 
useBuffer); + } + + @Override + public short readShort() throws IOException { + return in.readShort(); + } + + @Override + public int readInt() throws IOException { + return in.readInt(); + } + + @Override + public int readVInt() throws IOException { + return in.readVInt(); + } + + @Override + public int readZInt() throws IOException { + return in.readZInt(); + } + + @Override + public long readLong() throws IOException { + return in.readLong(); + } + + @Override + public long readVLong() throws IOException { + return in.readVLong(); + } + + @Override + public long readZLong() throws IOException { + return in.readZLong(); + } + + @Override + public String readString() throws IOException { + return in.readString(); + } + + @Override + public Map readMapOfStrings() throws IOException { + return in.readMapOfStrings(); + } + + @Override + public Set readSetOfStrings() throws IOException { + return in.readSetOfStrings(); + } + + @Override + public void skipBytes(long numBytes) throws IOException { + in.skipBytes(numBytes); + } + } } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index 1dfbc3af42b49..dc054f8b51d3e 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -42,6 +42,7 @@ import java.io.Closeable; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.IdentityHashMap; import java.util.Map; @@ -71,7 +72,7 @@ public class IndicesQueryCache implements QueryCache, Closeable { // This is a hack for the fact that the close listener for the // ShardCoreKeyMap will be called before onDocIdSetEviction // See onDocIdSetEviction for more info - private final Map stats2 = new IdentityHashMap<>(); + private final Map stats2 = Collections.synchronizedMap(new IdentityHashMap<>()); public IndicesQueryCache(Settings settings) { final ByteSizeValue size = INDICES_CACHE_QUERY_SIZE_SETTING.get(settings); @@ -189,30 +190,50 @@ public void close() { assert shardKeyMap.size() == 0 : shardKeyMap.size(); assert shardStats.isEmpty() : shardStats.keySet(); assert stats2.isEmpty() : stats2; + + // This cache stores two things: filters, and doc id sets. At this time + // we only know that there are no more doc id sets, but we still track + // recently used queries, which we want to reclaim. 
cache.clear(); } private static class Stats implements Cloneable { + final ShardId shardId; volatile long ramBytesUsed; volatile long hitCount; volatile long missCount; volatile long cacheCount; volatile long cacheSize; + Stats(ShardId shardId) { + this.shardId = shardId; + } + QueryCacheStats toQueryCacheStats() { return new QueryCacheStats(ramBytesUsed, hitCount, missCount, cacheCount, cacheSize); } + + @Override + public String toString() { + return "{shardId=" + shardId + ", ramBytedUsed=" + ramBytesUsed + ", hitCount=" + hitCount + ", missCount=" + missCount + + ", cacheCount=" + cacheCount + ", cacheSize=" + cacheSize + "}"; + } } private static class StatsAndCount { - int count; + volatile int count; final Stats stats; StatsAndCount(Stats stats) { this.stats = stats; this.count = 0; } + + @Override + public String toString() { + return "{stats=" + stats + " ,count=" + count + "}"; + } } private boolean empty(Stats stats) { @@ -249,7 +270,7 @@ private Stats getOrCreateStats(Object coreKey) { final ShardId shardId = shardKeyMap.getShardId(coreKey); Stats stats = shardStats.get(shardId); if (stats == null) { - stats = new Stats(); + stats = new Stats(shardId); shardStats.put(shardId, stats); } return stats; @@ -265,6 +286,7 @@ protected void onClear() { stats.cacheSize = 0; stats.ramBytesUsed = 0; } + stats2.clear(); sharedRamBytesUsed = 0; } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 4682cd4dff421..bb45ca9bb8302 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -62,6 +62,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -83,6 +84,7 @@ import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.cache.request.ShardRequestCache; import org.elasticsearch.index.engine.CommitStats; +import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.fielddata.IndexFieldDataCache; @@ -124,6 +126,7 @@ import java.io.Closeable; import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.file.Files; import java.util.ArrayList; import java.util.Collection; @@ -189,11 +192,12 @@ public class IndicesService extends AbstractLifecycleComponent private final NamedWriteableRegistry namedWriteableRegistry; private final IndexingMemoryController indexingMemoryController; private final TimeValue cleanInterval; - private final IndicesRequestCache indicesRequestCache; + final IndicesRequestCache indicesRequestCache; // pkg-private for testing private final IndicesQueryCache indicesQueryCache; private final MetaStateService metaStateService; private final Collection>> engineFactoryProviders; private final Map> indexStoreFactories; + final AbstractRefCounted indicesRefCount; // pkg-private for testing @Override protected void doStart() { @@ -249,6 +253,27 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon } this.indexStoreFactories 
= indexStoreFactories; + // doClose() is called when shutting down a node, yet there might still be ongoing requests + // that we need to wait for before closing some resources such as the caches. In order to + // avoid closing these resources while ongoing requests are still being processed, we use a + // ref count which will only close them when both this service and all index services are + // actually closed + indicesRefCount = new AbstractRefCounted("indices") { + @Override + protected void closeInternal() { + try { + IOUtils.close( + analysisRegistry, + indexingMemoryController, + indicesFieldDataCache, + cacheCleaner, + indicesRequestCache, + indicesQueryCache); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }; } @Override @@ -280,14 +305,8 @@ protected void doStop() { } @Override - protected void doClose() { - IOUtils.closeWhileHandlingException( - analysisRegistry, - indexingMemoryController, - indicesFieldDataCache, - cacheCleaner, - indicesRequestCache, - indicesQueryCache); + protected void doClose() throws IOException { + indicesRefCount.decRef(); } /** @@ -455,9 +474,17 @@ public synchronized IndexService createIndex( } List finalListeners = new ArrayList<>(builtInListeners); final IndexEventListener onStoreClose = new IndexEventListener() { + @Override + public void onStoreCreated(ShardId shardId) { + indicesRefCount.incRef(); + } @Override public void onStoreClosed(ShardId shardId) { - indicesQueryCache.onClose(shardId); + try { + indicesQueryCache.onClose(shardId); + } finally { + indicesRefCount.decRef(); + } } }; finalListeners.add(onStoreClose); @@ -493,6 +520,11 @@ private synchronized IndexService createIndexService(final String reason, List builtInListeners, IndexingOperationListener... indexingOperationListeners) throws IOException { final IndexSettings idxSettings = new IndexSettings(indexMetaData, settings, indexScopedSettings); + if (idxSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0) + && EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.exists(idxSettings.getSettings())) { + throw new IllegalArgumentException( + "Setting [" + EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey() + "] was removed in version 7.0.0"); + } // we ignore private settings since they are not registered settings indexScopedSettings.validate(indexMetaData.getSettings(), true, true, true); logger.debug("creating Index [{}], shards [{}]/[{}] - reason [{}]", @@ -868,6 +900,9 @@ public IndexMetaData verifyIndexIsDeleted(final Index index, final ClusterState final IndexMetaData metaData; try { metaData = metaStateService.loadIndexState(index); + if (metaData == null) { + return null; + } } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("[{}] failed to load state file from a stale deleted index, " + "folders will be left on disk", index), e); @@ -1025,7 +1060,7 @@ public void processPendingDeletes(Index index, IndexSettings indexSettings, Time throws IOException, InterruptedException, ShardLockObtainFailedException { logger.debug("{} processing pending deletes", index); final long startTimeNS = System.nanoTime(); - final List shardLocks = nodeEnv.lockAllForIndex(index, indexSettings, timeout.millis()); + final List shardLocks = nodeEnv.lockAllForIndex(index, indexSettings, "process pending deletes", timeout.millis()); int numRemoved = 0; try { Map locks = new HashMap<>(); @@ -1340,7 +1375,7 @@ interface IndexDeletionAllowedPredicate { (Index index, IndexSettings indexSettings) -> canDeleteIndexContents(index, indexSettings); private 
final IndexDeletionAllowedPredicate ALWAYS_TRUE = (Index index, IndexSettings indexSettings) -> true; - public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) { + public AliasFilter buildAliasFilter(ClusterState state, String index, Set resolvedExpressions) { /* Being static, parseAliasFilter doesn't have access to whatever guts it needs to parse a query. Instead of passing in a bunch * of dependencies we pass in a function that can perform the parsing. */ CheckedFunction filterParser = bytes -> { @@ -1349,8 +1384,8 @@ public AliasFilter buildAliasFilter(ClusterState state, String index, String... return parseInnerQueryBuilder(parser); } }; - String[] aliases = indexNameExpressionResolver.filteringAliases(state, index, expressions); IndexMetaData indexMetaData = state.metaData().index(index); + String[] aliases = indexNameExpressionResolver.filteringAliases(state, index, resolvedExpressions); return new AliasFilter(ShardSearchRequest.parseAliasFilter(filterParser, indexMetaData, aliases), aliases); } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java index 7539c1653cce4..bc71cb597b234 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java @@ -81,7 +81,7 @@ public void put(Version version, T model) { @Override public Collection values() { - return Collections.singleton(model); + return model == null ? Collections.emptySet() : Collections.singleton(model); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileWriter.java b/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileWriter.java new file mode 100644 index 0000000000000..87a6d18671a6f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/recovery/MultiFileWriter.java @@ -0,0 +1,213 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.indices.recovery; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.store.StoreFileMetaData; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.Map; +import java.util.PriorityQueue; +import java.util.concurrent.ConcurrentMap; + +public class MultiFileWriter implements Releasable { + + public MultiFileWriter(Store store, RecoveryState.Index indexState, String tempFilePrefix, Logger logger, Runnable ensureOpen) { + this.store = store; + this.indexState = indexState; + this.tempFilePrefix = tempFilePrefix; + this.logger = logger; + this.ensureOpen = ensureOpen; + } + + private final Runnable ensureOpen; + private final Logger logger; + private final Store store; + private final RecoveryState.Index indexState; + private final String tempFilePrefix; + + private final ConcurrentMap openIndexOutputs = ConcurrentCollections.newConcurrentMap(); + private final ConcurrentMap fileChunkWriters = ConcurrentCollections.newConcurrentMap(); + + + final Map tempFileNames = ConcurrentCollections.newConcurrentMap(); + + public void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content, boolean lastChunk) + throws IOException { + final FileChunkWriter writer = fileChunkWriters.computeIfAbsent(fileMetaData.name(), name -> new FileChunkWriter()); + writer.writeChunk(new FileChunk(fileMetaData, content, position, lastChunk)); + } + + /** Get a temporary name for the provided file name. */ + String getTempNameForFile(String origFile) { + return tempFilePrefix + origFile; + } + + public IndexOutput getOpenIndexOutput(String key) { + ensureOpen.run(); + return openIndexOutputs.get(key); + } + + /** remove and {@link IndexOutput} for a given file. It is the caller's responsibility to close it */ + public IndexOutput removeOpenIndexOutputs(String name) { + ensureOpen.run(); + return openIndexOutputs.remove(name); + } + + /** + * Creates an {@link IndexOutput} for the given file name. Note that the + * IndexOutput actually point at a temporary file. + *

+ * Note: You can use {@link #getOpenIndexOutput(String)} with the same filename to retrieve the same IndexOutput + * at a later stage + */ + public IndexOutput openAndPutIndexOutput(String fileName, StoreFileMetaData metaData, Store store) throws IOException { + ensureOpen.run(); + String tempFileName = getTempNameForFile(fileName); + if (tempFileNames.containsKey(tempFileName)) { + throw new IllegalStateException("output for file [" + fileName + "] has already been created"); + } + // add first, before it's created + tempFileNames.put(tempFileName, fileName); + IndexOutput indexOutput = store.createVerifyingOutput(tempFileName, metaData, IOContext.DEFAULT); + openIndexOutputs.put(fileName, indexOutput); + return indexOutput; + } + + private void innerWriteFileChunk(StoreFileMetaData fileMetaData, long position, + BytesReference content, boolean lastChunk) throws IOException { + final String name = fileMetaData.name(); + IndexOutput indexOutput; + if (position == 0) { + indexOutput = openAndPutIndexOutput(name, fileMetaData, store); + } else { + indexOutput = getOpenIndexOutput(name); + } + assert indexOutput.getFilePointer() == position : "file-pointer " + indexOutput.getFilePointer() + " != " + position; + BytesRefIterator iterator = content.iterator(); + BytesRef scratch; + while((scratch = iterator.next()) != null) { // we iterate over all pages - this is a 0-copy for all core impls + indexOutput.writeBytes(scratch.bytes, scratch.offset, scratch.length); + } + indexState.addRecoveredBytesToFile(name, content.length()); + if (indexOutput.getFilePointer() >= fileMetaData.length() || lastChunk) { + try { + Store.verify(indexOutput); + } finally { + // we are done + indexOutput.close(); + } + final String temporaryFileName = getTempNameForFile(name); + assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName) : + "expected: [" + temporaryFileName + "] in " + Arrays.toString(store.directory().listAll()); + store.directory().sync(Collections.singleton(temporaryFileName)); + IndexOutput remove = removeOpenIndexOutputs(name); + assert remove == null || remove == indexOutput; // remove maybe null if we got finished + } + } + + @Override + public void close() { + fileChunkWriters.clear(); + // clean open index outputs + Iterator> iterator = openIndexOutputs.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry entry = iterator.next(); + logger.trace("closing IndexOutput file [{}]", entry.getValue()); + try { + entry.getValue().close(); + } catch (Exception e) { + logger.debug(() -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e); + } + iterator.remove(); + } + if (Strings.hasText(tempFilePrefix)) { + // trash temporary files + for (String file : tempFileNames.keySet()) { + logger.trace("cleaning temporary file [{}]", file); + store.deleteQuiet(file); + } + } + } + + /** renames all temporary files to their true name, potentially overriding existing files */ + public void renameAllTempFiles() throws IOException { + ensureOpen.run(); + store.renameTempFilesSafe(tempFileNames); + } + + static final class FileChunk { + final StoreFileMetaData md; + final BytesReference content; + final long position; + final boolean lastChunk; + FileChunk(StoreFileMetaData md, BytesReference content, long position, boolean lastChunk) { + this.md = md; + this.content = content; + this.position = position; + this.lastChunk = lastChunk; + } + } + + private final class FileChunkWriter { + // chunks can be delivered out of order, we need to 
buffer chunks if there's a gap between them. + final PriorityQueue pendingChunks = new PriorityQueue<>(Comparator.comparing(fc -> fc.position)); + long lastPosition = 0; + + void writeChunk(FileChunk newChunk) throws IOException { + synchronized (this) { + pendingChunks.add(newChunk); + } + while (true) { + final FileChunk chunk; + synchronized (this) { + chunk = pendingChunks.peek(); + if (chunk == null || chunk.position != lastPosition) { + return; + } + pendingChunks.remove(); + } + innerWriteFileChunk(chunk.md, chunk.position, chunk.content, chunk.lastChunk); + synchronized (this) { + assert lastPosition == chunk.position : "last_position " + lastPosition + " != chunk_position " + chunk.position; + lastPosition += chunk.content.length(); + if (chunk.lastChunk) { + assert pendingChunks.isEmpty() == true : "still have pending chunks [" + pendingChunks + "]"; + fileChunkWriters.remove(chunk.md.name()); + assert fileChunkWriters.containsValue(this) == false : "chunk writer [" + newChunk.md + "] was not removed"; + } + } + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 068b92991db09..b5a72b37c74ad 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -360,7 +360,7 @@ private StartRecoveryRequest getStartRecoveryRequest(final RecoveryTarget recove logger.trace("{} preparing for file-based recovery from [{}]", recoveryTarget.shardId(), recoveryTarget.sourceNode()); } else { logger.trace( - "{} preparing for sequence-number-based recovery starting at local checkpoint [{}] from [{}]", + "{} preparing for sequence-number-based recovery starting at sequence number [{}] from [{}]", recoveryTarget.shardId(), startingSeqNo, recoveryTarget.sourceNode()); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index a6973a46926a7..6bca848a361fa 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -155,17 +155,15 @@ public void recoverToTarget(ActionListener listener) { assert targetShardRouting.initializing() : "expected recovery target to be initializing but was " + targetShardRouting; }, shardId + " validating recovery target ["+ request.targetAllocationId() + "] registered ", shard, cancellableThreads, logger); - final Closeable retentionLock = shard.acquireRetentionLockForPeerRecovery(); + final Closeable retentionLock = shard.acquireRetentionLock(); resources.add(retentionLock); final long startingSeqNo; - final long requiredSeqNoRangeStart; final boolean isSequenceNumberBasedRecovery = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && isTargetSameHistory() && shard.hasCompleteHistoryOperations("peer-recovery", request.startingSeqNo()); final SendFileResult sendFileResult; if (isSequenceNumberBasedRecovery) { logger.trace("performing sequence numbers based recovery. 
starting at [{}]", request.startingSeqNo()); startingSeqNo = request.startingSeqNo(); - requiredSeqNoRangeStart = startingSeqNo; sendFileResult = SendFileResult.EMPTY; } else { final Engine.IndexCommitRef phase1Snapshot; @@ -174,13 +172,9 @@ public void recoverToTarget(ActionListener listener) { } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e); } - // We must have everything above the local checkpoint in the commit - requiredSeqNoRangeStart = - Long.parseLong(phase1Snapshot.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1; - // If soft-deletes enabled, we need to transfer only operations after the local_checkpoint of the commit to have - // the same history on the target. However, with translog, we need to set this to 0 to create a translog roughly - // according to the retention policy on the target. Note that it will still filter out legacy operations without seqNo. - startingSeqNo = shard.indexSettings().isSoftDeleteEnabled() ? requiredSeqNoRangeStart : 0; + // We need to set this to 0 to create a translog roughly according to the retention policy on the target. Note that it will + // still filter out legacy operations without seqNo. + startingSeqNo = 0; try { final int estimateNumOps = shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo); sendFileResult = phase1(phase1Snapshot.getIndexCommit(), () -> estimateNumOps); @@ -195,8 +189,6 @@ public void recoverToTarget(ActionListener listener) { } } assert startingSeqNo >= 0 : "startingSeqNo must be non negative. got: " + startingSeqNo; - assert requiredSeqNoRangeStart >= startingSeqNo : "requiredSeqNoRangeStart [" + requiredSeqNoRangeStart + "] is lower than [" - + startingSeqNo + "]"; final StepListener prepareEngineStep = new StepListener<>(); // For a sequence based recovery, the target can keep its local translog @@ -214,13 +206,7 @@ public void recoverToTarget(ActionListener listener) { shardId + " initiating tracking of " + request.targetAllocationId(), shard, cancellableThreads, logger); final long endingSeqNo = shard.seqNoStats().getMaxSeqNo(); - /* - * We need to wait for all operations up to the current max to complete, otherwise we can not guarantee that all - * operations in the required range will be available for replaying from the translog of the source. 
- */ - cancellableThreads.execute(() -> shard.waitForOpsToComplete(endingSeqNo)); if (logger.isTraceEnabled()) { - logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo); logger.trace("snapshot translog for recovery; current size is [{}]", shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo)); } @@ -233,15 +219,8 @@ public void recoverToTarget(ActionListener listener) { final long maxSeenAutoIdTimestamp = shard.getMaxSeenAutoIdTimestamp(); final long maxSeqNoOfUpdatesOrDeletes = shard.getMaxSeqNoOfUpdatesOrDeletes(); final RetentionLeases retentionLeases = shard.getRetentionLeases(); - phase2( - startingSeqNo, - requiredSeqNoRangeStart, - endingSeqNo, - phase2Snapshot, - maxSeenAutoIdTimestamp, - maxSeqNoOfUpdatesOrDeletes, - retentionLeases, - sendSnapshotStep); + phase2(startingSeqNo, endingSeqNo, phase2Snapshot, maxSeenAutoIdTimestamp, maxSeqNoOfUpdatesOrDeletes, + retentionLeases, sendSnapshotStep); sendSnapshotStep.whenComplete( r -> IOUtils.close(phase2Snapshot), e -> { @@ -519,7 +498,6 @@ void prepareTargetForTranslog(boolean fileBasedRecovery, int totalTranslogOps, A * * @param startingSeqNo the sequence number to start recovery from, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} if all * ops should be sent - * @param requiredSeqNoRangeStart the lower sequence number of the required range (ending with endingSeqNo) * @param endingSeqNo the highest sequence number that should be sent * @param snapshot a snapshot of the translog * @param maxSeenAutoIdTimestamp the max auto_id_timestamp of append-only requests on the primary @@ -528,26 +506,19 @@ void prepareTargetForTranslog(boolean fileBasedRecovery, int totalTranslogOps, A */ void phase2( final long startingSeqNo, - final long requiredSeqNoRangeStart, final long endingSeqNo, final Translog.Snapshot snapshot, final long maxSeenAutoIdTimestamp, final long maxSeqNoOfUpdatesOrDeletes, final RetentionLeases retentionLeases, final ActionListener listener) throws IOException { - assert requiredSeqNoRangeStart <= endingSeqNo + 1: - "requiredSeqNoRangeStart " + requiredSeqNoRangeStart + " is larger than endingSeqNo " + endingSeqNo; - assert startingSeqNo <= requiredSeqNoRangeStart : - "startingSeqNo " + startingSeqNo + " is larger than requiredSeqNoRangeStart " + requiredSeqNoRangeStart; if (shard.state() == IndexShardState.CLOSED) { throw new IndexShardClosedException(request.shardId()); } - logger.trace("recovery [phase2]: sending transaction log operations (seq# from [" + startingSeqNo + "], " + - "required [" + requiredSeqNoRangeStart + ":" + endingSeqNo + "]"); + logger.trace("recovery [phase2]: sending transaction log operations (from [" + startingSeqNo + "] to [" + endingSeqNo + "]"); final AtomicInteger skippedOps = new AtomicInteger(); final AtomicInteger totalSentOps = new AtomicInteger(); - final LocalCheckpointTracker requiredOpsTracker = new LocalCheckpointTracker(endingSeqNo, requiredSeqNoRangeStart - 1); final AtomicInteger lastBatchCount = new AtomicInteger(); // used to estimate the count of the subsequent batch. final CheckedSupplier, IOException> readNextBatch = () -> { // We need to synchronized Snapshot#next() because it's called by different threads through sendBatch. 
@@ -569,7 +540,6 @@ void phase2( ops.add(operation); batchSizeInBytes += operation.estimateSize(); totalSentOps.incrementAndGet(); - requiredOpsTracker.markSeqNoAsCompleted(seqNo); // check if this request is past bytes threshold, and if so, send it off if (batchSizeInBytes >= chunkSizeInBytes) { @@ -587,11 +557,6 @@ void phase2( assert snapshot.totalOperations() == snapshot.skippedOperations() + skippedOps.get() + totalSentOps.get() : String.format(Locale.ROOT, "expected total [%d], overridden [%d], skipped [%d], total sent [%d]", snapshot.totalOperations(), snapshot.skippedOperations(), skippedOps.get(), totalSentOps.get()); - if (requiredOpsTracker.getCheckpoint() < endingSeqNo) { - throw new IllegalStateException("translog replay failed to cover required sequence numbers" + - " (required range [" + requiredSeqNoRangeStart + ":" + endingSeqNo + "). first missing op is [" - + (requiredOpsTracker.getCheckpoint() + 1) + "]"); - } stopWatch.stop(); final TimeValue tookTime = stopWatch.totalTime(); logger.trace("recovery [phase2]: took [{}]", tookTime); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index 1fed238f8ddf6..4d27362af22b5 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -417,6 +417,10 @@ public synchronized void reset() { stopTime = 0; } + // for tests + public long getStartNanoTime() { + return startNanoTime; + } } public static class VerifyIndex extends Timer implements ToXContentFragment, Writeable { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index e63b9ba8fd5ea..a97208561962e 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -20,14 +20,10 @@ package org.elasticsearch.indices.recovery; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.Assertions; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; @@ -39,7 +35,6 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRefCounted; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.seqno.ReplicationTracker; @@ -55,15 +50,7 @@ import java.io.IOException; import java.nio.file.Path; -import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; -import java.util.Iterator; import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.PriorityQueue; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -85,15 +72,13 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget private final long recoveryId; private final IndexShard indexShard; private final DiscoveryNode sourceNode; - private final String tempFilePrefix; + private final MultiFileWriter multiFileWriter; private final Store store; private final PeerRecoveryTargetService.RecoveryListener listener; private final LongConsumer ensureClusterStateVersionCallback; private final AtomicBoolean finished = new AtomicBoolean(); - private final ConcurrentMap openIndexOutputs = ConcurrentCollections.newConcurrentMap(); - private final ConcurrentMap fileChunkWriters = ConcurrentCollections.newConcurrentMap(); private final CancellableThreads cancellableThreads; // last time this status was accessed @@ -102,8 +87,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget // latch that can be used to blockingly wait for RecoveryTarget to be closed private final CountDownLatch closedLatch = new CountDownLatch(1); - private final Map tempFileNames = ConcurrentCollections.newConcurrentMap(); - /** * Creates a new recovery target object that represents a recovery to the provided shard. * @@ -126,7 +109,9 @@ public RecoveryTarget(final IndexShard indexShard, this.indexShard = indexShard; this.sourceNode = sourceNode; this.shardId = indexShard.shardId(); - this.tempFilePrefix = RECOVERY_PREFIX + UUIDs.randomBase64UUID() + "."; + final String tempFilePrefix = RECOVERY_PREFIX + UUIDs.randomBase64UUID() + "."; + this.multiFileWriter = new MultiFileWriter(indexShard.store(), indexShard.recoveryState().getIndex(), tempFilePrefix, logger, + this::ensureRefCount); this.store = indexShard.store(); this.ensureClusterStateVersionCallback = ensureClusterStateVersionCallback; // make sure the store is not released until we are done. @@ -187,12 +172,6 @@ public RecoveryState.Stage stage() { return state().getStage(); } - /** renames all temporary files to their true name, potentially overriding existing files */ - public void renameAllTempFiles() throws IOException { - ensureRefCount(); - store.renameTempFilesSafe(tempFileNames); - } - /** * Closes the current recovery target and waits up to a certain timeout for resources to be freed. * Returns true if resetting the recovery was successful, false if the recovery target is already cancelled / failed or marked as done. @@ -274,7 +253,7 @@ public void notifyListener(RecoveryFailedException e, boolean sendShardFailure) /** mark the current recovery as done */ public void markAsDone() { if (finished.compareAndSet(false, true)) { - assert tempFileNames.isEmpty() : "not all temporary files are renamed"; + assert multiFileWriter.tempFileNames.isEmpty() : "not all temporary files are renamed"; try { // this might still throw an exception ie. if the shard is CLOSED due to some other event. // it's safer to decrement the reference in a try finally here. @@ -287,65 +266,12 @@ public void markAsDone() { } } - /** Get a temporary name for the provided file name. */ - public String getTempNameForFile(String origFile) { - return tempFilePrefix + origFile; - } - - public IndexOutput getOpenIndexOutput(String key) { - ensureRefCount(); - return openIndexOutputs.get(key); - } - - /** remove and {@link org.apache.lucene.store.IndexOutput} for a given file. 
It is the caller's responsibility to close it */ - public IndexOutput removeOpenIndexOutputs(String name) { - ensureRefCount(); - return openIndexOutputs.remove(name); - } - - /** - * Creates an {@link org.apache.lucene.store.IndexOutput} for the given file name. Note that the - * IndexOutput actually point at a temporary file. - *

- * Note: You can use {@link #getOpenIndexOutput(String)} with the same filename to retrieve the same IndexOutput - * at a later stage - */ - public IndexOutput openAndPutIndexOutput(String fileName, StoreFileMetaData metaData, Store store) throws IOException { - ensureRefCount(); - String tempFileName = getTempNameForFile(fileName); - if (tempFileNames.containsKey(tempFileName)) { - throw new IllegalStateException("output for file [" + fileName + "] has already been created"); - } - // add first, before it's created - tempFileNames.put(tempFileName, fileName); - IndexOutput indexOutput = store.createVerifyingOutput(tempFileName, metaData, IOContext.DEFAULT); - openIndexOutputs.put(fileName, indexOutput); - return indexOutput; - } - @Override protected void closeInternal() { try { - // clean open index outputs - Iterator> iterator = openIndexOutputs.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry entry = iterator.next(); - logger.trace("closing IndexOutput file [{}]", entry.getValue()); - try { - entry.getValue().close(); - } catch (Exception e) { - logger.debug(() -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e); - } - iterator.remove(); - } - // trash temporary files - for (String file : tempFileNames.keySet()) { - logger.trace("cleaning temporary file [{}]", file); - store.deleteQuiet(file); - } + multiFileWriter.close(); } finally { // free store. increment happens in constructor - fileChunkWriters.clear(); store.decRef(); indexShard.recoveryStats().decCurrentAsTarget(); closedLatch.countDown(); @@ -382,6 +308,7 @@ public void finalizeRecovery(final long globalCheckpoint, ActionListener l indexShard.updateGlobalCheckpointOnReplica(globalCheckpoint, "finalizing recovery"); // Persist the global checkpoint. indexShard.sync(); + indexShard.persistRetentionLeases(); indexShard.finalizeRecovery(); return null; }); @@ -434,8 +361,12 @@ public void indexTranslogOperations( if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { throw new MapperException("mapping updates are not allowed [" + operation + "]"); } - assert result.getFailure() == null : "unexpected failure while replicating translog entry: " + result.getFailure(); - ExceptionsHelper.reThrowIfNotNull(result.getFailure()); + if (result.getFailure() != null) { + if (Assertions.ENABLED) { + throw new AssertionError("unexpected failure while replicating translog entry", result.getFailure()); + } + ExceptionsHelper.reThrowIfNotNull(result.getFailure()); + } } // update stats only after all operations completed (to ensure that mapping updates don't mess with stats) translog.incrementRecoveredOperations(operations.size()); @@ -470,7 +401,7 @@ public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaDa // first, we go and move files that were created with the recovery id suffix to // the actual names, its ok if we have a corrupted index here, since we have replicas // to recover from in case of a full cluster shutdown just when this code executes... 
- renameAllTempFiles(); + multiFileWriter.renameAllTempFiles(); final Store store = store(); store.incRef(); try { @@ -483,6 +414,15 @@ public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaDa indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, indexShard.getPendingPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); + + if (indexShard.getRetentionLeases().leases().isEmpty()) { + // if empty, may be a fresh IndexShard, so write an empty leases file to disk + indexShard.persistRetentionLeases(); + assert indexShard.loadRetentionLeases().leases().isEmpty(); + } else { + assert indexShard.assertRetentionLeasesPersisted(); + } + } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { // this is a fatal exception at this stage. // this means we transferred files from the remote that have not be checksummed and they are @@ -511,96 +451,21 @@ public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaDa } } - private void innerWriteFileChunk(StoreFileMetaData fileMetaData, long position, - BytesReference content, boolean lastChunk) throws IOException { - final Store store = store(); - final String name = fileMetaData.name(); - final RecoveryState.Index indexState = state().getIndex(); - IndexOutput indexOutput; - if (position == 0) { - indexOutput = openAndPutIndexOutput(name, fileMetaData, store); - } else { - indexOutput = getOpenIndexOutput(name); - } - assert indexOutput.getFilePointer() == position : "file-pointer " + indexOutput.getFilePointer() + " != " + position; - BytesRefIterator iterator = content.iterator(); - BytesRef scratch; - while((scratch = iterator.next()) != null) { // we iterate over all pages - this is a 0-copy for all core impls - indexOutput.writeBytes(scratch.bytes, scratch.offset, scratch.length); - } - indexState.addRecoveredBytesToFile(name, content.length()); - if (indexOutput.getFilePointer() >= fileMetaData.length() || lastChunk) { - try { - Store.verify(indexOutput); - } finally { - // we are done - indexOutput.close(); - } - final String temporaryFileName = getTempNameForFile(name); - assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName) : - "expected: [" + temporaryFileName + "] in " + Arrays.toString(store.directory().listAll()); - store.directory().sync(Collections.singleton(temporaryFileName)); - IndexOutput remove = removeOpenIndexOutputs(name); - assert remove == null || remove == indexOutput; // remove maybe null if we got finished - } - } - @Override public void writeFileChunk(StoreFileMetaData fileMetaData, long position, BytesReference content, boolean lastChunk, int totalTranslogOps, ActionListener listener) { try { state().getTranslog().totalOperations(totalTranslogOps); - final FileChunkWriter writer = fileChunkWriters.computeIfAbsent(fileMetaData.name(), name -> new FileChunkWriter()); - writer.writeChunk(new FileChunk(fileMetaData, content, position, lastChunk)); + multiFileWriter.writeFileChunk(fileMetaData, position, content, lastChunk); listener.onResponse(null); } catch (Exception e) { listener.onFailure(e); } } - private static final class FileChunk { - final StoreFileMetaData md; - final BytesReference content; - final long position; - final boolean lastChunk; - FileChunk(StoreFileMetaData md, BytesReference content, long position, boolean lastChunk) { - this.md = md; - this.content = content; - this.position = position; - this.lastChunk = lastChunk; - } - } - - private final class 
FileChunkWriter { - // chunks can be delivered out of order, we need to buffer chunks if there's a gap between them. - final PriorityQueue pendingChunks = new PriorityQueue<>(Comparator.comparing(fc -> fc.position)); - long lastPosition = 0; - - void writeChunk(FileChunk newChunk) throws IOException { - synchronized (this) { - pendingChunks.add(newChunk); - } - while (true) { - final FileChunk chunk; - synchronized (this) { - chunk = pendingChunks.peek(); - if (chunk == null || chunk.position != lastPosition) { - return; - } - pendingChunks.remove(); - } - innerWriteFileChunk(chunk.md, chunk.position, chunk.content, chunk.lastChunk); - synchronized (this) { - assert lastPosition == chunk.position : "last_position " + lastPosition + " != chunk_position " + chunk.position; - lastPosition += chunk.content.length(); - if (chunk.lastChunk) { - assert pendingChunks.isEmpty() == true : "still have pending chunks [" + pendingChunks + "]"; - fileChunkWriters.remove(chunk.md.name()); - assert fileChunkWriters.containsValue(this) == false : "chunk writer [" + newChunk.md + "] was not removed"; - } - } - } - } + /** Get a temporary name for the provided file name. */ + public String getTempNameForFile(String origFile) { + return multiFileWriter.getTempNameForFile(origFile); } Path translogLocation() { diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index b2143d72ae65f..31023fc85d5b8 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -343,7 +343,7 @@ public String getType() { return new Pipeline(id, description, null, new CompoundProcessor(failureProcessor)); } - static ClusterState innerPut(PutPipelineRequest request, ClusterState currentState) { + public static ClusterState innerPut(PutPipelineRequest request, ClusterState currentState) { IngestMetadata currentIngestMetadata = currentState.metaData().custom(IngestMetadata.TYPE); Map pipelines; if (currentIngestMetadata != null) { diff --git a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java index fc5311be5cbde..218713383227e 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java +++ b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java @@ -89,6 +89,9 @@ public static Pipeline create(String id, Map config, /** * Modifies the data of a document to be indexed based on the processor this pipeline holds + * + * If null is returned then this document will be dropped and not indexed, otherwise + * this document will be kept and indexed. */ public IngestDocument execute(IngestDocument ingestDocument) throws Exception { long startTimeInNanos = relativeTimeProvider.getAsLong(); diff --git a/server/src/main/java/org/elasticsearch/ingest/Processor.java b/server/src/main/java/org/elasticsearch/ingest/Processor.java index 92b08bba77bf7..c064ddb35a129 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Processor.java +++ b/server/src/main/java/org/elasticsearch/ingest/Processor.java @@ -39,6 +39,9 @@ public interface Processor { /** * Introspect and potentially modify the incoming data. 
+ * + * @return If null is returned then the current document will be dropped and not be indexed, + * otherwise this document will be kept and indexed */ IngestDocument execute(IngestDocument ingestDocument) throws Exception; diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index f24acc9c034e9..b8eda3303377d 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -19,6 +19,11 @@ package org.elasticsearch.monitor.jvm; +import org.apache.lucene.util.Constants; +import org.elasticsearch.Version; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -150,10 +155,28 @@ public class JvmInfo implements Writeable, ToXContentFragment { } + final boolean bundledJdk = Booleans.parseBoolean(System.getProperty("es.bundled_jdk", Boolean.FALSE.toString())); + final Boolean usingBundledJdk = bundledJdk ? usingBundledJdk() : null; + INSTANCE = new JvmInfo(JvmPid.getPid(), System.getProperty("java.version"), runtimeMXBean.getVmName(), runtimeMXBean.getVmVersion(), - runtimeMXBean.getVmVendor(), runtimeMXBean.getStartTime(), configuredInitialHeapSize, configuredMaxHeapSize, - mem, inputArguments, bootClassPath, classPath, systemProperties, gcCollectors, memoryPools, onError, onOutOfMemoryError, - useCompressedOops, useG1GC, useSerialGC); + runtimeMXBean.getVmVendor(), bundledJdk, usingBundledJdk, runtimeMXBean.getStartTime(), configuredInitialHeapSize, + configuredMaxHeapSize, mem, inputArguments, bootClassPath, classPath, systemProperties, gcCollectors, memoryPools, onError, + onOutOfMemoryError, useCompressedOops, useG1GC, useSerialGC); + } + + @SuppressForbidden(reason = "PathUtils#get") + private static boolean usingBundledJdk() { + /* + * We are using the bundled JDK if java.home is the jdk sub-directory of our working directory. This is because we always set + * the working directory of Elasticsearch to home, and the bundled JDK is in the jdk sub-directory there. 
+ */ + final String javaHome = System.getProperty("java.home"); + final String userDir = System.getProperty("user.dir"); + if (Constants.MAC_OS_X) { + return PathUtils.get(javaHome).equals(PathUtils.get(userDir).resolve("jdk/Contents/Home").toAbsolutePath()); + } else { + return PathUtils.get(javaHome).equals(PathUtils.get(userDir).resolve("jdk").toAbsolutePath()); + } } public static JvmInfo jvmInfo() { @@ -170,6 +193,8 @@ public static JvmInfo jvmInfo() { private final String vmName; private final String vmVersion; private final String vmVendor; + private final boolean bundledJdk; + private final Boolean usingBundledJdk; private final long startTime; private final long configuredInitialHeapSize; private final long configuredMaxHeapSize; @@ -186,15 +211,18 @@ public static JvmInfo jvmInfo() { private final String useG1GC; private final String useSerialGC; - private JvmInfo(long pid, String version, String vmName, String vmVersion, String vmVendor, long startTime, - long configuredInitialHeapSize, long configuredMaxHeapSize, Mem mem, String[] inputArguments, String bootClassPath, - String classPath, Map systemProperties, String[] gcCollectors, String[] memoryPools, String onError, - String onOutOfMemoryError, String useCompressedOops, String useG1GC, String useSerialGC) { + private JvmInfo(long pid, String version, String vmName, String vmVersion, String vmVendor, boolean bundledJdk, Boolean usingBundledJdk, + long startTime, long configuredInitialHeapSize, long configuredMaxHeapSize, Mem mem, String[] inputArguments, + String bootClassPath, String classPath, Map systemProperties, String[] gcCollectors, + String[] memoryPools, String onError, String onOutOfMemoryError, String useCompressedOops, String useG1GC, + String useSerialGC) { this.pid = pid; this.version = version; this.vmName = vmName; this.vmVersion = vmVersion; this.vmVendor = vmVendor; + this.bundledJdk = bundledJdk; + this.usingBundledJdk = usingBundledJdk; this.startTime = startTime; this.configuredInitialHeapSize = configuredInitialHeapSize; this.configuredMaxHeapSize = configuredMaxHeapSize; @@ -218,6 +246,13 @@ public JvmInfo(StreamInput in) throws IOException { vmName = in.readString(); vmVersion = in.readString(); vmVendor = in.readString(); + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + bundledJdk = in.readBoolean(); + usingBundledJdk = in.readOptionalBoolean(); + } else { + bundledJdk = false; + usingBundledJdk = null; + } startTime = in.readLong(); inputArguments = new String[in.readInt()]; for (int i = 0; i < inputArguments.length; i++) { @@ -246,6 +281,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(vmName); out.writeString(vmVersion); out.writeString(vmVendor); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeBoolean(bundledJdk); + out.writeOptionalBoolean(usingBundledJdk); + } out.writeLong(startTime); out.writeInt(inputArguments.length); for (String inputArgument : inputArguments) { @@ -360,6 +399,14 @@ public String getVmVendor() { return this.vmVendor; } + public boolean getBundledJdk() { + return bundledJdk; + } + + public Boolean getUsingBundledJdk() { + return usingBundledJdk; + } + public long getStartTime() { return this.startTime; } @@ -436,6 +483,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.VM_NAME, vmName); builder.field(Fields.VM_VERSION, vmVersion); builder.field(Fields.VM_VENDOR, vmVendor); + builder.field(Fields.BUNDLED_JDK, bundledJdk); + builder.field(Fields.USING_BUNDLED_JDK, 
usingBundledJdk); builder.timeField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, startTime); builder.startObject(Fields.MEM); @@ -464,6 +513,8 @@ static final class Fields { static final String VM_NAME = "vm_name"; static final String VM_VERSION = "vm_version"; static final String VM_VENDOR = "vm_vendor"; + static final String BUNDLED_JDK = "bundled_jdk"; + static final String USING_BUNDLED_JDK = "using_bundled_jdk"; static final String START_TIME = "start_time"; static final String START_TIME_IN_MILLIS = "start_time_in_millis"; diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java index 18173dd275a46..320bc15fda1f4 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java @@ -42,6 +42,24 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; +/** + * The {@link OsProbe} class retrieves information about the physical and swap size of the machine + * memory, as well as the system load average and cpu load. + * + * In some exceptional cases, it's possible the underlying native method used by + * {@link #getFreePhysicalMemorySize()} and {@link #getTotalPhysicalMemorySize()} can return a + * negative value. Because of this, we prevent those methods from returning negative values, + * returning 0 instead. + * + * The OS can report a negative number in a number of cases: + * - Non-supported OSes (HP-UX, or AIX) + * - A failure of macOS to initialize host statistics + * - An OS that does not support the {@code _SC_PHYS_PAGES} or {@code _SC_PAGE_SIZE} flags for the {@code sysconf()} linux kernel call + * - An overflow of the product of {@code _SC_PHYS_PAGES} and {@code _SC_PAGE_SIZE} + * - An error case retrieving these values from a linux kernel + * - A non-standard libc implementation not implementing the required values + * For a more exhaustive explanation, see https://github.com/elastic/elasticsearch/pull/42725 + */ public class OsProbe { private static final OperatingSystemMXBean osMxBean = ManagementFactory.getOperatingSystemMXBean(); @@ -67,12 +85,19 @@ public class OsProbe { */ public long getFreePhysicalMemorySize() { if (getFreePhysicalMemorySize == null) { - return -1; + logger.warn("getFreePhysicalMemorySize is not available"); + return 0; } try { - return (long) getFreePhysicalMemorySize.invoke(osMxBean); + final long freeMem = (long) getFreePhysicalMemorySize.invoke(osMxBean); + if (freeMem < 0) { + logger.warn("OS reported a negative free memory value [{}]", freeMem); + return 0; + } + return freeMem; } catch (Exception e) { - return -1; + logger.warn("exception retrieving free physical memory", e); + return 0; } } @@ -81,12 +106,19 @@ public long getFreePhysicalMemorySize() { */ public long getTotalPhysicalMemorySize() { if (getTotalPhysicalMemorySize == null) { - return -1; + logger.warn("getTotalPhysicalMemorySize is not available"); + return 0; } try { - return (long) getTotalPhysicalMemorySize.invoke(osMxBean); + final long totalMem = (long) getTotalPhysicalMemorySize.invoke(osMxBean); + if (totalMem < 0) { + logger.warn("OS reported a negative total memory value [{}]", totalMem); + return 0; + } + return totalMem; } catch (Exception e) { - return -1; + logger.warn("exception retrieving total physical memory", e); + return 0; } } diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java index 
3bdfe95f1e2c6..f4ddd441c8e7b 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java @@ -229,13 +229,17 @@ public static class Mem implements Writeable, ToXContentFragment { private final long free; public Mem(long total, long free) { + assert total >= 0 : "expected total memory to be positive, got: " + total; + assert free >= 0 : "expected free memory to be positive, got: " + total; this.total = total; this.free = free; } public Mem(StreamInput in) throws IOException { this.total = in.readLong(); + assert total >= 0 : "expected total memory to be positive, got: " + total; this.free = in.readLong(); + assert free >= 0 : "expected free memory to be positive, got: " + total; } @Override diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 19af7a467a789..5f60c0b1d6243 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -40,6 +40,7 @@ import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.InternalClusterInfoService; @@ -270,8 +271,9 @@ protected Node( nodeEnvironment = new NodeEnvironment(tmpSettings, environment); resourcesToClose.add(nodeEnvironment); - logger.info("node name [{}], node ID [{}]", - NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId()); + logger.info("node name [{}], node ID [{}], cluster name [{}]", + NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId(), + ClusterName.CLUSTER_NAME_SETTING.get(tmpSettings).value()); final JvmInfo jvmInfo = JvmInfo.jvmInfo(); logger.info( @@ -289,6 +291,7 @@ protected Node( Constants.JVM_NAME, Constants.JAVA_VERSION, Constants.JVM_VERSION); + logger.info("JVM home [{}]", System.getProperty("java.home")); logger.info("JVM arguments {}", Arrays.toString(jvmInfo.getInputArguments())); if (Build.CURRENT.isProductionRelease() == false) { logger.warn( diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index c8cdf0d4e0308..264d069377967 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -122,27 +122,28 @@ *

  * {@code
  *   STORE_ROOT
- *   |- index-N           - list of all snapshot ids and the indices belonging to each snapshot, N is the generation of the file
+ *   |- index-N           - JSON serialized {@link RepositoryData} containing a list of all snapshot ids and the indices belonging to
+ *   |                      each snapshot, N is the generation of the file
  *   |- index.latest      - contains the numeric value of the latest generation of the index file (i.e. N from above)
  *   |- incompatible-snapshots - list of all snapshot ids that are no longer compatible with the current version of the cluster
- *   |- snap-20131010 - JSON serialized Snapshot for snapshot "20131010"
- *   |- meta-20131010.dat - JSON serialized MetaData for snapshot "20131010" (includes only global metadata)
- *   |- snap-20131011 - JSON serialized Snapshot for snapshot "20131011"
- *   |- meta-20131011.dat - JSON serialized MetaData for snapshot "20131011"
+ *   |- snap-20131010.dat - SMILE serialized {@link SnapshotInfo} for snapshot "20131010"
+ *   |- meta-20131010.dat - SMILE serialized {@link MetaData} for snapshot "20131010" (includes only global metadata)
+ *   |- snap-20131011.dat - SMILE serialized {@link SnapshotInfo} for snapshot "20131011"
+ *   |- meta-20131011.dat - SMILE serialized {@link MetaData} for snapshot "20131011"
  *   .....
  *   |- indices/ - data for all indices
  *      |- Ac1342-B_x/ - data for index "foo" which was assigned the unique id of Ac1342-B_x in the repository
- *      |  |- meta-20131010.dat - JSON Serialized IndexMetaData for index "foo"
+ *      |  |- meta-20131010.dat - JSON Serialized {@link IndexMetaData} for index "foo"
  *      |  |- 0/ - data for shard "0" of index "foo"
- *      |  |  |- __1 \
- *      |  |  |- __2 |
- *      |  |  |- __3 |- files from different segments see snapshot-* for their mappings to real segment files
- *      |  |  |- __4 |
- *      |  |  |- __5 /
+ *      |  |  |- __1                      \  (files with numeric names were created by older ES versions)
+ *      |  |  |- __2                      |
+ *      |  |  |- __VPO5oDMVT5y4Akv8T_AO_A |- files from different segments see snap-* for their mappings to real segment files
+ *      |  |  |- __1gbJy18wS_2kv1qI7FgKuQ |
+ *      |  |  |- __R8JvZAHlSMyMXyZc2SS8Zg /
  *      |  |  .....
- *      |  |  |- snap-20131010.dat - JSON serialized BlobStoreIndexShardSnapshot for snapshot "20131010"
- *      |  |  |- snap-20131011.dat - JSON serialized BlobStoreIndexShardSnapshot for snapshot "20131011"
- *      |  |  |- list-123 - JSON serialized BlobStoreIndexShardSnapshot for snapshot "20131011"
+ *      |  |  |- snap-20131010.dat - SMILE serialized {@link BlobStoreIndexShardSnapshot} for snapshot "20131010"
+ *      |  |  |- snap-20131011.dat - SMILE serialized {@link BlobStoreIndexShardSnapshot} for snapshot "20131011"
+ *      |  |  |- index-123 - SMILE serialized {@link BlobStoreIndexShardSnapshots} for the shard
  *      |  |
  *      |  |- 1/ - data for shard "1" of index "foo"
  *      |  |  |- __1
@@ -195,6 +196,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
 
     private final Settings settings;
 
+    private final boolean compress;
+
     private final RateLimiter snapshotRateLimiter;
 
     private final RateLimiter restoreRateLimiter;
@@ -226,33 +229,37 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
      *
      * @param metadata       The metadata for this repository including name and settings
      * @param settings Settings for the node this repository object is created on
+     * @param compress true if metadata and snapshot files should be compressed
      */
-    protected BlobStoreRepository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry) {
+    protected BlobStoreRepository(RepositoryMetaData metadata, Settings settings, boolean compress,
+                                  NamedXContentRegistry namedXContentRegistry) {
         this.settings = settings;
+        this.compress = compress;
         this.metadata = metadata;
         this.namedXContentRegistry = namedXContentRegistry;
         snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB));
         restoreRateLimiter = getRateLimiter(metadata.settings(), "max_restore_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB));
         readOnly = metadata.settings().getAsBoolean("readonly", false);
 
+
         indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT,
-            BlobStoreIndexShardSnapshot::fromXContent, namedXContentRegistry, isCompress());
+            BlobStoreIndexShardSnapshot::fromXContent, namedXContentRegistry, compress);
         indexShardSnapshotsFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_INDEX_CODEC, SNAPSHOT_INDEX_NAME_FORMAT,
-            BlobStoreIndexShardSnapshots::fromXContent, namedXContentRegistry, isCompress());
-        ByteSizeValue chunkSize = chunkSize();
-        if (chunkSize != null && chunkSize.getBytes() <= 0) {
-            throw new IllegalArgumentException("the chunk size cannot be negative: [" + chunkSize + "]");
-        }
+            BlobStoreIndexShardSnapshots::fromXContent, namedXContentRegistry, compress);
     }
 
     @Override
     protected void doStart() {
+        ByteSizeValue chunkSize = chunkSize();
+        if (chunkSize != null && chunkSize.getBytes() <= 0) {
+            throw new IllegalArgumentException("the chunk size cannot be negative: [" + chunkSize + "]");
+        }
         globalMetaDataFormat = new ChecksumBlobStoreFormat<>(METADATA_CODEC, METADATA_NAME_FORMAT,
-            MetaData::fromXContent, namedXContentRegistry, isCompress());
+            MetaData::fromXContent, namedXContentRegistry, compress);
         indexMetaDataFormat = new ChecksumBlobStoreFormat<>(INDEX_METADATA_CODEC, METADATA_NAME_FORMAT,
-            IndexMetaData::fromXContent, namedXContentRegistry, isCompress());
+            IndexMetaData::fromXContent, namedXContentRegistry, compress);
         snapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT,
-            SnapshotInfo::fromXContentInternal, namedXContentRegistry, isCompress());
+            SnapshotInfo::fromXContentInternal, namedXContentRegistry, compress);
     }
 
     @Override
@@ -347,8 +354,8 @@ protected BlobStore blobStore() {
      *
      * @return true if compression is needed
      */
-    protected boolean isCompress() {
-        return false;
+    protected final boolean isCompress() {
+        return compress;
     }
 
     /**
@@ -460,17 +467,17 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) {
             final BlobContainer indicesBlobContainer = blobStore().blobContainer(basePath().add("indices"));
             for (final IndexId indexId : indicesToCleanUp) {
                 try {
-                    indicesBlobContainer.deleteBlob(indexId.getId());
+                    indicesBlobContainer.deleteBlobIgnoringIfNotExists(indexId.getId());
                 } catch (DirectoryNotEmptyException dnee) {
                     // if the directory isn't empty for some reason, it will fail to clean up;
                     // we'll ignore that and accept that cleanup didn't fully succeed.
                     // since we are using UUIDs for path names, this won't be an issue for
                     // snapshotting indices of the same name
-                    logger.debug(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, " +
+                    logger.warn(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, " +
                         "but failed to clean up its index folder due to the directory not being empty.", metadata.name(), indexId), dnee);
                 } catch (IOException ioe) {
                     // a different IOException occurred while trying to delete - will just log the issue for now
-                    logger.debug(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, " +
+                    logger.warn(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, " +
                         "but failed to clean up its index folder.", metadata.name(), indexId), ioe);
                 }
             }
@@ -820,7 +827,7 @@ private long listBlobsToGetLatestIndexId() throws IOException {
             } catch (NumberFormatException nfe) {
                 // the index- blob wasn't of the format index-N where N is a number,
                 // no idea what this blob is but it doesn't belong in the repository!
-                logger.debug("[{}] Unknown blob in the repository: {}", metadata.name(), blobName);
+                logger.warn("[{}] Unknown blob in the repository: {}", metadata.name(), blobName);
             }
         }
         return latest;
@@ -963,7 +970,7 @@ public void delete() {
             try {
                 indexShardSnapshotFormat.delete(blobContainer, snapshotId.getUUID());
             } catch (IOException e) {
-                logger.debug("[{}] [{}] failed to delete shard snapshot file", shardId, snapshotId);
+                logger.warn(new ParameterizedMessage("[{}] [{}] failed to delete shard snapshot file", shardId, snapshotId), e);
             }
 
             // Build a list of snapshots that should be preserved
@@ -1057,41 +1064,6 @@ protected void finalize(final List snapshots,
             }
         }
 
-        /**
-         * Generates blob name
-         *
-         * @param generation the blob number
-         * @return the blob name
-         */
-        protected String fileNameFromGeneration(long generation) {
-            return DATA_BLOB_PREFIX + Long.toString(generation, Character.MAX_RADIX);
-        }
-
-        /**
-         * Finds the next available blob number
-         *
-         * @param blobs list of blobs in the repository
-         * @return next available blob number
-         */
-        protected long findLatestFileNameGeneration(Map blobs) {
-            long generation = -1;
-            for (String name : blobs.keySet()) {
-                if (!name.startsWith(DATA_BLOB_PREFIX)) {
-                    continue;
-                }
-                name = canonicalName(name);
-                try {
-                    long currentGen = Long.parseLong(name.substring(DATA_BLOB_PREFIX.length()), Character.MAX_RADIX);
-                    if (currentGen > generation) {
-                        generation = currentGen;
-                    }
-                } catch (NumberFormatException e) {
-                    logger.warn("file [{}] does not conform to the '{}' schema", name, DATA_BLOB_PREFIX);
-                }
-            }
-            return generation;
-        }
-
         /**
          * Loads all available snapshots in the repository
          *
@@ -1123,7 +1095,7 @@ protected Tuple buildBlobStoreIndexShardS
                     logger.warn(() -> new ParameterizedMessage("failed to read index file [{}]", file), e);
                 }
             } else if (blobKeys.isEmpty() == false) {
-                logger.debug("Could not find a readable index-N file in a non-empty shard snapshot directory [{}]", blobContainer.path());
+                logger.warn("Could not find a readable index-N file in a non-empty shard snapshot directory [{}]", blobContainer.path());
             }
 
             // We couldn't load the index file - falling back to loading individual snapshots
@@ -1141,7 +1113,7 @@ protected Tuple buildBlobStoreIndexShardS
                     logger.warn(() -> new ParameterizedMessage("failed to read commit point [{}]", name), e);
                 }
             }
-            return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), -1);
+            return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), latest);
         }
     }
 
@@ -1184,7 +1156,6 @@ public void snapshot(final IndexCommit snapshotIndexCommit) {
                 throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e);
             }
 
-            long generation = findLatestFileNameGeneration(blobs);
             Tuple tuple = buildBlobStoreIndexShardSnapshots(blobs);
             BlobStoreIndexShardSnapshots snapshots = tuple.v1();
             int fileListGeneration = tuple.v2();
@@ -1252,7 +1223,7 @@ public void snapshot(final IndexCommit snapshotIndexCommit) {
                         indexIncrementalSize += md.length();
                         // create a new FileInfo
                         BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo =
-                            new BlobStoreIndexShardSnapshot.FileInfo(fileNameFromGeneration(++generation), md, chunkSize());
+                            new BlobStoreIndexShardSnapshot.FileInfo(DATA_BLOB_PREFIX + UUIDs.randomBase64UUID(), md, chunkSize());
                         indexCommitPointFiles.add(snapshotFileInfo);
                         filesToSnapshot.add(snapshotFileInfo);
                     } else {
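
A side effect of the snapshot() change above, also reflected in the updated layout comment, is that shard data blobs are no longer named after a per-shard generation counter but after a random base64 UUID (DATA_BLOB_PREFIX + UUIDs.randomBase64UUID()). A minimal, self-contained sketch of the two naming schemes, using plain java.util.UUID as an approximation of the Elasticsearch UUIDs helper:

import java.nio.ByteBuffer;
import java.util.Base64;
import java.util.UUID;

public class DataBlobNames {
    static final String DATA_BLOB_PREFIX = "__";

    // Removed scheme: a monotonically increasing per-shard generation rendered in base 36,
    // which required scanning the existing blobs to find the next free number.
    static String fromGeneration(long generation) {
        return DATA_BLOB_PREFIX + Long.toString(generation, Character.MAX_RADIX);
    }

    // New scheme: a random identifier, so a blob name never depends on what was written before.
    static String random() {
        UUID uuid = UUID.randomUUID();
        byte[] bytes = ByteBuffer.allocate(16)
            .putLong(uuid.getMostSignificantBits())
            .putLong(uuid.getLeastSignificantBits())
            .array();
        return DATA_BLOB_PREFIX + Base64.getUrlEncoder().withoutPadding().encodeToString(bytes);
    }

    public static void main(String[] args) {
        System.out.println(fromGeneration(5)); // __5
        System.out.println(random());          // e.g. __R8JvZAHlSMyMXyZc2SS8Zg
    }
}

This is also why fileNameFromGeneration and findLatestFileNameGeneration could be deleted above: with random names there is no latest generation to discover.
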
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java
index 2f837812ae2e2..1e0ab2dd8beee 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java
@@ -31,7 +31,6 @@
 import org.apache.lucene.util.BytesRefBuilder;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.util.iterable.Iterables;
-import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException;
@@ -62,14 +61,14 @@
  */
 public abstract class FileRestoreContext {
 
-    private static final Logger logger = LogManager.getLogger(FileRestoreContext.class);
+    protected static final Logger logger = LogManager.getLogger(FileRestoreContext.class);
 
-    private final String repositoryName;
-    private final IndexShard indexShard;
-    private final RecoveryState recoveryState;
-    private final SnapshotId snapshotId;
-    private final ShardId shardId;
-    private final int bufferSize;
+    protected final String repositoryName;
+    protected final IndexShard indexShard;
+    protected final RecoveryState recoveryState;
+    protected final SnapshotId snapshotId;
+    protected final ShardId shardId;
+    protected final int bufferSize;
 
     /**
      * Constructs new restore context
@@ -119,7 +118,7 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException {
                 // store can still have existing files but they will be deleted just before being
                 // restored.
                 recoveryTargetMetadata = indexShard.snapshotStoreMetadata();
-            } catch (IndexNotFoundException e) {
+            } catch (org.apache.lucene.index.IndexNotFoundException e) {
                 // happens when restore to an empty shard, not a big deal
                 logger.trace("[{}] [{}] restoring from to an empty shard", shardId, snapshotId);
                 recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY;
@@ -183,7 +182,6 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException {
                 // list of all existing store files
                 final List deleteIfExistFiles = Arrays.asList(store.directory().listAll());
 
-                // restore the files from the snapshot to the Lucene store
                 for (final BlobStoreIndexShardSnapshot.FileInfo fileToRecover : filesToRecover) {
                     // if a file with a same physical name already exist in the store we need to delete it
                     // before restoring it from the snapshot. We could be lenient and try to reuse the existing
@@ -196,10 +194,9 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException {
                         logger.trace("[{}] [{}] deleting pre-existing file [{}]", shardId, snapshotId, physicalName);
                         store.directory().deleteFile(physicalName);
                     }
-
-                    logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name());
-                    restoreFile(fileToRecover, store);
                 }
+
+                restoreFiles(filesToRecover, store);
             } catch (IOException ex) {
                 throw new IndexShardRestoreFailedException(shardId, "Failed to recover index", ex);
             }
@@ -234,6 +231,14 @@ public void restore(SnapshotFiles snapshotFiles) throws IOException {
         }
     }
 
+    protected void restoreFiles(List<BlobStoreIndexShardSnapshot.FileInfo> filesToRecover, Store store) throws IOException {
+        // restore the files from the snapshot to the Lucene store
+        for (final BlobStoreIndexShardSnapshot.FileInfo fileToRecover : filesToRecover) {
+            logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name());
+            restoreFile(fileToRecover, store);
+        }
+    }
+
     protected abstract InputStream fileInputStream(BlobStoreIndexShardSnapshot.FileInfo fileInfo);
 
     @SuppressWarnings("unchecked")
diff --git a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java
index 01c08fbce0044..a47ced0496d9b 100644
--- a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java
@@ -63,21 +63,19 @@ public class FsRepository extends BlobStoreRepository {
         new ByteSizeValue(Long.MAX_VALUE), new ByteSizeValue(5), new ByteSizeValue(Long.MAX_VALUE), Property.NodeScope);
     public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope);
     public static final Setting REPOSITORIES_COMPRESS_SETTING =
-        Setting.boolSetting("repositories.fs.compress", false, Property.NodeScope);
+        Setting.boolSetting("repositories.fs.compress", false, Property.NodeScope, Property.Deprecated);
     private final Environment environment;
 
     private ByteSizeValue chunkSize;
 
     private final BlobPath basePath;
 
-    private boolean compress;
-
     /**
      * Constructs a shared file system repository.
      */
     public FsRepository(RepositoryMetaData metadata, Environment environment,
                         NamedXContentRegistry namedXContentRegistry) {
-        super(metadata, environment.settings(), namedXContentRegistry);
+        super(metadata, environment.settings(), calculateCompress(metadata, environment), namedXContentRegistry);
         this.environment = environment;
         String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings());
         if (location.isEmpty()) {
@@ -105,21 +103,19 @@ public FsRepository(RepositoryMetaData metadata, Environment environment,
         } else {
             this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings());
         }
-        this.compress = COMPRESS_SETTING.exists(metadata.settings())
-            ? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(environment.settings());
         this.basePath = BlobPath.cleanPath();
     }
 
+    private static boolean calculateCompress(RepositoryMetaData metadata, Environment environment) {
+        return COMPRESS_SETTING.exists(metadata.settings())
+            ? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(environment.settings());
+    }
+
     @Override
     protected BlobStore createBlobStore() throws Exception {
         final String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings());
         final Path locationFile = environment.resolveRepoFile(location);
-        return new FsBlobStore(environment.settings(), locationFile);
-    }
-
-    @Override
-    protected boolean isCompress() {
-        return compress;
+        return new FsBlobStore(environment.settings(), locationFile, isReadOnly());
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java
index 2a58ebf52d4b0..c0d8c1cf698d6 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest;
 import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction;
 import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.rest.BaseRestHandler;
@@ -47,15 +48,19 @@ public String getName() {
 
     @Override
     protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
-        String nodeName = request.param("node_name");
-        AddVotingConfigExclusionsRequest votingConfigExclusionsRequest = new AddVotingConfigExclusionsRequest(
-            new String[]{nodeName},
-            TimeValue.parseTimeValue(request.param("timeout"), DEFAULT_TIMEOUT, getClass().getSimpleName() + ".timeout")
-        );
+        AddVotingConfigExclusionsRequest votingConfigExclusionsRequest = resolveVotingConfigExclusionsRequest(request);
         return channel -> client.execute(
             AddVotingConfigExclusionsAction.INSTANCE,
             votingConfigExclusionsRequest,
             new RestToXContentListener<>(channel)
         );
     }
+
+    AddVotingConfigExclusionsRequest resolveVotingConfigExclusionsRequest(final RestRequest request) {
+        String nodeName = request.param("node_name");
+        return new AddVotingConfigExclusionsRequest(
+            Strings.splitStringByCommaToArray(nodeName),
+            TimeValue.parseTimeValue(request.param("timeout"), DEFAULT_TIMEOUT, getClass().getSimpleName() + ".timeout")
+        );
+    }
 }
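
With the request construction extracted into resolveVotingConfigExclusionsRequest, the node_name parameter is now split on commas, so a single REST call can exclude several nodes at once. A small sketch of the request the handler builds; the node names and the 30-second timeout below are made-up illustration values, not defaults from this change:

import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.TimeValue;

public class VotingExclusionsExample {
    public static void main(String[] args) {
        // e.g. the REST request carried ?node_name=node-1,node-2
        String nodeNameParam = "node-1,node-2";
        String[] nodeNames = Strings.splitStringByCommaToArray(nodeNameParam); // ["node-1", "node-2"]
        AddVotingConfigExclusionsRequest request =
            new AddVotingConfigExclusionsRequest(nodeNames, TimeValue.timeValueSeconds(30));
        System.out.println(java.util.Arrays.toString(nodeNames));
    }
}
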
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java
index 71c2842ed825f..732fe6398655d 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java
@@ -102,8 +102,6 @@ public RestResponse buildResponse(ClusterStateResponse response, XContentBuilder
                     builder.field(Fields.WAIT_FOR_TIMED_OUT, response.isWaitForTimedOut());
                 }
                 builder.field(Fields.CLUSTER_NAME, response.getClusterName().value());
-                builder.humanReadableField(Fields.CLUSTER_STATE_SIZE_IN_BYTES, Fields.CLUSTER_STATE_SIZE,
-                        response.getTotalCompressedSize());
                 response.getState().toXContent(builder, request);
                 builder.endObject();
                 return new BytesRestResponse(RestStatus.OK, builder);
@@ -133,7 +131,6 @@ public boolean canTripCircuitBreaker() {
     static final class Fields {
         static final String WAIT_FOR_TIMED_OUT = "wait_for_timed_out";
         static final String CLUSTER_NAME = "cluster_name";
-        static final String CLUSTER_STATE_SIZE = "compressed_size";
-        static final String CLUSTER_STATE_SIZE_IN_BYTES = "compressed_size_in_bytes";
     }
+
 }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java
index 707378eec4cf6..65be56abd76da 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java
@@ -51,8 +51,8 @@ public class RestGetIndexTemplateAction extends BaseRestHandler {
         Collections.singleton(INCLUDE_TYPE_NAME_PARAMETER), Settings.FORMAT_PARAMS));
     private static final DeprecationLogger deprecationLogger = new DeprecationLogger(
             LogManager.getLogger(RestGetIndexTemplateAction.class));
-    public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" +
-            " Specifying include_type_name in get index template requests is deprecated.";
+    public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Using include_type_name in get " +
+        "index template requests is deprecated. The parameter will be removed in the next major version.";
 
     public RestGetIndexTemplateAction(final Settings settings, final RestController controller) {
         super(settings);
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
index 7861a4fe9d1bd..08ddbb728c1ab 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java
@@ -95,7 +95,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
         bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
         bulkRequest.setRefreshPolicy(request.param("refresh"));
         bulkRequest.add(request.requiredContent(), defaultIndex, defaultType, defaultRouting,
-            defaultFetchSourceContext, defaultPipeline, null, allowExplicitIndex, request.getXContentType());
+            defaultFetchSourceContext, defaultPipeline, allowExplicitIndex, request.getXContentType());
 
         return channel -> client.bulk(bulkRequest, new RestStatusToXContentListener<>(channel));
     }
diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java
index 81c6273ec1a36..8db06fd16c582 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchModule.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java
@@ -299,6 +299,16 @@ public class SearchModule {
     private final List namedWriteables = new ArrayList<>();
     private final List namedXContents = new ArrayList<>();
 
+    /**
+     * Constructs a new SearchModule object
+     *
+     * NOTE: This constructor should not be called in production unless an accurate {@link Settings} object is provided.
+     *       When constructed, a static Lucene flag is set via {@link BooleanQuery#setMaxClauseCount} according to the settings.
+     *
+     * @param settings Current settings
+     * @param transportClient Whether this is being constructed in the TransportClient or not
+     * @param plugins List of included {@link SearchPlugin} objects.
+     */
     public SearchModule(Settings settings, boolean transportClient, List plugins) {
         this.settings = settings;
         this.transportClient = transportClient;
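
The NOTE above refers to process-wide Lucene state: the maximum clause count is a static field, so whatever value the constructor derives from the provided Settings affects every BooleanQuery built afterwards in the same JVM. A minimal illustration of that global flag; the value 1024 is only an example, and the setting that normally feeds it is not shown in this hunk:

import org.apache.lucene.search.BooleanQuery;

public class MaxClauseCountDemo {
    public static void main(String[] args) {
        // Static, JVM-wide state: every BooleanQuery built after this call,
        // in any search context, is validated against the new limit.
        BooleanQuery.setMaxClauseCount(1024);
        System.out.println("max clause count: " + BooleanQuery.getMaxClauseCount());
    }
}
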
diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java
index a14b4a328775c..ab1da37d41e8d 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchService.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchService.java
@@ -109,6 +109,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executor;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -1004,8 +1005,8 @@ public void run() {
         }
     }
 
-    public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) {
-        return indicesService.buildAliasFilter(state, index, expressions);
+    public AliasFilter buildAliasFilter(ClusterState state, String index, Set<String> resolvedExpressions) {
+        return indicesService.buildAliasFilter(state, index, resolvedExpressions);
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
index d6eb73514d9c3..9683651391cc2 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
@@ -161,8 +161,7 @@ public AggParseContext(String name) {
         }
     }
 
-    public static final AggregatorFactories EMPTY = new AggregatorFactories(new AggregatorFactory[0],
-            new ArrayList());
+    public static final AggregatorFactories EMPTY = new AggregatorFactories(new AggregatorFactory[0], new ArrayList<>());
 
     private AggregatorFactory[] factories;
     private List pipelineAggregatorFactories;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java
index eafdbe109776b..b525fd32d918a 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java
@@ -61,7 +61,7 @@ public ReduceContext(BigArrays bigArrays, ScriptService scriptService, IntConsum
         /**
          * Returns true iff the current reduce phase is the final reduce phase. This indicates if operations like
          * pipeline aggregations should be applied or if specific features like {@code minDocCount} should be taken into account.
-         * Operations that are potentially loosing information can only be applied during the final reduce phase.
+         * Operations that are potentially losing information can only be applied during the final reduce phase.
          */
         public boolean isFinalReduce() {
             return isFinalReduce;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java
index 95140b50d2bdf..70135c2d51e73 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java
@@ -18,17 +18,22 @@
  */
 package org.elasticsearch.search.aggregations;
 
+import org.elasticsearch.Version;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 import static java.util.Collections.emptyMap;
 
@@ -48,23 +53,56 @@ public final class InternalAggregations extends Aggregations implements Streamab
         }
     };
 
+    private List<SiblingPipelineAggregator> topLevelPipelineAggregators = Collections.emptyList();
+
     private InternalAggregations() {
     }
 
     /**
-     * Constructs a new addAggregation.
+     * Constructs a new aggregation.
      */
     public InternalAggregations(List aggregations) {
         super(aggregations);
     }
 
     /**
-     * Reduces the given lists of addAggregation.
-     *
-     * @param aggregationsList  A list of aggregation to reduce
-     * @return                  The reduced addAggregation
+     * Constructs a new aggregation providing its {@link InternalAggregation}s and {@link SiblingPipelineAggregator}s
+     */
+    public InternalAggregations(List<InternalAggregation> aggregations, List<SiblingPipelineAggregator> topLevelPipelineAggregators) {
+        super(aggregations);
+        this.topLevelPipelineAggregators = Objects.requireNonNull(topLevelPipelineAggregators);
+    }
+
+    /**
+     * Returns the top-level pipeline aggregators.
+     * Note that top-level pipeline aggregators become normal aggregations once the final reduction has been performed, after which they
+     * become part of the list of {@link InternalAggregation}s.
      */
-    public static InternalAggregations reduce(List aggregationsList, ReduceContext context) {
+    List<SiblingPipelineAggregator> getTopLevelPipelineAggregators() {
+        return topLevelPipelineAggregators;
+    }
+
+    /**
+     * Reduces the given list of aggregations as well as the top-level pipeline aggregators extracted from the first
+     * {@link InternalAggregations} object found in the list.
+     * Note that top-level pipeline aggregators are reduced only as part of the final reduction phase, otherwise they are left untouched.
+     */
+    public static InternalAggregations reduce(List<InternalAggregations> aggregationsList,
+                                              ReduceContext context) {
+        if (aggregationsList.isEmpty()) {
+            return null;
+        }
+        InternalAggregations first = aggregationsList.get(0);
+        return reduce(aggregationsList, first.topLevelPipelineAggregators, context);
+    }
+
+    /**
+     * Reduces the given list of aggregations as well as the provided top-level pipeline aggregators.
+     * Note that top-level pipeline aggregators are reduced only as part of the final reduction phase, otherwise they are left untouched.
+     */
+    public static InternalAggregations reduce(List<InternalAggregations> aggregationsList,
+                                              List<SiblingPipelineAggregator> topLevelPipelineAggregators,
+                                              ReduceContext context) {
         if (aggregationsList.isEmpty()) {
             return null;
         }
@@ -89,7 +127,15 @@ public static InternalAggregations reduce(List aggregation
             InternalAggregation first = aggregations.get(0); // the list can't be empty as it's created on demand
             reducedAggregations.add(first.reduce(aggregations, context));
         }
-        return new InternalAggregations(reducedAggregations);
+
+        if (context.isFinalReduce()) {
+            for (SiblingPipelineAggregator pipelineAggregator : topLevelPipelineAggregators) {
+                InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(reducedAggregations), context);
+                reducedAggregations.add(newAgg);
+            }
+            return new InternalAggregations(reducedAggregations);
+        }
+        return new InternalAggregations(reducedAggregations, topLevelPipelineAggregators);
     }
 
     public static InternalAggregations readAggregations(StreamInput in) throws IOException {
@@ -104,11 +150,20 @@ public void readFrom(StreamInput in) throws IOException {
         if (aggregations.isEmpty()) {
             aggregationsAsMap = emptyMap();
         }
+        if (in.getVersion().onOrAfter(Version.V_6_7_0)) {
+            this.topLevelPipelineAggregators = in.readList(
+                stream -> (SiblingPipelineAggregator)in.readNamedWriteable(PipelineAggregator.class));
+        } else {
+            this.topLevelPipelineAggregators = Collections.emptyList();
+        }
     }
 
     @Override
     @SuppressWarnings("unchecked")
     public void writeTo(StreamOutput out) throws IOException {
         out.writeNamedWriteableList((List)aggregations);
+        if (out.getVersion().onOrAfter(Version.V_6_7_0)) {
+            out.writeNamedWriteableList(topLevelPipelineAggregators);
+        }
     }
 }
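
The reduce contract introduced above can be summarized as: top-level (sibling) pipeline aggregators travel with the InternalAggregations through partial reduces and are applied exactly once, when isFinalReduce() is true, after which their output joins the regular aggregation list. A toy model of that flow, deliberately using no Elasticsearch types; the summing "reduction" and the averaging "pipeline" are stand-ins, not the real algorithms:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.ToDoubleFunction;

public class ReduceModel {

    // shardResults holds one list of bucket values per shard; siblingPipelines are computed
    // from the fully reduced buckets and appended, but only on the final reduce.
    static List<Double> reduce(List<List<Double>> shardResults,
                               List<ToDoubleFunction<List<Double>>> siblingPipelines,
                               boolean finalReduce) {
        int buckets = shardResults.get(0).size();
        List<Double> reduced = new ArrayList<>();
        for (int i = 0; i < buckets; i++) {
            double sum = 0;
            for (List<Double> shard : shardResults) {
                sum += shard.get(i); // stand-in for per-bucket aggregation reduction
            }
            reduced.add(sum);
        }
        if (finalReduce) {
            for (ToDoubleFunction<List<Double>> pipeline : siblingPipelines) {
                reduced.add(pipeline.applyAsDouble(reduced)); // e.g. an avg_bucket-like step
            }
        }
        return reduced; // on a partial reduce the pipelines simply carry over unapplied
    }

    public static void main(String[] args) {
        List<List<Double>> shards = Arrays.asList(Arrays.asList(1.0, 2.0), Arrays.asList(3.0, 4.0));
        ToDoubleFunction<List<Double>> avgBucket =
            values -> values.stream().mapToDouble(Double::doubleValue).average().orElse(0);
        System.out.println(reduce(shards, Arrays.asList(avgBucket), false)); // [4.0, 6.0]
        System.out.println(reduce(shards, Arrays.asList(avgBucket), true));  // [4.0, 6.0, 5.0]
    }
}
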
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java
index 84154926bf665..8a8e4e8d77ff6 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java
@@ -74,7 +74,6 @@ public Map cache() {
             }
             return cache;
         }
-
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java
index 123e18a4da618..2a75e9c58f4fc 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java
@@ -35,9 +35,11 @@
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
 import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.fetch.FetchSubPhase.HitContext;
 import org.elasticsearch.search.internal.SearchContext;
 
 import java.io.IOException;
@@ -68,12 +70,13 @@ public HighlightField highlight(HighlighterContext highlighterContext) {
         int numberOfFragments;
         try {
 
-            final Analyzer analyzer = getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType);
+            final Analyzer analyzer = getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType,
+                    hitContext);
             List fieldValues = loadFieldValues(fieldType, field, context, hitContext);
             if (fieldValues.size() == 0) {
                 return null;
             }
-            final PassageFormatter passageFormatter = getPassageFormatter(field, encoder);
+            final PassageFormatter passageFormatter = getPassageFormatter(hitContext, field, encoder);
             final IndexSearcher searcher = new IndexSearcher(hitContext.reader());
             final CustomUnifiedHighlighter highlighter;
             final String fieldValue = mergeFieldValues(fieldValues, MULTIVAL_SEP_CHAR);
@@ -107,7 +110,9 @@ public HighlightField highlight(HighlighterContext highlighterContext) {
                 final String fieldName = highlighterContext.fieldName;
                 highlighter.setFieldMatcher((name) -> fieldName.equals(name));
             } else {
-                highlighter.setFieldMatcher((name) -> true);
+                // ignore terms that target the _id field since they use a different encoding
+                // that is not compatible with utf8
+                highlighter.setFieldMatcher(name -> IdFieldMapper.NAME.equals(name) == false);
             }
 
             Snippet[] fieldSnippets = highlighter.highlightField(highlighterContext.fieldName,
@@ -122,8 +127,6 @@ public HighlightField highlight(HighlighterContext highlighterContext) {
                 "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
         }
 
-        snippets = filterSnippets(snippets, field.fieldOptions().numberOfFragments());
-
         if (field.fieldOptions().scoreOrdered()) {
             //let's sort the snippets by score if needed
             CollectionUtil.introSort(snippets, (o1, o2) -> Double.compare(o2.getScore(), o1.getScore()));
@@ -140,14 +143,14 @@ public HighlightField highlight(HighlighterContext highlighterContext) {
         return null;
     }
 
-    protected PassageFormatter getPassageFormatter(SearchContextHighlight.Field field, Encoder encoder) {
+    protected PassageFormatter getPassageFormatter(HitContext hitContext, SearchContextHighlight.Field field, Encoder encoder) {
         CustomPassageFormatter passageFormatter = new CustomPassageFormatter(field.fieldOptions().preTags()[0],
             field.fieldOptions().postTags()[0], encoder);
         return passageFormatter;
     }
 
     
-    protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type) {
+    protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type, HitContext hitContext) {
         return HighlightUtils.getAnalyzer(docMapper, type);
     }
     
@@ -183,41 +186,6 @@ protected BreakIterator getBreakIterator(SearchContextHighlight.Field field) {
         }
     }
 
-    protected static List filterSnippets(List snippets, int numberOfFragments) {
-
-        //We need to filter the snippets as due to no_match_size we could have
-        //either highlighted snippets or non highlighted ones and we don't want to mix those up
-        List filteredSnippets = new ArrayList<>(snippets.size());
-        for (Snippet snippet : snippets) {
-            if (snippet.isHighlighted()) {
-                filteredSnippets.add(snippet);
-            }
-        }
-
-        //if there's at least one highlighted snippet, we return all the highlighted ones
-        //otherwise we return the first non highlighted one if available
-        if (filteredSnippets.size() == 0) {
-            if (snippets.size() > 0) {
-                Snippet snippet = snippets.get(0);
-                //if we tried highlighting the whole content using whole break iterator (as number_of_fragments was 0)
-                //we need to return the first sentence of the content rather than the whole content
-                if (numberOfFragments == 0) {
-                    BreakIterator bi = BreakIterator.getSentenceInstance(Locale.ROOT);
-                    String text = snippet.getText();
-                    bi.setText(text);
-                    int next = bi.next();
-                    if (next != BreakIterator.DONE) {
-                        String newText = text.substring(0, next).trim();
-                        snippet = new Snippet(newText, snippet.getScore(), snippet.isHighlighted());
-                    }
-                }
-                filteredSnippets.add(snippet);
-            }
-        }
-
-        return filteredSnippets;
-    }
-
     protected static String convertFieldValue(MappedFieldType type, Object value) {
         if (value instanceof BytesRef) {
             return type.valueForDisplay(value).toString();
diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java
index ab8fb5dbcae02..aa4af9822e47e 100644
--- a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java
+++ b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java
@@ -39,10 +39,10 @@ final class ProfileScorer extends Scorer {
     private final Scorer scorer;
     private ProfileWeight profileWeight;
 
-    private final Timer scoreTimer, nextDocTimer, advanceTimer, matchTimer, shallowAdvanceTimer, computeMaxScoreTimer;
+    private final Timer scoreTimer, nextDocTimer, advanceTimer, matchTimer, shallowAdvanceTimer, computeMaxScoreTimer,
+        setMinCompetitiveScoreTimer;
     private final boolean isConstantScoreQuery;
 
-
     ProfileScorer(ProfileWeight w, Scorer scorer, QueryProfileBreakdown profile) throws IOException {
         super(w);
         this.scorer = scorer;
@@ -53,6 +53,7 @@ final class ProfileScorer extends Scorer {
         matchTimer = profile.getTimer(QueryTimingType.MATCH);
         shallowAdvanceTimer = profile.getTimer(QueryTimingType.SHALLOW_ADVANCE);
         computeMaxScoreTimer = profile.getTimer(QueryTimingType.COMPUTE_MAX_SCORE);
+        setMinCompetitiveScoreTimer = profile.getTimer(QueryTimingType.SET_MIN_COMPETITIVE_SCORE);
         ProfileScorer profileScorer = null;
         if (w.getQuery() instanceof ConstantScoreQuery && scorer instanceof ProfileScorer) {
             //Case when we have a totalHits query and it is not cached
@@ -219,4 +220,14 @@ public float getMaxScore(int upTo) throws IOException {
             computeMaxScoreTimer.stop();
         }
     }
+
+    @Override
+    public void setMinCompetitiveScore(float minScore) throws IOException {
+        setMinCompetitiveScoreTimer.start();
+        try {
+            scorer.setMinCompetitiveScore(minScore);
+        } finally {
+            setMinCompetitiveScoreTimer.stop();
+        }
+    }
 }
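
The new setMinCompetitiveScore override follows the same start/try/finally/stop pattern as the other profiled calls, so the time spent asking the wrapped scorer to skip non-competitive hits is recorded even when the call throws. A minimal self-contained sketch of that pattern (the Timer below is a stand-in, not the real org.elasticsearch.search.profile.Timer):

```java
public class TimingPatternExample {
    // Simplified accumulator standing in for the profiler's Timer.
    static final class Timer {
        private long start;
        private long totalNanos;
        void start() { start = System.nanoTime(); }
        void stop()  { totalNanos += System.nanoTime() - start; }
        long approxNanos() { return totalNanos; }
    }

    private final Timer setMinCompetitiveScoreTimer = new Timer();

    void setMinCompetitiveScore(float minScore) {
        setMinCompetitiveScoreTimer.start();
        try {
            // delegate to the wrapped scorer here
        } finally {
            setMinCompetitiveScoreTimer.stop();   // counted even if the delegate throws
        }
    }
}
```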
diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java b/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java
index 146bd8f07bcd1..aecc41d8a23b5 100644
--- a/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java
+++ b/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java
@@ -29,7 +29,8 @@ public enum QueryTimingType {
     MATCH,
     SCORE,
     SHALLOW_ADVANCE,
-    COMPUTE_MAX_SCORE;
+    COMPUTE_MAX_SCORE,
+    SET_MIN_COMPETITIVE_SCORE;
 
     @Override
     public String toString() {
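
Adding the SET_MIN_COMPETITIVE_SCORE constant is what surfaces the new timing in the query profile breakdown. Assuming the enum's toString() lower-cases the constant name (only the method signature is visible in this hunk, so the body below is an assumption), the new entry would appear as set_min_competitive_score:

```java
import java.util.Locale;

public class TimingTypeSketch {
    enum QueryTiming {
        SET_MIN_COMPETITIVE_SCORE;

        @Override
        public String toString() {
            // assumed behaviour: profile keys are the lower-cased constant names
            return name().toLowerCase(Locale.ROOT);
        }
    }

    public static void main(String[] args) {
        System.out.println(QueryTiming.SET_MIN_COMPETITIVE_SCORE); // set_min_competitive_score
    }
}
```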
diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
index 43654823914b4..55787dfc53a35 100644
--- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
+++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
@@ -36,10 +36,11 @@
 import org.elasticsearch.search.suggest.Suggest;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.List;
+import java.util.Objects;
 import java.util.stream.Collectors;
 
-import static java.util.Collections.emptyList;
 import static org.elasticsearch.common.lucene.Lucene.readTopDocs;
 import static org.elasticsearch.common.lucene.Lucene.writeTopDocs;
 
@@ -54,7 +55,7 @@ public final class QuerySearchResult extends SearchPhaseResult {
     private DocValueFormat[] sortValueFormats;
     private InternalAggregations aggregations;
     private boolean hasAggs;
-    private List<SiblingPipelineAggregator> pipelineAggregators;
+    private List<SiblingPipelineAggregator> pipelineAggregators = Collections.emptyList();
     private Suggest suggest;
     private boolean searchTimedOut;
     private Boolean terminatedEarly = null;
@@ -80,7 +81,6 @@ public QuerySearchResult queryResult() {
         return this;
     }
 
-
     public void searchTimedOut(boolean searchTimedOut) {
         this.searchTimedOut = searchTimedOut;
     }
@@ -204,7 +204,7 @@ public List pipelineAggregators() {
     }
 
     public void pipelineAggregators(List<SiblingPipelineAggregator> pipelineAggregators) {
-        this.pipelineAggregators = pipelineAggregators;
+        this.pipelineAggregators = Objects.requireNonNull(pipelineAggregators);
     }
 
     public Suggest suggest() {
@@ -338,7 +338,7 @@ public void writeToNoId(StreamOutput out) throws IOException {
             out.writeBoolean(true);
             aggregations.writeTo(out);
         }
-        out.writeNamedWriteableList(pipelineAggregators == null ? emptyList() : pipelineAggregators);
+        out.writeNamedWriteableList(pipelineAggregators);
         if (suggest == null) {
             out.writeBoolean(false);
         } else {
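
The pipelineAggregators change is an instance of the usual never-null collection pattern: initialize the field to an immutable empty list, reject null in the setter, and drop the null check at serialization time. A generic sketch of the pattern (names are illustrative, not the real QuerySearchResult API):

```java
import java.util.Collections;
import java.util.List;
import java.util.Objects;

class NonNullListHolder<T> {
    // Defaulting to an immutable empty list means the field is never null.
    private List<T> items = Collections.emptyList();

    void items(List<T> items) {
        // Fail fast on null instead of deferring the problem to serialization.
        this.items = Objects.requireNonNull(items, "items must not be null");
    }

    List<T> items() {
        return items;   // safe to iterate or write out without a null check
    }
}
```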
diff --git a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
index 9bd1efbe757f3..bd207f71a0a8e 100644
--- a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
@@ -311,8 +311,10 @@ public SortFieldAndFormat build(QueryShardContext context) throws IOException {
                 return SORT_DOC;
             }
         } else {
+            boolean isUnmapped = false;
             MappedFieldType fieldType = context.fieldMapper(fieldName);
             if (fieldType == null) {
+                isUnmapped = true;
                 if (unmappedType != null) {
                     fieldType = context.getMapperService().unmappedFieldType(unmappedType);
                 } else {
@@ -330,20 +332,22 @@ public SortFieldAndFormat build(QueryShardContext context) throws IOException {
                 localSortMode = reverse ? MultiValueMode.MAX : MultiValueMode.MIN;
             }
 
-            final Nested nested;
-            if (nestedSort != null) {
-                if (context.indexVersionCreated().before(Version.V_6_5_0) && nestedSort.getMaxChildren() != Integer.MAX_VALUE) {
-                    throw new QueryShardException(context,
-                        "max_children is only supported on v6.5.0 or higher");
-                }
-                if (nestedSort.getNestedSort() != null && nestedSort.getMaxChildren() != Integer.MAX_VALUE)  {
-                    throw new QueryShardException(context,
-                        "max_children is only supported on last level of nested sort");
+            Nested nested = null;
+            if (isUnmapped == false) {
+                if (nestedSort != null) {
+                    if (context.indexVersionCreated().before(Version.V_6_5_0) && nestedSort.getMaxChildren() != Integer.MAX_VALUE) {
+                        throw new QueryShardException(context,
+                            "max_children is only supported on v6.5.0 or higher");
+                    }
+                    if (nestedSort.getNestedSort() != null && nestedSort.getMaxChildren() != Integer.MAX_VALUE) {
+                        throw new QueryShardException(context,
+                            "max_children is only supported on last level of nested sort");
+                    }
+                    // new nested sorts takes priority
+                    nested = resolveNested(context, nestedSort);
+                } else {
+                    nested = resolveNested(context, nestedPath, nestedFilter);
                 }
-                // new nested sorts takes priority
-                nested = resolveNested(context, nestedSort);
-            } else {
-                nested = resolveNested(context, nestedPath, nestedFilter);
             }
 
             IndexFieldData<?> fieldData = context.getForField(fieldType);
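
With the isUnmapped flag above, nested resolution is skipped entirely for fields that are not mapped, which is the case a caller typically hits when combining a sort with unmapped_type across indices that do not all define the field. An illustrative use of such a sort through the Java API (assuming the standard SortBuilders/FieldSortBuilder entry points):

```java
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;

public class UnmappedSortExample {
    // Sort on a field that may be unmapped in some of the target indices,
    // falling back to treating it as a long where the mapping is missing.
    public static FieldSortBuilder priceSort() {
        return SortBuilders.fieldSort("price")
            .unmappedType("long")
            .order(SortOrder.ASC);
    }
}
```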
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
index e2628fda991bd..f6ed3eb75d859 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
@@ -331,7 +331,6 @@ public void onFailure(final Exception e) {
             public TimeValue timeout() {
                 return request.masterNodeTimeout();
             }
-
         });
     }
 
@@ -394,6 +393,8 @@ private void beginSnapshot(final ClusterState clusterState,
 
             boolean snapshotCreated;
 
+            boolean hadAbortedInitializations;
+
             @Override
             protected void doRun() {
                 assert initializingSnapshots.contains(snapshot.snapshot());
@@ -433,6 +434,8 @@ public ClusterState execute(ClusterState currentState) {
 
                             if (entry.state() == State.ABORTED) {
                                 entries.add(entry);
+                                assert entry.shards().isEmpty();
+                                hadAbortedInitializations = true;
                             } else {
                                 // Replace the snapshot that was just initialized
                                 ImmutableOpenMap shards =
@@ -491,6 +494,14 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
                         // completion listener in this method. For the snapshot completion to work properly, the snapshot
                         // should still exist when listener is registered.
                         userCreateSnapshotListener.onResponse(snapshot.snapshot());
+
+                        if (hadAbortedInitializations) {
+                            final SnapshotsInProgress snapshotsInProgress = newState.custom(SnapshotsInProgress.TYPE);
+                            assert snapshotsInProgress != null;
+                            final SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshot.snapshot());
+                            assert entry != null;
+                            endSnapshot(entry);
+                        }
                     }
                 });
             }
@@ -701,8 +712,8 @@ public void applyClusterState(ClusterChangedEvent event) {
                     // 3. Snapshots in any other state that have all their shard tasks completed
                     snapshotsInProgress.entries().stream().filter(
                         entry -> entry.state().completed()
-                            || entry.state() == State.INIT && initializingSnapshots.contains(entry.snapshot()) == false
-                            || entry.state() != State.INIT && completed(entry.shards().values())
+                            || initializingSnapshots.contains(entry.snapshot()) == false
+                               && (entry.state() == State.INIT || completed(entry.shards().values()))
                     ).forEach(this::endSnapshot);
                 }
                 if (newMaster) {
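
The rewritten stream filter above changes which in-progress snapshots get ended on a cluster state update. Since && binds tighter than ||, the new predicate reads: completed, or (not currently initializing on this master and (still in INIT or all shard tasks done)). A condensed sketch of just that boolean shape, with placeholder booleans standing in for the values used in the filter:

```java
final class EndSnapshotPredicateSketch {
    // && binds tighter than ||, so the parentheses only group the last alternative.
    static boolean shouldEnd(boolean completed, boolean initializingOnThisMaster,
                             boolean stateIsInit, boolean allShardTasksDone) {
        return completed
            || initializingOnThisMaster == false
               && (stateIsInit || allShardTasksDone);
    }
}
```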
diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
index 5ca2b15d6ffe0..8cf22758f5531 100644
--- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
+++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
@@ -162,7 +162,8 @@ public Collection builders() {
     }
 
     public static Setting<TimeValue> ESTIMATED_TIME_INTERVAL_SETTING =
-        Setting.timeSetting("thread_pool.estimated_time_interval", TimeValue.timeValueMillis(200), Setting.Property.NodeScope);
+        Setting.timeSetting("thread_pool.estimated_time_interval",
+            TimeValue.timeValueMillis(200), TimeValue.ZERO, Setting.Property.NodeScope);
 
     public ThreadPool(final Settings settings, final ExecutorBuilder... customBuilders) {
         assert Node.NODE_NAME_SETTING.exists(settings);
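
The extra TimeValue.ZERO argument gives thread_pool.estimated_time_interval an explicit lower bound, so negative values are rejected while 0 becomes legal (and, per the hunk below, disables the cached-time thread, which is mainly useful in tests). A simplified sketch of what a zero lower bound means for validation (not the real Setting machinery; the error message is illustrative):

```java
final class MinBoundedTimeSettingSketch {
    // Reject anything below the minimum; 0 itself is now an accepted value.
    static long validateIntervalMillis(long requestedMillis) {
        final long minMillis = 0L; // corresponds to TimeValue.ZERO
        if (requestedMillis < minMillis) {
            throw new IllegalArgumentException(
                "value for [thread_pool.estimated_time_interval] must be >= 0ms, got ["
                    + requestedMillis + "ms]");
        }
        return requestedMillis;
    }
}
```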
@@ -555,22 +556,36 @@ static class CachedTimeThread extends Thread {
         /**
          * Return the current time used for relative calculations. This is
          * {@link System#nanoTime()} truncated to milliseconds.
+         * <p>
+         * If {@link ThreadPool#ESTIMATED_TIME_INTERVAL_SETTING} is set to 0
+         * then the cache is disabled and the method calls {@link System#nanoTime()}
+         * whenever called. Typically used for testing.
          */
         long relativeTimeInMillis() {
-            return relativeMillis;
+            if (0 < interval) {
+                return relativeMillis;
+            }
+            return TimeValue.nsecToMSec(System.nanoTime());
         }
 
         /**
          * Return the current epoch time, used to find absolute time. This is
          * a cached version of {@link System#currentTimeMillis()}.
+         * <p>
+ * If {@link ThreadPool#ESTIMATED_TIME_INTERVAL_SETTING} is set to 0 + * then the cache is disabled and the method calls {@link System#currentTimeMillis()} + * whenever called. Typically used for testing. */ long absoluteTimeInMillis() { - return absoluteMillis; + if (0 < interval) { + return absoluteMillis; + } + return System.currentTimeMillis(); } @Override public void run() { - while (running) { + while (running && 0 < interval) { relativeMillis = TimeValue.nsecToMSec(System.nanoTime()); absoluteMillis = System.currentTimeMillis(); try { diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java index f1067a0c5575f..da86ed076e396 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; @@ -38,8 +37,6 @@ import java.util.Map; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -56,19 +53,17 @@ public class ConnectionManager implements Closeable { private final ConcurrentMap connectedNodes = ConcurrentCollections.newConcurrentMap(); private final KeyedLock connectionLock = new KeyedLock<>(); private final Transport transport; - private final ThreadPool threadPool; private final ConnectionProfile defaultProfile; private final AtomicBoolean isClosed = new AtomicBoolean(false); private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener(); - public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool) { - this(ConnectionProfile.buildDefaultConnectionProfile(settings), transport, threadPool); + public ConnectionManager(Settings settings, Transport transport) { + this(ConnectionProfile.buildDefaultConnectionProfile(settings), transport); } - public ConnectionManager(ConnectionProfile connectionProfile, Transport transport, ThreadPool threadPool) { + public ConnectionManager(ConnectionProfile connectionProfile, Transport transport) { this.transport = transport; - this.threadPool = threadPool; this.defaultProfile = connectionProfile; } @@ -185,35 +180,23 @@ public int size() { @Override public void close() { + Transports.assertNotTransportThread("Closing ConnectionManager"); if (isClosed.compareAndSet(false, true)) { - CountDownLatch latch = new CountDownLatch(1); - - // TODO: Consider moving all read/write lock (in Transport and this class) to the TransportService - threadPool.generic().execute(() -> { - closeLock.writeLock().lock(); - try { - // we are holding a write lock so nobody adds to the connectedNodes / openConnections map - it's safe to first close - // all instances and then clear them maps - Iterator> iterator = connectedNodes.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry next = iterator.next(); - try { - 
IOUtils.closeWhileHandlingException(next.getValue()); - } finally { - iterator.remove(); - } + closeLock.writeLock().lock(); + try { + // we are holding a write lock so nobody adds to the connectedNodes / openConnections map - it's safe to first close + // all instances and then clear them maps + Iterator> iterator = connectedNodes.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry next = iterator.next(); + try { + IOUtils.closeWhileHandlingException(next.getValue()); + } finally { + iterator.remove(); } - } finally { - closeLock.writeLock().unlock(); - latch.countDown(); } - }); - - try { - latch.await(30, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - // ignore + } finally { + closeLock.writeLock().unlock(); } } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index 9b9243b612b74..e65f39e8c7c18 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -142,9 +143,10 @@ public String getKey(final String key) { }; /** - * A proxy address for the remote cluster. - * NOTE: this settings is undocumented until we have at last one transport that supports passing - * on the hostname via a mechanism like SNI. + * A proxy address for the remote cluster. By default this is not set, meaning that Elasticsearch will connect directly to the nodes in + * the remote cluster using their publish addresses. If this setting is set to an IP address or hostname then Elasticsearch will connect + * to the nodes in the remote cluster using this address instead. Use of this setting is not recommended and it is deliberately + * undocumented as it does not work well with all proxies. */ public static final Setting.AffixSetting REMOTE_CLUSTERS_PROXY = Setting.affixKeySetting( "cluster.remote.", @@ -282,21 +284,38 @@ protected Map> groupClusterIndices(Set remoteCluste return perClusterIndices; } + void updateRemoteCluster(String clusterAlias, List addresses, String proxy) { + Boolean compress = TransportSettings.TRANSPORT_COMPRESS.get(settings); + TimeValue pingSchedule = TransportSettings.PING_SCHEDULE.get(settings); + updateRemoteCluster(clusterAlias, addresses, proxy, compress, pingSchedule); + } + + void updateRemoteCluster(String clusterAlias, Settings settings) { + String proxy = REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterAlias).get(settings); + List addresses = REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace(clusterAlias).get(settings); + Boolean compress = RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace(clusterAlias).get(settings); + TimeValue pingSchedule = RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE + .getConcreteSettingForNamespace(clusterAlias) + .get(settings); + + updateRemoteCluster(clusterAlias, addresses, proxy, compress, pingSchedule); + } + /** * Subclasses must implement this to receive information about updated cluster aliases. If the given address list is * empty the cluster alias is unregistered and should be removed. 
*/ - protected abstract void updateRemoteCluster(String clusterAlias, List addresses, String proxy); + protected abstract void updateRemoteCluster(String clusterAlias, List addresses, String proxy, boolean compressionEnabled, + TimeValue pingSchedule); /** * Registers this instance to listen to updates on the cluster settings. */ public void listenForUpdates(ClusterSettings clusterSettings) { - clusterSettings.addAffixUpdateConsumer( - RemoteClusterAware.REMOTE_CLUSTERS_PROXY, - RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, - (key, value) -> updateRemoteCluster(key, value.v2(), value.v1()), - (namespace, value) -> {}); + List> remoteClusterSettings = Arrays.asList(RemoteClusterAware.REMOTE_CLUSTERS_PROXY, + RemoteClusterAware.REMOTE_CLUSTERS_SEEDS, RemoteClusterService.REMOTE_CLUSTER_COMPRESS, + RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE); + clusterSettings.addAffixGroupUpdateConsumer(remoteClusterSettings, this::updateRemoteCluster); clusterSettings.addAffixUpdateConsumer( RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY, RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS, diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 57820a8ca48a9..f4a1b250e7f5e 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -64,9 +64,6 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_COMPRESS; -import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE; - /** * Represents a connection to a single remote cluster. In contrast to a local cluster a remote cluster is not joined such that the * current node is part of the cluster and it won't receive cluster state updates from the remote cluster. Remote clusters are also not @@ -107,12 +104,13 @@ final class RemoteClusterConnection implements TransportConnectionListener, Clos * @param maxNumRemoteConnections the maximum number of connections to the remote cluster * @param nodePredicate a predicate to filter eligible remote nodes to connect to * @param proxyAddress the proxy address + * @param connectionProfile the connection profile to use */ RemoteClusterConnection(Settings settings, String clusterAlias, List>> seedNodes, TransportService transportService, int maxNumRemoteConnections, Predicate nodePredicate, - String proxyAddress) { + String proxyAddress, ConnectionProfile connectionProfile) { this(settings, clusterAlias, seedNodes, transportService, maxNumRemoteConnections, nodePredicate, proxyAddress, - createConnectionManager(settings, clusterAlias, transportService)); + createConnectionManager(connectionProfile, transportService)); } // Public for tests to pass a StubbableConnectionManager @@ -309,13 +307,23 @@ Transport.Connection getConnection() { @Override public void close() throws IOException { - IOUtils.close(connectHandler, connectionManager); + IOUtils.close(connectHandler); + // In the ConnectionManager we wait on connections being closed. 
+ threadPool.generic().execute(connectionManager::close); } public boolean isClosed() { return connectHandler.isClosed(); } + public String getProxyAddress() { + return proxyAddress; + } + + public List>> getSeedNodes() { + return seedNodes; + } + /** * The connect handler manages node discovery and the actual connect to the remote cluster. * There is at most one connect job running at any time. If such a connect job is triggered @@ -697,18 +705,8 @@ private synchronized void ensureIteratorAvailable() { } } - private static ConnectionManager createConnectionManager(Settings settings, String clusterAlias, TransportService transportService) { - ConnectionProfile.Builder builder = new ConnectionProfile.Builder() - .setConnectTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) - .setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) - .addConnections(6, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.PING) // TODO make this configurable? - // we don't want this to be used for anything else but search - .addConnections(0, TransportRequestOptions.Type.BULK, - TransportRequestOptions.Type.STATE, - TransportRequestOptions.Type.RECOVERY) - .setCompressionEnabled(REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace(clusterAlias).get(settings)) - .setPingInterval(REMOTE_CLUSTER_PING_SCHEDULE.getConcreteSettingForNamespace(clusterAlias).get(settings)); - return new ConnectionManager(builder.build(), transportService.transport, transportService.threadPool); + private static ConnectionManager createConnectionManager(ConnectionProfile connectionProfile, TransportService transportService) { + return new ConnectionManager(connectionProfile, transportService.transport); } ConnectionManager getConnectionManager() { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 009ee48dd8a99..ccf8876318f2e 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -47,6 +47,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -67,6 +68,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl private static final Logger logger = LogManager.getLogger(RemoteClusterService.class); + private static final ActionListener noopListener = ActionListener.wrap((x) -> {}, (x) -> {}); + static { // remove search.remote.* settings in 8.0.0 assert Version.CURRENT.major < 8; @@ -185,6 +188,7 @@ public String getKey(final String key) { private final TransportService transportService; private final int numRemoteConnections; private volatile Map remoteClusters = Collections.emptyMap(); + private volatile Map remoteClusterConnectionProfiles = Collections.emptyMap(); RemoteClusterService(Settings settings, TransportService transportService) { super(settings); @@ -212,21 +216,34 @@ private synchronized void updateRemoteClusters(Map>> seedList = entry.getValue().v2(); String proxyAddress = entry.getValue().v1(); - RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey()); + String clusterAlias = entry.getKey(); + RemoteClusterConnection remote = this.remoteClusters.get(clusterAlias); + ConnectionProfile connectionProfile = this.remoteClusterConnectionProfiles.get(clusterAlias); if 
(seedList.isEmpty()) { // with no seed nodes we just remove the connection try { IOUtils.close(remote); } catch (IOException e) { - logger.warn("failed to close remote cluster connections for cluster: " + entry.getKey(), e); + logger.warn("failed to close remote cluster connections for cluster: " + clusterAlias, e); } - remoteClusters.remove(entry.getKey()); + remoteClusters.remove(clusterAlias); continue; } if (remote == null) { // this is a new cluster we have to add a new representation - String clusterAlias = entry.getKey(); remote = new RemoteClusterConnection(settings, clusterAlias, seedList, transportService, numRemoteConnections, - getNodePredicate(settings), proxyAddress); + getNodePredicate(settings), proxyAddress, connectionProfile); + remoteClusters.put(clusterAlias, remote); + } else if (connectionProfileChanged(remote.getConnectionManager().getConnectionProfile(), connectionProfile) + || seedsChanged(remote.getSeedNodes(), seedList)) { + // New ConnectionProfile. Must tear down existing connection + try { + IOUtils.close(remote); + } catch (IOException e) { + logger.warn("failed to close remote cluster connections for cluster: " + clusterAlias, e); + } + remoteClusters.remove(clusterAlias); + remote = new RemoteClusterConnection(settings, clusterAlias, seedList, transportService, numRemoteConnections, + getNodePredicate(settings), proxyAddress, connectionProfile); remoteClusters.put(clusterAlias, remote); } @@ -243,7 +260,7 @@ private synchronized void updateRemoteClusters(Map addresses, String proxyAddress) { - updateRemoteCluster(clusterAlias, addresses, proxyAddress, ActionListener.wrap((x) -> {}, (x) -> {})); + protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress, boolean compressionEnabled, + TimeValue pingSchedule) { + if (LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) { + throw new IllegalArgumentException("remote clusters must not have the empty string as its key"); + } + ConnectionProfile oldProfile = remoteClusterConnectionProfiles.get(clusterAlias); + ConnectionProfile newProfile; + if (oldProfile != null) { + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(oldProfile); + builder.setCompressionEnabled(compressionEnabled); + builder.setPingInterval(pingSchedule); + newProfile = builder.build(); + } else { + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(buildConnectionProfileFromSettings(clusterAlias)); + builder.setCompressionEnabled(compressionEnabled); + builder.setPingInterval(pingSchedule); + newProfile = builder.build(); + } + updateRemoteCluster(clusterAlias, addresses, proxyAddress, newProfile, noopListener); } - void updateRemoteCluster( - final String clusterAlias, - final List addresses, - final String proxyAddress, - final ActionListener connectionListener) { + void updateRemoteCluster(final String clusterAlias, final List addresses, final String proxyAddress, + final ConnectionProfile connectionProfile, final ActionListener connectionListener) { + HashMap connectionProfiles = new HashMap<>(remoteClusterConnectionProfiles); + connectionProfiles.put(clusterAlias, connectionProfile); + this.remoteClusterConnectionProfiles = Collections.unmodifiableMap(connectionProfiles); final List>> nodes = - addresses.stream().>>map(address -> Tuple.tuple(address, () -> - buildSeedNode(clusterAlias, address, Strings.hasLength(proxyAddress))) - ).collect(Collectors.toList()); + addresses.stream().>>map(address -> Tuple.tuple(address, () -> + buildSeedNode(clusterAlias, address, 
Strings.hasLength(proxyAddress))) + ).collect(Collectors.toList()); updateRemoteClusters(Collections.singletonMap(clusterAlias, new Tuple<>(proxyAddress, nodes)), connectionListener); } @@ -386,6 +420,7 @@ void initializeRemoteClusters() { final PlainActionFuture future = new PlainActionFuture<>(); Map>>>> seeds = RemoteClusterAware.buildRemoteClustersDynamicConfig(settings); + initializeConnectionProfiles(seeds.keySet()); updateRemoteClusters(seeds, future); try { future.get(timeValue.millis(), TimeUnit.MILLISECONDS); @@ -398,6 +433,32 @@ void initializeRemoteClusters() { } } + private synchronized void initializeConnectionProfiles(Set remoteClusters) { + Map connectionProfiles = new HashMap<>(remoteClusters.size()); + for (String clusterName : remoteClusters) { + connectionProfiles.put(clusterName, buildConnectionProfileFromSettings(clusterName)); + } + this.remoteClusterConnectionProfiles = Collections.unmodifiableMap(connectionProfiles); + } + + private ConnectionProfile buildConnectionProfileFromSettings(String clusterName) { + return buildConnectionProfileFromSettings(settings, clusterName); + } + + static ConnectionProfile buildConnectionProfileFromSettings(Settings settings, String clusterName) { + return new ConnectionProfile.Builder() + .setConnectTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) + .setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) + .addConnections(6, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.PING) // TODO make this configurable? + // we don't want this to be used for anything else but search + .addConnections(0, TransportRequestOptions.Type.BULK, + TransportRequestOptions.Type.STATE, + TransportRequestOptions.Type.RECOVERY) + .setCompressionEnabled(REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace(clusterName).get(settings)) + .setPingInterval(REMOTE_CLUSTER_PING_SCHEDULE.getConcreteSettingForNamespace(clusterName).get(settings)) + .build(); + } + @Override public void close() throws IOException { IOUtils.close(remoteClusters.values()); @@ -407,6 +468,21 @@ public Stream getRemoteConnectionInfos() { return remoteClusters.values().stream().map(RemoteClusterConnection::getConnectionInfo); } + private boolean connectionProfileChanged(ConnectionProfile oldProfile, ConnectionProfile newProfile) { + return Objects.equals(oldProfile.getCompressionEnabled(), newProfile.getCompressionEnabled()) == false + || Objects.equals(oldProfile.getPingInterval(), newProfile.getPingInterval()) == false; + } + + private boolean seedsChanged(final List>> oldSeedNodes, + final List>> newSeedNodes) { + if (oldSeedNodes.size() != newSeedNodes.size()) { + return true; + } + Set oldSeeds = oldSeedNodes.stream().map(Tuple::v1).collect(Collectors.toSet()); + Set newSeeds = newSeedNodes.stream().map(Tuple::v1).collect(Collectors.toSet()); + return oldSeeds.equals(newSeeds) == false; + } + /** * Collects all nodes of the given clusters and returns / passes a (clusterAlias, nodeId) to {@link DiscoveryNode} * function on success. 
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java index 7c51ca7b9c892..b79b79236d958 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java @@ -92,6 +92,30 @@ public RemoteConnectionInfo(StreamInput input) throws IOException { skipUnavailable = input.readBoolean(); } + public List getSeedNodes() { + return seedNodes; + } + + public int getConnectionsPerCluster() { + return connectionsPerCluster; + } + + public TimeValue getInitialConnectionTimeout() { + return initialConnectionTimeout; + } + + public int getNumNodesConnected() { + return numNodesConnected; + } + + public String getClusterAlias() { + return clusterAlias; + } + + public boolean isSkipUnavailable() { + return skipUnavailable; + } + @Override public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_7_0_0)) { diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 2ff5ae1583e37..d5a524105dd01 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -388,28 +388,28 @@ private InetSocketAddress bindToPort(final String name, final InetAddress hostAd PortsRange portsRange = new PortsRange(port); final AtomicReference lastException = new AtomicReference<>(); final AtomicReference boundSocket = new AtomicReference<>(); - boolean success = portsRange.iterate(portNumber -> { - try { - TcpServerChannel channel = bind(name, new InetSocketAddress(hostAddress, portNumber)); - synchronized (serverChannels) { - List list = serverChannels.get(name); - if (list == null) { - list = new ArrayList<>(); - serverChannels.put(name, list); - } - list.add(channel); + closeLock.writeLock().lock(); + try { + if (lifecycle.initialized() == false && lifecycle.started() == false) { + throw new IllegalStateException("transport has been stopped"); + } + boolean success = portsRange.iterate(portNumber -> { + try { + TcpServerChannel channel = bind(name, new InetSocketAddress(hostAddress, portNumber)); + serverChannels.computeIfAbsent(name, k -> new ArrayList<>()).add(channel); boundSocket.set(channel.getLocalAddress()); + } catch (Exception e) { + lastException.set(e); + return false; } - } catch (Exception e) { - lastException.set(e); - return false; + return true; + }); + if (!success) { + throw new BindTransportException("Failed to bind to [" + port + "]", lastException.get()); } - return true; - }); - if (!success) { - throw new BindTransportException("Failed to bind to [" + port + "]", lastException.get()); + } finally { + closeLock.writeLock().unlock(); } - if (logger.isDebugEnabled()) { logger.debug("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get())); } @@ -553,6 +553,7 @@ protected final void doClose() { protected final void doStop() { final CountDownLatch latch = new CountDownLatch(1); // make sure we run it on another thread than a possible IO handler thread + assert threadPool.generic().isShutdown() == false : "Must stop transport before terminating underlying threadpool"; threadPool.generic().execute(() -> { closeLock.writeLock().lock(); try { diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java 
b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 3ea15bba43a84..c6ea7292a935d 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -149,7 +149,7 @@ public TransportService(Settings settings, Transport transport, ThreadPool threa Function localNodeFactory, @Nullable ClusterSettings clusterSettings, Set taskHeaders) { this(settings, transport, threadPool, transportInterceptor, localNodeFactory, clusterSettings, taskHeaders, - new ConnectionManager(settings, transport, threadPool)); + new ConnectionManager(settings, transport)); } public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor, @@ -272,6 +272,7 @@ public void onFailure(Exception e) { } @Override public void doRun() { + // cf. ExceptionsHelper#isTransportStoppedForAction TransportException ex = new TransportException("transport stopped, action: " + holderToNotify.action()); holderToNotify.handler().handleException(ex); } @@ -616,8 +617,13 @@ private void sendRequestInternal(final Transport.C } try { if (lifecycle.stoppedOrClosed()) { - // if we are not started the exception handling will remove the RequestHolder again and calls the handler to notify - // the caller. It will only notify if the toStop code hasn't done the work yet. + /* + * If we are not started the exception handling will remove the request holder again and calls the handler to notify the + * caller. It will only notify if toStop hasn't done the work yet. + * + * Do not edit this exception message, it is currently relied upon in production code! + */ + // TODO: make a dedicated exception for a stopped transport service? cf. 
ExceptionsHelper#isTransportStoppedForAction throw new TransportException("TransportService is closed stopped can't send request"); } if (timeoutHandler != null) { diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index a4a33426a43ce..d6b15f3df43dc 100644 --- a/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -56,6 +56,8 @@ grant codeBase "${codebase.randomizedtesting-runner}" { grant codeBase "${codebase.junit}" { // needed for TestClass creation permission java.lang.RuntimePermission "accessDeclaredMembers"; + // needed for test listener notifications + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; grant codeBase "${codebase.mocksocket}" { @@ -86,3 +88,20 @@ grant codeBase "${codebase.httpasyncclient}" { // rest client uses system properties which gets the default proxy permission java.net.NetPermission "getProxySelector"; }; + +grant codeBase "file:${gradle.dist.lib}/-" { + // gradle test worker code needs a slew of permissions, we give full access here since gradle isn't a production + // dependency and there's no point in exercising the security policy against it + permission java.security.AllPermission; +}; + +grant codeBase "file:${gradle.worker.jar}" { + // gradle test worker code needs a slew of permissions, we give full access here since gradle isn't a production + // dependency and there's no point in exercising the security policy against it + permission java.security.AllPermission; +}; + +grant { + // since the gradle test worker jar is on the test classpath, our tests should be able to read it + permission java.io.FilePermission "${gradle.worker.jar}", "read"; +}; \ No newline at end of file diff --git a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index 1ad067a7e2b36..9d05e119cbb78 100644 --- a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -28,10 +28,12 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermStates; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreMode; @@ -52,6 +54,8 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.instanceOf; public class BlendedTermQueryTests extends ESTestCase { public void testDismaxQuery() throws IOException { @@ -114,6 +118,61 @@ public void testDismaxQuery() throws IOException { assertEquals(Integer.toString(1), reader.document(scoreDocs[0].doc).getField("id").stringValue()); } + { + // test with an unknown field + String[] fields = new String[] {"username", "song", "unknown_field"}; + Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "foo"), 1.0f); + Query rewrite = searcher.rewrite(query); + 
assertThat(rewrite, instanceOf(BooleanQuery.class)); + for (BooleanClause clause : (BooleanQuery) rewrite) { + assertThat(clause.getQuery(), instanceOf(TermQuery.class)); + TermQuery termQuery = (TermQuery) clause.getQuery(); + TermStates termStates = termQuery.getTermStates(); + if (termQuery.getTerm().field().equals("unknown_field")) { + assertThat(termStates.docFreq(), equalTo(0)); + assertThat(termStates.totalTermFreq(), equalTo(0L)); + } else { + assertThat(termStates.docFreq(), greaterThan(0)); + assertThat(termStates.totalTermFreq(), greaterThan(0L)); + } + } + assertThat(searcher.search(query, 10).totalHits.value, equalTo((long) iters + username.length)); + } + { + // test with an unknown field and an unknown term + String[] fields = new String[] {"username", "song", "unknown_field"}; + Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "unknown_term"), 1.0f); + Query rewrite = searcher.rewrite(query); + assertThat(rewrite, instanceOf(BooleanQuery.class)); + for (BooleanClause clause : (BooleanQuery) rewrite) { + assertThat(clause.getQuery(), instanceOf(TermQuery.class)); + TermQuery termQuery = (TermQuery) clause.getQuery(); + TermStates termStates = termQuery.getTermStates(); + assertThat(termStates.docFreq(), equalTo(0)); + assertThat(termStates.totalTermFreq(), equalTo(0L)); + } + assertThat(searcher.search(query, 10).totalHits.value, equalTo(0L)); + } + { + // test with an unknown field and a term that is present in only one field + String[] fields = new String[] {"username", "song", "id", "unknown_field"}; + Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "fan"), 1.0f); + Query rewrite = searcher.rewrite(query); + assertThat(rewrite, instanceOf(BooleanQuery.class)); + for (BooleanClause clause : (BooleanQuery) rewrite) { + assertThat(clause.getQuery(), instanceOf(TermQuery.class)); + TermQuery termQuery = (TermQuery) clause.getQuery(); + TermStates termStates = termQuery.getTermStates(); + if (termQuery.getTerm().field().equals("username")) { + assertThat(termStates.docFreq(), equalTo(1)); + assertThat(termStates.totalTermFreq(), equalTo(1L)); + } else { + assertThat(termStates.docFreq(), equalTo(0)); + assertThat(termStates.totalTermFreq(), equalTo(0L)); + } + } + assertThat(searcher.search(query, 10).totalHits.value, equalTo(1L)); + } reader.close(); w.close(); dir.close(); @@ -168,4 +227,34 @@ public void testExtractTerms() throws IOException { assertThat(extracted.size(), equalTo(terms.size())); assertThat(extracted, containsInAnyOrder(terms.toArray(new Term[0]))); } + + public void testMinTTF() throws IOException { + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))); + FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); + ft.freeze(); + + for (int i = 0; i < 10; i++) { + Document d = new Document(); + d.add(new TextField("id", Integer.toString(i), Field.Store.YES)); + d.add(new Field("dense", "foo foo foo", ft)); + if (i % 10 == 0) { + d.add(new Field("sparse", "foo", ft)); + } + w.addDocument(d); + } + w.commit(); + DirectoryReader reader = DirectoryReader.open(w); + IndexSearcher searcher = setSimilarity(newSearcher(reader)); + { + String[] fields = new String[]{"dense", "sparse"}; + Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "foo"), 0.1f); + TopDocs search = searcher.search(query, 10); + ScoreDoc[] scoreDocs = search.scoreDocs; + assertEquals(Integer.toString(0), reader.document(scoreDocs[0].doc).getField("id").stringValue()); + } + 
reader.close(); + w.close(); + dir.close(); + } } diff --git a/server/src/test/java/org/elasticsearch/BuildTests.java b/server/src/test/java/org/elasticsearch/BuildTests.java index 1f99a1f4542b5..e0d8140c708d6 100644 --- a/server/src/test/java/org/elasticsearch/BuildTests.java +++ b/server/src/test/java/org/elasticsearch/BuildTests.java @@ -20,15 +20,26 @@ package org.elasticsearch; import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; import java.io.IOException; import java.io.InputStream; import java.net.URL; import java.util.Arrays; +import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.sameInstance; + public class BuildTests extends ESTestCase { /** Asking for the jar metadata should not throw exception in tests, no matter how configured */ @@ -115,4 +126,143 @@ public void testEqualsAndHashCode() { ); assertNotEquals(build, differentVersion); } + + private static class WriteableBuild implements Writeable { + private final Build build; + + WriteableBuild(StreamInput in) throws IOException { + build = Build.readBuild(in); + } + + WriteableBuild(Build build) { + this.build = build; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + Build.writeBuild(build, out); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + WriteableBuild that = (WriteableBuild) o; + return build.equals(that.build); + } + + @Override + public int hashCode() { + return Objects.hash(build); + } + } + + private static String randomStringExcept(final String s) { + return randomAlphaOfLength(13 - s.length()); + } + + public void testSerialization() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(new WriteableBuild(new Build( + randomFrom(Build.Flavor.values()), randomFrom(Build.Type.values()), + randomAlphaOfLength(6), randomAlphaOfLength(6), randomBoolean(), randomAlphaOfLength(6))), + // Note: the cast of the Copy- and MutateFunction is needed for some IDE (specifically Eclipse 4.10.0) to infer the right type + (WriteableBuild b) -> copyWriteable(b, writableRegistry(), WriteableBuild::new, Version.CURRENT), + (WriteableBuild b) -> { + switch (randomIntBetween(1, 6)) { + case 1: + return new WriteableBuild(new Build( + randomValueOtherThan(b.build.flavor(), () -> randomFrom(Build.Flavor.values())), b.build.type(), + b.build.shortHash(), b.build.date(), b.build.isSnapshot(), b.build.getQualifiedVersion())); + case 2: + return new WriteableBuild(new Build(b.build.flavor(), + randomValueOtherThan(b.build.type(), () -> randomFrom(Build.Type.values())), + b.build.shortHash(), b.build.date(), b.build.isSnapshot(), b.build.getQualifiedVersion())); + case 3: + return new WriteableBuild(new Build(b.build.flavor(), b.build.type(), + randomStringExcept(b.build.shortHash()), b.build.date(), b.build.isSnapshot(), b.build.getQualifiedVersion())); + case 4: + return new WriteableBuild(new Build(b.build.flavor(), b.build.type(), + b.build.shortHash(), randomStringExcept(b.build.date()), b.build.isSnapshot(), 
b.build.getQualifiedVersion())); + case 5: + return new WriteableBuild(new Build(b.build.flavor(), b.build.type(), + b.build.shortHash(), b.build.date(), b.build.isSnapshot() == false, b.build.getQualifiedVersion())); + case 6: + return new WriteableBuild(new Build(b.build.flavor(), b.build.type(), + b.build.shortHash(), b.build.date(), b.build.isSnapshot(), randomStringExcept(b.build.getQualifiedVersion()))); + } + throw new AssertionError(); + }); + } + + public void testSerializationBWC() throws IOException { + final WriteableBuild dockerBuild = new WriteableBuild(new Build(randomFrom(Build.Flavor.values()), Build.Type.DOCKER, + randomAlphaOfLength(6), randomAlphaOfLength(6), randomBoolean(), randomAlphaOfLength(6))); + + final List versions = Version.getDeclaredVersions(Version.class); + final Version pre63Version = randomFrom(versions.stream().filter(v -> v.before(Version.V_6_3_0)).collect(Collectors.toList())); + final Version post63Pre67Version = randomFrom(versions.stream() + .filter(v -> v.onOrAfter(Version.V_6_3_0) && v.before(Version.V_6_7_0)).collect(Collectors.toList())); + final Version post67Pre70Version = randomFrom(versions.stream() + .filter(v -> v.onOrAfter(Version.V_6_7_0) && v.before(Version.V_7_0_0)).collect(Collectors.toList())); + final Version post70Version = randomFrom(versions.stream().filter(v -> v.onOrAfter(Version.V_7_0_0)).collect(Collectors.toList())); + + final WriteableBuild pre63 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, pre63Version); + final WriteableBuild post63pre67 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post63Pre67Version); + final WriteableBuild post67pre70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post67Pre70Version); + final WriteableBuild post70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post70Version); + + assertThat(pre63.build.flavor(), equalTo(Build.Flavor.OSS)); + assertThat(post63pre67.build.flavor(), equalTo(dockerBuild.build.flavor())); + assertThat(post67pre70.build.flavor(), equalTo(dockerBuild.build.flavor())); + assertThat(post70.build.flavor(), equalTo(dockerBuild.build.flavor())); + + assertThat(pre63.build.type(), equalTo(Build.Type.UNKNOWN)); + assertThat(post63pre67.build.type(), equalTo(Build.Type.TAR)); + assertThat(post67pre70.build.type(), equalTo(dockerBuild.build.type())); + assertThat(post70.build.type(), equalTo(dockerBuild.build.type())); + + assertThat(pre63.build.getQualifiedVersion(), equalTo(pre63Version.toString())); + assertThat(post63pre67.build.getQualifiedVersion(), equalTo(post63Pre67Version.toString())); + assertThat(post67pre70.build.getQualifiedVersion(), equalTo(post67Pre70Version.toString())); + assertThat(post70.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); + } + + public void testFlavorParsing() { + for (final Build.Flavor flavor : Build.Flavor.values()) { + // strict or not should not impact parsing at all here + assertThat(Build.Flavor.fromDisplayName(flavor.displayName(), randomBoolean()), sameInstance(flavor)); + } + } + + public void testTypeParsing() { + for (final Build.Type type : Build.Type.values()) { + // strict or not should not impact parsing at all here + assertThat(Build.Type.fromDisplayName(type.displayName(), randomBoolean()), sameInstance(type)); + } + } + + public void testLenientFlavorParsing() { + final String displayName = randomAlphaOfLength(8); + assertThat(Build.Flavor.fromDisplayName(displayName, false), 
equalTo(Build.Flavor.UNKNOWN)); + } + + public void testStrictFlavorParsing() { + final String displayName = randomAlphaOfLength(8); + @SuppressWarnings("ResultOfMethodCallIgnored") final IllegalStateException e = + expectThrows(IllegalStateException.class, () -> Build.Flavor.fromDisplayName(displayName, true)); + assertThat(e, hasToString(containsString("unexpected distribution flavor [" + displayName + "]; your distribution is broken"))); + } + + public void testLenientTypeParsing() { + final String displayName = randomAlphaOfLength(8); + assertThat(Build.Type.fromDisplayName(displayName, false), equalTo(Build.Type.UNKNOWN)); + } + + public void testStrictTypeParsing() { + final String displayName = randomAlphaOfLength(8); + @SuppressWarnings("ResultOfMethodCallIgnored") final IllegalStateException e = + expectThrows(IllegalStateException.class, () -> Build.Type.fromDisplayName(displayName, true)); + assertThat(e, hasToString(containsString("unexpected distribution type [" + displayName + "]; your distribution is broken"))); + } + } diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 8ac056aa41c2a..c13ff2b79725b 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -61,9 +61,12 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; +import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.indices.recovery.RecoverFilesRecoveryException; @@ -812,6 +815,9 @@ public void testIds() { ids.put(150, CoordinationStateRejectedException.class); ids.put(151, SnapshotInProgressException.class); ids.put(152, NoSuchRemoteClusterException.class); + ids.put(153, RetentionLeaseAlreadyExistsException.class); + ids.put(154, RetentionLeaseNotFoundException.class); + ids.put(155, ShardNotInPrimaryModeException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java index 1d2a4ca6d5f75..2de2f259e6ff1 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java @@ -20,6 +20,7 @@ package org.elasticsearch; import org.apache.commons.codec.DecoderException; +import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.ShardSearchFailure; @@ -183,4 +184,31 @@ public void testGroupByNullIndex() { ShardOperationFailedException[] groupBy = ExceptionsHelper.groupBy(failures); assertThat(groupBy.length, equalTo(2)); } + + public void testUnwrapCorruption() { + final Throwable corruptIndexException = new 
CorruptIndexException("corrupt", "resource"); + assertThat(ExceptionsHelper.unwrapCorruption(corruptIndexException), equalTo(corruptIndexException)); + + final Throwable corruptionAsCause = new RuntimeException(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionAsCause), equalTo(corruptIndexException)); + + final Throwable corruptionSuppressed = new RuntimeException(); + corruptionSuppressed.addSuppressed(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionSuppressed), equalTo(corruptIndexException)); + + final Throwable corruptionSuppressedOnCause = new RuntimeException(new RuntimeException()); + corruptionSuppressedOnCause.getCause().addSuppressed(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionSuppressedOnCause), equalTo(corruptIndexException)); + + final Throwable corruptionCauseOnSuppressed = new RuntimeException(); + corruptionCauseOnSuppressed.addSuppressed(new RuntimeException(corruptIndexException)); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionCauseOnSuppressed), equalTo(corruptIndexException)); + + assertThat(ExceptionsHelper.unwrapCorruption(new RuntimeException()), nullValue()); + assertThat(ExceptionsHelper.unwrapCorruption(new RuntimeException(new RuntimeException())), nullValue()); + + final Throwable withSuppressedException = new RuntimeException(); + withSuppressedException.addSuppressed(new RuntimeException()); + assertThat(ExceptionsHelper.unwrapCorruption(withSuppressedException), nullValue()); + } } diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 08db8dfaf2100..3e4c9ada76b03 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -186,7 +186,7 @@ public void testMinCompatVersion() { // from 7.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 
5.x is // released since we need to bump the supported minor in Version#minimumCompatibilityVersion() - Version lastVersion = Version.V_6_7_0; // TODO: remove this once min compat version is a constant instead of method + Version lastVersion = Version.V_6_8_0; // TODO: remove this once min compat version is a constant instead of method assertEquals(lastVersion.major, Version.V_7_0_0.minimumCompatibilityVersion().major); assertEquals("did you miss to bump the minor in Version#minimumCompatibilityVersion()", lastVersion.minor, Version.V_7_0_0.minimumCompatibilityVersion().minor); @@ -346,7 +346,8 @@ public static void assertUnknownVersion(Version version) { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); assertFalse(isCompatible(Version.V_6_6_0, Version.V_7_0_0)); - assertTrue(isCompatible(Version.V_6_7_0, Version.V_7_0_0)); + assertFalse(isCompatible(Version.V_6_7_0, Version.V_7_0_0)); + assertTrue(isCompatible(Version.V_6_8_0, Version.V_7_0_0)); assertFalse(isCompatible(Version.fromId(2000099), Version.V_7_0_0)); assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_5_0)); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponseTests.java index 19dec5bcc59bc..92532b89a2f8f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponseTests.java @@ -45,7 +45,7 @@ protected ClusterStateResponse createTestInstance() { } clusterState = clusterStateBuilder.build(); } - return new ClusterStateResponse(clusterName, clusterState, randomNonNegativeLong(), randomBoolean()); + return new ClusterStateResponse(clusterName, clusterState, randomBoolean()); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index 1b192edfda6e6..687b01680704e 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -40,8 +40,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.seqno.SeqNoStats; -import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; @@ -73,6 +71,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -100,8 +99,6 @@ public void setUp() throws Exception { indexShard = mock(IndexShard.class); when(indexShard.getActiveOperationsCount()).thenReturn(0); - when(indexShard.getGlobalCheckpoint()).thenReturn(0L); - when(indexShard.seqNoStats()).thenReturn(new SeqNoStats(0L, 0L, 0L)); final ShardId shardId = new ShardId("index", "_na_", 
randomIntBetween(0, 3)); when(indexShard.shardId()).thenReturn(shardId); @@ -174,17 +171,16 @@ public void testOperationFailsWithNoBlock() { verify(indexShard, times(0)).flush(any(FlushRequest.class)); } - public void testOperationFailsWithGlobalCheckpointNotCaughtUp() { - final long maxSeqNo = randomLongBetween(SequenceNumbers.UNASSIGNED_SEQ_NO, Long.MAX_VALUE); - final long localCheckpoint = randomLongBetween(SequenceNumbers.UNASSIGNED_SEQ_NO, maxSeqNo); - final long globalCheckpoint = randomValueOtherThan(maxSeqNo, - () -> randomLongBetween(SequenceNumbers.UNASSIGNED_SEQ_NO, localCheckpoint)); - when(indexShard.seqNoStats()).thenReturn(new SeqNoStats(maxSeqNo, localCheckpoint, globalCheckpoint)); - when(indexShard.getGlobalCheckpoint()).thenReturn(globalCheckpoint); + public void testVerifyShardBeforeIndexClosing() throws Exception { + executeOnPrimaryOrReplica(); + verify(indexShard, times(1)).verifyShardBeforeIndexClosing(); + verify(indexShard, times(1)).flush(any(FlushRequest.class)); + } - IllegalStateException exception = expectThrows(IllegalStateException.class, this::executeOnPrimaryOrReplica); - assertThat(exception.getMessage(), equalTo("Global checkpoint [" + globalCheckpoint + "] mismatches maximum sequence number [" - + maxSeqNo + "] on index shard " + indexShard.shardId())); + public void testVerifyShardBeforeIndexClosingFailed() { + doThrow(new IllegalStateException("test")).when(indexShard).verifyShardBeforeIndexClosing(); + expectThrows(IllegalStateException.class, this::executeOnPrimaryOrReplica); + verify(indexShard, times(1)).verifyShardBeforeIndexClosing(); verify(indexShard, times(0)).flush(any(FlushRequest.class)); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 05da57cc5da45..27e3ffefd6351 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -19,8 +19,6 @@ package org.elasticsearch.action.admin.indices.create; -import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -33,22 +31,17 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.InternalTestCluster; import java.util.HashMap; -import java.util.HashSet; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; @@ -58,11 +51,8 @@ import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.hasToString; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.core.IsNull.notNullValue; @@ -390,48 +380,4 @@ public void testIndexNameInResponse() { assertEquals("Should have index name in response", "foo", response.index()); } - public void testIndexWithUnknownSetting() throws Exception { - final int replicas = internalCluster().numDataNodes() - 1; - final Settings settings = Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", replicas).build(); - client().admin().indices().prepareCreate("test").setSettings(settings).get(); - ensureGreen("test"); - final ClusterState state = client().admin().cluster().prepareState().get().getState(); - - final Set dataOrMasterNodeNames = new HashSet<>(); - for (final ObjectCursor node : state.nodes().getMasterAndDataNodes().values()) { - assertTrue(dataOrMasterNodeNames.add(node.value.getName())); - } - - final IndexMetaData metaData = state.getMetaData().index("test"); - internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - if (dataOrMasterNodeNames.contains(nodeName)) { - final MetaStateService metaStateService = internalCluster().getInstance(MetaStateService.class, nodeName); - final IndexMetaData brokenMetaData = - IndexMetaData - .builder(metaData) - .settings(Settings.builder().put(metaData.getSettings()).put("index.foo", true)) - .build(); - // so evil - metaStateService.writeIndexAndUpdateManifest("broken metadata", brokenMetaData); - } - return super.onNodeStopped(nodeName); - } - }); - ensureGreen(metaData.getIndex().getName()); // we have to wait for the index to show up in the metadata or we will fail in a race - final ClusterState stateAfterRestart = client().admin().cluster().prepareState().get().getState(); - - // the index should not be open after we restart and recover the broken index metadata - assertThat(stateAfterRestart.getMetaData().index(metaData.getIndex()).getState(), equalTo(IndexMetaData.State.CLOSE)); - - // try to open the index - final ElasticsearchException e = - expectThrows(ElasticsearchException.class, () -> client().admin().indices().prepareOpen("test").get()); - assertThat(e, hasToString(containsString("Failed to verify index " + metaData.getIndex()))); - assertNotNull(e.getCause()); - assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); - assertThat(e, hasToString(containsString("unknown setting [index.foo]"))); - } - } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index feeb9646e40bf..b14bdd0ed9883 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -65,7 +65,6 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.Arrays; -import java.util.List; import java.util.Map; import java.util.stream.IntStream; 
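The TransportVerifyShardBeforeCloseActionTests hunk above swaps the old global-checkpoint assertions for a much smaller contract: verifyShardBeforeIndexClosing() is stubbed to throw, and the test then checks that the failure propagates and that the shard is never flushed. Detached from the Elasticsearch types, the Mockito pattern looks like the sketch below (hypothetical Shard interface and closeShard method; assumes JUnit 4.13+ for assertThrows and Mockito on the classpath).

import static org.junit.Assert.assertThrows;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import org.junit.Test;

public class VerifyBeforeCloseSketchTests {

    // Hypothetical collaborator standing in for IndexShard.
    interface Shard {
        void verifyBeforeClose();
        void flush();
    }

    // Logic under test: verify first, flush only if verification passed.
    static void closeShard(Shard shard) {
        shard.verifyBeforeClose();
        shard.flush();
    }

    @Test
    public void verificationFailureSkipsFlush() {
        Shard shard = mock(Shard.class);
        // doThrow(..).when(..) is how a void method is stubbed to fail with Mockito
        doThrow(new IllegalStateException("test")).when(shard).verifyBeforeClose();

        assertThrows(IllegalStateException.class, () -> closeShard(shard));

        verify(shard, times(1)).verifyBeforeClose();
        verify(shard, times(0)).flush();
    }
}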
@@ -165,11 +164,8 @@ public void testCreateShrinkIndexToN() { } public void testShrinkIndexPrimaryTerm() throws Exception { - final List factors = Arrays.asList(2, 3, 5, 7); - final List numberOfShardsFactors = randomSubsetOf(scaledRandomIntBetween(1, factors.size() - 1), factors); - final int numberOfShards = numberOfShardsFactors.stream().reduce(1, (x, y) -> x * y); - final int numberOfTargetShards = randomSubsetOf(randomInt(numberOfShardsFactors.size() - 1), numberOfShardsFactors) - .stream().reduce(1, (x, y) -> x * y); + int numberOfShards = randomIntBetween(2, 20); + int numberOfTargetShards = randomValueOtherThanMany(n -> numberOfShards % n != 0, () -> randomIntBetween(1, numberOfShards - 1)); internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get(); @@ -218,7 +214,7 @@ public void testShrinkIndexPrimaryTerm() throws Exception { final Settings.Builder prepareShrinkSettings = Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true); client().admin().indices().prepareUpdateSettings("source").setSettings(prepareShrinkSettings).get(); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards final IndexMetaData indexMetaData = indexMetaData(client(), "source"); final long beforeShrinkPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetaData::primaryTerm).max().getAsLong(); @@ -228,7 +224,7 @@ public void testShrinkIndexPrimaryTerm() throws Exception { Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", numberOfTargetShards).build(); assertAcked(client().admin().indices().prepareResizeIndex("source", "target").setSettings(shrinkSettings).get()); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(120)); final IndexMetaData afterShrinkIndexMetaData = indexMetaData(client(), "target"); for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 0cc83cee89786..adbe2e2589020 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -45,8 +45,8 @@ import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; @@ -62,7 +62,6 @@ import java.io.UncheckedIOException; import java.util.Arrays; import java.util.HashSet; -import java.util.List; import java.util.Set; import java.util.function.BiFunction; import java.util.stream.IntStream; @@ -75,7 +74,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class SplitIndexIT extends ESIntegTestCase { @@ -185,9 +183,6 @@ private 
void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha } } - ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() - .getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); ensureYellow(); client().admin().indices().prepareUpdateSettings("source") .setSettings(Settings.builder() @@ -288,19 +283,13 @@ public void assertAllUniqueDocs(SearchResponse response, int numDocs) { } public void testSplitIndexPrimaryTerm() throws Exception { - final List factors = Arrays.asList(1, 2, 4, 8); - final List numberOfShardsFactors = randomSubsetOf(scaledRandomIntBetween(1, factors.size()), factors); - final int numberOfShards = randomSubsetOf(numberOfShardsFactors).stream().reduce(1, (x, y) -> x * y); - final int numberOfTargetShards = numberOfShardsFactors.stream().reduce(2, (x, y) -> x * y); + int numberOfTargetShards = randomIntBetween(2, 20); + int numberOfShards = randomValueOtherThanMany(n -> numberOfTargetShards % n != 0, () -> between(1, numberOfTargetShards - 1)); internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) .put("number_of_shards", numberOfShards) .put("index.number_of_routing_shards", numberOfTargetShards)).get(); - - final ImmutableOpenMap dataNodes = - client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); - assertThat(dataNodes.size(), greaterThanOrEqualTo(2)); - ensureYellow(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to allocate many shards // fail random primary shards to force primary terms to increase final Index source = resolveIndex("source"); @@ -353,7 +342,7 @@ public void testSplitIndexPrimaryTerm() throws Exception { .setResizeType(ResizeType.SPLIT) .setSettings(splitSettings).get()); - ensureGreen(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards final IndexMetaData aftersplitIndexMetaData = indexMetaData(client(), "target"); for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { @@ -366,7 +355,7 @@ private static IndexMetaData indexMetaData(final Client client, final String ind return clusterStateResponse.getState().metaData().index(index); } - public void testCreateSplitIndex() { + public void testCreateSplitIndex() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_rc2, Version.CURRENT); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) @@ -378,9 +367,6 @@ public void testCreateSplitIndex() { client().prepareIndex("source", "type") .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } - ImmutableOpenMap dataNodes = - client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due // to the require._name below. 
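Both resize tests above (testShrinkIndexPrimaryTerm and testSplitIndexPrimaryTerm) now derive compatible shard counts by rejection sampling instead of multiplying fixed factor lists: randomValueOtherThanMany keeps re-drawing candidates until the rejection predicate no longer matches, i.e. until one shard count evenly divides the other. A plain-JDK sketch of that idea (hypothetical helper, not the ESTestCase implementation):

import java.util.Random;
import java.util.function.Predicate;
import java.util.function.Supplier;

final class RejectionSamplingSketch {

    /** Re-draws from the supplier until the candidate is NOT rejected by the predicate. */
    static <T> T randomValueOtherThanMany(Predicate<T> reject, Supplier<T> supplier) {
        T candidate;
        do {
            candidate = supplier.get();
        } while (reject.test(candidate));
        return candidate;
    }

    public static void main(String[] args) {
        final Random random = new Random();
        final int numberOfShards = 2 + random.nextInt(19);       // source index: 2..20 shards
        // shrink target: any shard count that evenly divides the source shard count
        final int numberOfTargetShards = randomValueOtherThanMany(
            n -> numberOfShards % n != 0,
            () -> 1 + random.nextInt(numberOfShards - 1));
        System.out.println(numberOfShards + " -> " + numberOfTargetShards
            + " (divides evenly: " + (numberOfShards % numberOfTargetShards == 0) + ")");
    }
}

The split case in SplitIndexIT is the mirror image: the target shard count is fixed first and candidate source counts that do not divide it are rejected.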
@@ -486,9 +472,6 @@ public void testCreateSplitWithIndexSort() throws Exception { client().prepareIndex("source", "type", Integer.toString(i)) .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get(); } - ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() - .getDataNodes(); - assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due // to the require._name below. diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingRequestValidatorsTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingRequestValidatorsTests.java new file mode 100644 index 0000000000000..1b333ce0288e4 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingRequestValidatorsTests.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.mapping.put; + +import org.elasticsearch.common.Randomness; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class TransportPutMappingRequestValidatorsTests extends ESTestCase { + + private final MappingRequestValidator EMPTY = (request, state, indices) -> null; + private final MappingRequestValidator FAIL = (request, state, indices) -> new Exception("failure"); + + public void testValidates() { + final int numberOfValidations = randomIntBetween(0, 8); + final List validators = new ArrayList<>(numberOfValidations); + for (int i = 0; i < numberOfValidations; i++) { + validators.add(EMPTY); + } + final TransportPutMappingAction.RequestValidators requestValidators = new TransportPutMappingAction.RequestValidators(validators); + assertNull(requestValidators.validateRequest(null, null, null)); + } + + public void testFailure() { + final TransportPutMappingAction.RequestValidators validators = + new TransportPutMappingAction.RequestValidators(Collections.singletonList(FAIL)); + assertNotNull(validators.validateRequest(null, null, null)); + } + + public void testValidatesAfterFailure() { + final TransportPutMappingAction.RequestValidators validators = + new TransportPutMappingAction.RequestValidators(Collections.unmodifiableList(Arrays.asList(FAIL, EMPTY))); + assertNotNull(validators.validateRequest(null, null, null)); + } + + public void testMultipleFailures() { + final int numberOfFailures = randomIntBetween(2, 8); + final List validators = new ArrayList<>(numberOfFailures); + for (int i = 0; i < numberOfFailures; i++) { + validators.add(FAIL); + } + final TransportPutMappingAction.RequestValidators requestValidators = new TransportPutMappingAction.RequestValidators(validators); + final Exception e = requestValidators.validateRequest(null, null, null); + assertNotNull(e); + assertThat(e.getSuppressed(), Matchers.arrayWithSize(numberOfFailures - 1)); + } + + public void testRandom() { + final int numberOfValidations = randomIntBetween(0, 8); + final int numberOfFailures = randomIntBetween(0, 8); + final List validators = new ArrayList<>(numberOfValidations + numberOfFailures); + for (int i = 0; i < numberOfValidations; i++) { + validators.add(EMPTY); + } + for (int i = 0; i < numberOfFailures; i++) { + validators.add(FAIL); + } + Randomness.shuffle(validators); + final TransportPutMappingAction.RequestValidators requestValidators = new TransportPutMappingAction.RequestValidators(validators); + final Exception e = requestValidators.validateRequest(null, null, null); + if (numberOfFailures == 0) { + assertNull(e); + } else { + assertNotNull(e); + assertThat(e.getSuppressed(), Matchers.arrayWithSize(numberOfFailures - 1)); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index ec3c82ba70b2f..058dcc7243029 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -20,17 +20,30 @@ package org.elasticsearch.action.admin.indices.rollover; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; +import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -39,9 +52,12 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; import org.mockito.ArgumentCaptor; import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -51,7 +67,9 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -64,7 +82,9 @@ public void testDocStatsSelectionFromPrimariesOnly() { long docsInShards = 200; final Condition condition = createTestCondition(); - evaluateConditions(Sets.newHashSet(condition), createMetaData(), createIndicesStatResponse(docsInShards, docsInPrimaryShards)); + String indexName = randomAlphaOfLengthBetween(5, 7); + evaluateConditions(Sets.newHashSet(condition), createMetaData(indexName), + createIndicesStatResponse(indexName, docsInShards, docsInPrimaryShards)); final ArgumentCaptor argument = ArgumentCaptor.forClass(Condition.Stats.class); verify(condition).evaluate(argument.capture()); @@ -286,7 +306,7 @@ public void testRejectDuplicateAlias() { .patterns(Arrays.asList("foo-*", "bar-*")) .putAlias(AliasMetaData.builder("foo-write")).putAlias(AliasMetaData.builder("bar-write").writeIndex(randomBoolean())) .build(); - final MetaData metaData = MetaData.builder().put(createMetaData(), false).put(template).build(); + final MetaData metaData = MetaData.builder().put(createMetaData(randomAlphaOfLengthBetween(5, 7)), false).put(template).build(); String indexName = randomFrom("foo-123", "bar-xyz"); String aliasName = 
randomFrom("foo-write", "bar-write"); final IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, @@ -294,9 +314,92 @@ public void testRejectDuplicateAlias() { assertThat(ex.getMessage(), containsString("index template [test-template]")); } - private IndicesStatsResponse createIndicesStatResponse(long totalDocs, long primaryDocs) { + public void testConditionEvaluationWhenAliasToWriteAndReadIndicesConsidersOnlyPrimariesFromWriteIndex() { + final TransportService mockTransportService = mock(TransportService.class); + final ClusterService mockClusterService = mock(ClusterService.class); + final ThreadPool mockThreadPool = mock(ThreadPool.class); + final MetaDataCreateIndexService mockCreateIndexService = mock(MetaDataCreateIndexService.class); + final IndexNameExpressionResolver mockIndexNameExpressionResolver = mock(IndexNameExpressionResolver.class); + when(mockIndexNameExpressionResolver.resolveDateMathExpression(any())).thenReturn("logs-index-000003"); + final ActionFilters mockActionFilters = mock(ActionFilters.class); + final MetaDataIndexAliasesService mdIndexAliasesService = mock(MetaDataIndexAliasesService.class); + + final Client mockClient = mock(Client.class); + final AdminClient mockAdminClient = mock(AdminClient.class); + final IndicesAdminClient mockIndicesAdminClient = mock(IndicesAdminClient.class); + when(mockClient.admin()).thenReturn(mockAdminClient); + when(mockAdminClient.indices()).thenReturn(mockIndicesAdminClient); + + final IndicesStatsRequestBuilder mockIndicesStatsBuilder = mock(IndicesStatsRequestBuilder.class); + when(mockIndicesAdminClient.prepareStats(any())).thenReturn(mockIndicesStatsBuilder); + final Map indexStats = new HashMap<>(); + int total = randomIntBetween(500, 1000); + indexStats.put("logs-index-000001", createIndexStats(200L, total)); + indexStats.put("logs-index-000002", createIndexStats(300L, total)); + final IndicesStatsResponse statsResponse = createAliasToMultipleIndicesStatsResponse(indexStats); + when(mockIndicesStatsBuilder.clear()).thenReturn(mockIndicesStatsBuilder); + when(mockIndicesStatsBuilder.setDocs(true)).thenReturn(mockIndicesStatsBuilder); + + assert statsResponse.getPrimaries().getDocs().getCount() == 500L; + assert statsResponse.getTotal().getDocs().getCount() == (total + total); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 1; + ActionListener listener = (ActionListener) args[0]; + listener.onResponse(statsResponse); + return null; + }).when(mockIndicesStatsBuilder).execute(any(ActionListener.class)); + + final IndexMetaData.Builder indexMetaData = IndexMetaData.builder("logs-index-000001") + .putAlias(AliasMetaData.builder("logs-alias").writeIndex(false).build()).settings(settings(Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(1); + final IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("logs-index-000002") + .putAlias(AliasMetaData.builder("logs-alias").writeIndex(true).build()).settings(settings(Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(1); + final ClusterState stateBefore = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2)).build(); + + final TransportRolloverAction transportRolloverAction = new TransportRolloverAction(mockTransportService, mockClusterService, + mockThreadPool, mockCreateIndexService, mockActionFilters, mockIndexNameExpressionResolver, mdIndexAliasesService, + mockClient); + + // For given alias, verify that condition evaluation fails 
when the condition doc count is greater than the primaries doc count + // (primaries from only write index is considered) + PlainActionFuture future = new PlainActionFuture<>(); + RolloverRequest rolloverRequest = new RolloverRequest("logs-alias", "logs-index-000003"); + rolloverRequest.addMaxIndexDocsCondition(500L); + rolloverRequest.dryRun(true); + transportRolloverAction.masterOperation(rolloverRequest, stateBefore, future); + + RolloverResponse response = future.actionGet(); + assertThat(response.getOldIndex(), equalTo("logs-index-000002")); + assertThat(response.getNewIndex(), equalTo("logs-index-000003")); + assertThat(response.isDryRun(), equalTo(true)); + assertThat(response.isRolledOver(), equalTo(false)); + assertThat(response.getConditionStatus().size(), equalTo(1)); + assertThat(response.getConditionStatus().get("[max_docs: 500]"), is(false)); + + // For given alias, verify that the condition evaluation is successful when condition doc count is less than the primaries doc count + // (primaries from only write index is considered) + future = new PlainActionFuture<>(); + rolloverRequest = new RolloverRequest("logs-alias", "logs-index-000003"); + rolloverRequest.addMaxIndexDocsCondition(300L); + rolloverRequest.dryRun(true); + transportRolloverAction.masterOperation(rolloverRequest, stateBefore, future); + + response = future.actionGet(); + assertThat(response.getOldIndex(), equalTo("logs-index-000002")); + assertThat(response.getNewIndex(), equalTo("logs-index-000003")); + assertThat(response.isDryRun(), equalTo(true)); + assertThat(response.isRolledOver(), equalTo(false)); + assertThat(response.getConditionStatus().size(), equalTo(1)); + assertThat(response.getConditionStatus().get("[max_docs: 300]"), is(true)); + } + + private IndicesStatsResponse createIndicesStatResponse(String indexName, long totalDocs, long primariesDocs) { final CommonStats primaryStats = mock(CommonStats.class); - when(primaryStats.getDocs()).thenReturn(new DocsStats(primaryDocs, 0, between(1, 10000))); + when(primaryStats.getDocs()).thenReturn(new DocsStats(primariesDocs, 0, between(1, 10000))); final CommonStats totalStats = mock(CommonStats.class); when(totalStats.getDocs()).thenReturn(new DocsStats(totalDocs, 0, between(1, 10000))); @@ -304,18 +407,49 @@ private IndicesStatsResponse createIndicesStatResponse(long totalDocs, long prim final IndicesStatsResponse response = mock(IndicesStatsResponse.class); when(response.getPrimaries()).thenReturn(primaryStats); when(response.getTotal()).thenReturn(totalStats); + final IndexStats indexStats = mock(IndexStats.class); + when(response.getIndex(indexName)).thenReturn(indexStats); + when(indexStats.getPrimaries()).thenReturn(primaryStats); + when(indexStats.getTotal()).thenReturn(totalStats); + return response; + } + + private IndicesStatsResponse createAliasToMultipleIndicesStatsResponse(Map indexStats) { + final IndicesStatsResponse response = mock(IndicesStatsResponse.class); + final CommonStats primariesStats = new CommonStats(); + final CommonStats totalStats = new CommonStats(); + for (String indexName : indexStats.keySet()) { + when(response.getIndex(indexName)).thenReturn(indexStats.get(indexName)); + primariesStats.add(indexStats.get(indexName).getPrimaries()); + totalStats.add(indexStats.get(indexName).getTotal()); + } + when(response.getPrimaries()).thenReturn(primariesStats); + when(response.getTotal()).thenReturn(totalStats); return response; } - private static IndexMetaData createMetaData() { + private IndexStats createIndexStats(long 
primaries, long total) { + final CommonStats primariesCommonStats = mock(CommonStats.class); + when(primariesCommonStats.getDocs()).thenReturn(new DocsStats(primaries, 0, between(1, 10000))); + + final CommonStats totalCommonStats = mock(CommonStats.class); + when(totalCommonStats.getDocs()).thenReturn(new DocsStats(total, 0, between(1, 10000))); + + IndexStats indexStats = mock(IndexStats.class); + when(indexStats.getPrimaries()).thenReturn(primariesCommonStats); + when(indexStats.getTotal()).thenReturn(totalCommonStats); + return indexStats; + } + + private static IndexMetaData createMetaData(String indexName) { final Settings settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build(); - return IndexMetaData.builder(randomAlphaOfLength(10)) + return IndexMetaData.builder(indexName) .creationDate(System.currentTimeMillis() - TimeValue.timeValueHours(3).getMillis()) .settings(settings) .build(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java index 83ddb45655182..5732630e20cef 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.test.AbstractStreamableTestCase; import org.elasticsearch.test.ESTestCase; @@ -32,6 +33,7 @@ import java.util.Locale; import java.util.Set; import java.util.StringJoiner; +import java.util.function.Supplier; public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTestCase { @@ -39,9 +41,10 @@ public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTest protected UpdateSettingsRequest mutateInstance(UpdateSettingsRequest request) { UpdateSettingsRequest mutation = copyRequest(request); List mutators = new ArrayList<>(); + Supplier timeValueSupplier = () -> TimeValue.parseTimeValue(ESTestCase.randomTimeValue(), "_setting"); mutators.add(() -> mutation - .masterNodeTimeout(randomValueOtherThan(request.masterNodeTimeout().getStringRep(), ESTestCase::randomTimeValue))); - mutators.add(() -> mutation.timeout(randomValueOtherThan(request.timeout().getStringRep(), ESTestCase::randomTimeValue))); + .masterNodeTimeout(randomValueOtherThan(request.masterNodeTimeout(), timeValueSupplier))); + mutators.add(() -> mutation.timeout(randomValueOtherThan(request.timeout(), timeValueSupplier))); mutators.add(() -> mutation.settings(mutateSettings(request.settings()))); mutators.add(() -> mutation.indices(mutateIndices(request.indices()))); mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(request.indicesOptions(), diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java index 
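The new rollover test above makes TransportRolloverAction exercisable without a cluster by stubbing the stats call: doAnswer(...) intercepts execute(listener) on the mocked IndicesStatsRequestBuilder and completes the listener immediately with a canned IndicesStatsResponse, so the whole master operation runs synchronously inside the test. The same stubbing idiom, reduced to a hypothetical callback-style client rather than the real Elasticsearch classes, looks like this (Mockito and JUnit assumed):

import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

import org.junit.Test;

public class AsyncStubbingSketchTests {

    // Hypothetical async API standing in for IndicesStatsRequestBuilder.execute(ActionListener).
    interface StatsClient {
        void stats(Consumer<Long> onResponse);
    }

    @Test
    public void listenerIsCompletedSynchronously() {
        StatsClient client = mock(StatsClient.class);
        // doAnswer pulls the callback out of the invocation and completes it inline
        doAnswer(invocation -> {
            @SuppressWarnings("unchecked")
            Consumer<Long> listener = (Consumer<Long>) invocation.getArguments()[0];
            listener.accept(500L); // canned "primary doc count"
            return null;
        }).when(client).stats(any());

        AtomicReference<Long> observed = new AtomicReference<>();
        client.stats(observed::set); // the code under test would normally make this call
        assertEquals(Long.valueOf(500L), observed.get());
    }
}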
a7e3ee57a08c3..99850699ec2be 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java @@ -19,16 +19,30 @@ package org.elasticsearch.action.admin.indices.stats; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.test.ESTestCase; +import java.nio.file.Path; +import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; import static org.hamcrest.object.HasToString.hasToString; - public class IndicesStatsResponseTests extends ESTestCase { public void testInvalidLevel() { @@ -42,4 +56,59 @@ public void testInvalidLevel() { hasToString(containsString("level parameter must be one of [cluster] or [indices] or [shards] but was [" + level + "]"))); } + public void testGetIndices() { + List shards = new ArrayList<>(); + int noOfIndexes = randomIntBetween(2, 5); + List expectedIndexes = new ArrayList<>(); + Map expectedIndexToPrimaryShardsCount = new HashMap<>(); + Map expectedIndexToTotalShardsCount = new HashMap<>(); + + for (int indCnt = 0; indCnt < noOfIndexes; indCnt++) { + Index index = createIndex(randomAlphaOfLength(9)); + expectedIndexes.add(index.getName()); + int numShards = randomIntBetween(1, 5); + for (int shardId = 0; shardId < numShards; shardId++) { + ShardId shId = new ShardId(index, shardId); + Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve(String.valueOf(shardId)); + ShardPath shardPath = new ShardPath(false, path, path, shId); + ShardRouting routing = createShardRouting(index, shId, (shardId == 0)); + shards.add(new ShardStats(routing, shardPath, null, null, null, null)); + AtomicLong primaryShardsCounter = expectedIndexToPrimaryShardsCount.computeIfAbsent(index.getName(), + k -> new AtomicLong(0L)); + if (routing.primary()) { + primaryShardsCounter.incrementAndGet(); + } + AtomicLong shardsCounter = expectedIndexToTotalShardsCount.computeIfAbsent(index.getName(), k -> new AtomicLong(0L)); + shardsCounter.incrementAndGet(); + } + } + final IndicesStatsResponse indicesStatsResponse = new IndicesStatsResponse(shards.toArray(new ShardStats[shards.size()]), 0, 0, 0, + null); + Map indexStats = indicesStatsResponse.getIndices(); + + assertThat(indexStats.size(), is(noOfIndexes)); + assertThat(indexStats.keySet(), containsInAnyOrder(expectedIndexes.toArray(new String[0]))); + + for (String index : indexStats.keySet()) { + IndexStats stat = indexStats.get(index); + ShardStats[] shardStats = stat.getShards(); + long primaryCount = 0L; + long totalCount = shardStats.length; + for (ShardStats shardStat : shardStats) { + if (shardStat.getShardRouting().primary()) { + primaryCount++; + } + } + assertThat(primaryCount, is(expectedIndexToPrimaryShardsCount.get(index).get())); + assertThat(totalCount, 
is(expectedIndexToTotalShardsCount.get(index).get())); + } + } + + private ShardRouting createShardRouting(Index index, ShardId shardId, boolean isPrimary) { + return TestShardRouting.newShardRouting(shardId, randomAlphaOfLength(4), isPrimary, ShardRoutingState.STARTED); + } + + private Index createIndex(String indexName) { + return new Index(indexName, UUIDs.base64UUID()); + } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java new file mode 100644 index 0000000000000..fbcd5c46e2f90 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.bulk; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.action.document.RestBulkAction; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +public class BulkRequestParserTests extends ESTestCase { + + public void testIndexRequest() throws IOException { + BytesArray request = new BytesArray("{ \"index\":{ \"_id\": \"bar\" } }\n{}\n"); + BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + final AtomicBoolean parsed = new AtomicBoolean(); + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, + indexRequest -> { + assertFalse(parsed.get()); + assertEquals("foo", indexRequest.index()); + assertEquals("bar", indexRequest.id()); + parsed.set(true); + }, + req -> fail(), req -> fail()); + assertTrue(parsed.get()); + } + + public void testDeleteRequest() throws IOException { + BytesArray request = new BytesArray("{ \"delete\":{ \"_id\": \"bar\" } }\n"); + BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + final AtomicBoolean parsed = new AtomicBoolean(); + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, + req -> fail(), req -> fail(), + deleteRequest -> { + assertFalse(parsed.get()); + assertEquals("foo", deleteRequest.index()); + assertEquals("bar", deleteRequest.id()); + parsed.set(true); + }); + assertTrue(parsed.get()); + } + + public void testUpdateRequest() throws IOException { + BytesArray request = new BytesArray("{ \"update\":{ \"_id\": \"bar\" } }\n{}\n"); + BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + final AtomicBoolean parsed = new AtomicBoolean(); + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, + req -> fail(), + updateRequest -> { + assertFalse(parsed.get()); + assertEquals("foo", updateRequest.index()); + assertEquals("bar", 
updateRequest.id()); + parsed.set(true); + }, + req -> fail()); + assertTrue(parsed.get()); + } + + public void testBarfOnLackOfTrailingNewline() throws IOException { + BytesArray request = new BytesArray("{ \"index\":{ \"_id\": \"bar\" } }\n{}"); + BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, + indexRequest -> fail(), req -> fail(), req -> fail())); + assertEquals("The bulk request must be terminated by a newline [\\n]", e.getMessage()); + } + + public void testFailOnExplicitIndex() throws IOException { + BytesArray request = new BytesArray("{ \"index\":{ \"_index\": \"foo\", \"_id\": \"bar\" } }\n{}\n"); + BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> parser.parse(request, null, null, null, null, null, false, XContentType.JSON, + req -> fail(), req -> fail(), req -> fail())); + assertEquals("explicit index in bulk is not allowed", ex.getMessage()); + } + + public void testTypeWarning() throws IOException { + BytesArray request = new BytesArray("{ \"index\":{ \"_type\": \"quux\", \"_id\": \"bar\" } }\n{}\n"); + BulkRequestParser parser = new BulkRequestParser(true); + final AtomicBoolean parsed = new AtomicBoolean(); + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, + indexRequest -> { + assertFalse(parsed.get()); + assertEquals("foo", indexRequest.index()); + assertEquals("bar", indexRequest.id()); + parsed.set(true); + }, + req -> fail(), req -> fail()); + assertTrue(parsed.get()); + + assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); + } + +} diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index 6d3e4c04c13d7..ebd6590a80cca 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -352,7 +352,7 @@ public void testBulkTerminatedByNewline() throws Exception { String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk11.json"); IllegalArgumentException expectThrows = expectThrows(IllegalArgumentException.class, () -> new BulkRequest() .add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON)); - assertEquals("The bulk request must be terminated by a newline [\n]", expectThrows.getMessage()); + assertEquals("The bulk request must be terminated by a newline [\\n]", expectThrows.getMessage()); String bulkActionWithNewLine = bulkAction + "\n"; BulkRequest bulkRequestWithNewLine = new BulkRequest(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 219aee9ebe2ff..a13e8af919b2a 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import 
org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -57,6 +58,7 @@ import org.mockito.Captor; import org.mockito.MockitoAnnotations; +import java.util.Arrays; import java.util.Collections; import java.util.Iterator; import java.util.Map; @@ -460,7 +462,7 @@ public void testUseDefaultPipelineWithBulkUpsert() throws Exception { verifyZeroInteractions(transportService); } - public void testCreateIndexBeforeRunPipeline() throws Exception { + public void testDoExecuteCalledTwiceCorrectly() throws Exception { Exception exception = new Exception("fake exception"); IndexRequest indexRequest = new IndexRequest("missing_index", "type", "id"); indexRequest.setPipeline("testpipeline"); @@ -478,20 +480,76 @@ public void testCreateIndexBeforeRunPipeline() throws Exception { // check failure works, and passes through to the listener assertFalse(action.isExecuted); // haven't executed yet + assertFalse(action.indexCreated); // no index yet assertFalse(responseCalled.get()); assertFalse(failureCalled.get()); verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture(), any()); completionHandler.getValue().accept(exception); + assertFalse(action.indexCreated); // still no index yet, the ingest node failed. assertTrue(failureCalled.get()); // now check success indexRequest.setPipeline(IngestService.NOOP_PIPELINE_NAME); // this is done by the real pipeline execution service when processing completionHandler.getValue().accept(null); assertTrue(action.isExecuted); + assertTrue(action.indexCreated); // now the index is created since we skipped the ingest node path. assertFalse(responseCalled.get()); // listener would only be called by real index action, not our mocked one verifyZeroInteractions(transportService); } + public void testNotFindDefaultPipelineFromTemplateMatches(){ + Exception exception = new Exception("fake exception"); + IndexRequest indexRequest = new IndexRequest("missing_index", "type", "id"); + indexRequest.source(Collections.emptyMap()); + AtomicBoolean responseCalled = new AtomicBoolean(false); + AtomicBoolean failureCalled = new AtomicBoolean(false); + singleItemBulkWriteAction.execute(null, indexRequest, ActionListener.wrap( + response -> responseCalled.set(true), + e -> { + assertThat(e, sameInstance(exception)); + failureCalled.set(true); + })); + assertEquals(IngestService.NOOP_PIPELINE_NAME, indexRequest.getPipeline()); + verifyZeroInteractions(ingestService); + + } + + public void testFindDefaultPipelineFromTemplateMatch(){ + Exception exception = new Exception("fake exception"); + ClusterState state = clusterService.state(); + + ImmutableOpenMap.Builder templateMetaDataBuilder = ImmutableOpenMap.builder(); + templateMetaDataBuilder.put("template1", IndexTemplateMetaData.builder("template1").patterns(Arrays.asList("missing_index")) + .order(1).settings(Settings.builder().put(IndexSettings.DEFAULT_PIPELINE.getKey(), "pipeline1").build()).build()); + templateMetaDataBuilder.put("template2", IndexTemplateMetaData.builder("template2").patterns(Arrays.asList("missing_*")) + .order(2).settings(Settings.builder().put(IndexSettings.DEFAULT_PIPELINE.getKey(), "pipeline2").build()).build()); + templateMetaDataBuilder.put("template3", IndexTemplateMetaData.builder("template3").patterns(Arrays.asList("missing*")) + .order(3).build()); + templateMetaDataBuilder.put("template4", 
IndexTemplateMetaData.builder("template4").patterns(Arrays.asList("nope")) + .order(4).settings(Settings.builder().put(IndexSettings.DEFAULT_PIPELINE.getKey(), "pipeline4").build()).build()); + + MetaData metaData = mock(MetaData.class); + when(state.metaData()).thenReturn(metaData); + when(state.getMetaData()).thenReturn(metaData); + when(metaData.templates()).thenReturn(templateMetaDataBuilder.build()); + when(metaData.getTemplates()).thenReturn(templateMetaDataBuilder.build()); + when(metaData.indices()).thenReturn(ImmutableOpenMap.of()); + + IndexRequest indexRequest = new IndexRequest("missing_index", "type", "id"); + indexRequest.source(Collections.emptyMap()); + AtomicBoolean responseCalled = new AtomicBoolean(false); + AtomicBoolean failureCalled = new AtomicBoolean(false); + singleItemBulkWriteAction.execute(null, indexRequest, ActionListener.wrap( + response -> responseCalled.set(true), + e -> { + assertThat(e, sameInstance(exception)); + failureCalled.set(true); + })); + + assertEquals("pipeline2", indexRequest.getPipeline()); + verify(ingestService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture(), any()); + } + private void validateDefaultPipeline(IndexRequest indexRequest) { Exception exception = new Exception("fake exception"); indexRequest.source(Collections.emptyMap()); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 37e82884c5133..610a72de6ecfd 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -144,6 +144,8 @@ public void testExecuteBulkIndexRequest() throws Exception { BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + randomlySetIgnoredPrimaryResponse(primaryRequest); + UpdateHelper updateHelper = null; BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, @@ -169,6 +171,8 @@ public void testExecuteBulkIndexRequest() throws Exception { items[0] = primaryRequest; bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + randomlySetIgnoredPrimaryResponse(primaryRequest); + BulkPrimaryExecutionContext secondContext = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(secondContext, updateHelper, threadPool::absoluteTimeInMillis, new ThrowingMappingUpdatePerformer(new RuntimeException("fail")), () -> {}); @@ -271,6 +275,8 @@ public void testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())) .thenReturn(mappingUpdate); + randomlySetIgnoredPrimaryResponse(items[0]); + // Pretend the mappings haven't made it to the node yet BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); AtomicInteger updateCalled = new AtomicInteger(); @@ -285,8 +291,8 @@ public void testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { assertThat("mappings were \"updated\" once", updateCalled.get(), equalTo(1)); - // Verify that the shard "executed" the operation twice - verify(shard, times(2)).applyIndexOperationOnPrimary(anyLong(), 
any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean()); + // Verify that the shard "executed" the operation once + verify(shard, times(1)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean()); when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())) .thenReturn(success); @@ -295,9 +301,9 @@ public void testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { (update, shardId, type) -> fail("should not have had to update the mappings"), () -> {}); - // Verify that the shard "executed" the operation only once (2 for previous invocations plus + // Verify that the shard "executed" the operation only once (1 for previous invocations plus // 1 for this execution) - verify(shard, times(3)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean()); + verify(shard, times(2)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean()); BulkItemResponse primaryResponse = bulkShardRequest.items()[0].getPrimaryResponse(); @@ -326,6 +332,8 @@ public void testExecuteBulkIndexRequestWithErrorWhileUpdatingMapping() throws Ex boolean errorOnWait = randomBoolean(); + randomlySetIgnoredPrimaryResponse(items[0]); + BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, errorOnWait == false ? new ThrowingMappingUpdatePerformer(err) : new NoopMappingUpdatePerformer(), @@ -365,6 +373,8 @@ public void testExecuteBulkDeleteRequest() throws Exception { Translog.Location location = new Translog.Location(0, 0, 0); UpdateHelper updateHelper = null; + randomlySetIgnoredPrimaryResponse(items[0]); + BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), () -> {}); @@ -405,6 +415,8 @@ public void testExecuteBulkDeleteRequest() throws Exception { location = context.getLocationToSync(); + randomlySetIgnoredPrimaryResponse(items[0]); + context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), () -> {}); @@ -459,6 +471,8 @@ public void testNoopUpdateRequest() throws Exception { BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + randomlySetIgnoredPrimaryResponse(primaryRequest); + BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer(), () -> {}); @@ -503,6 +517,7 @@ public void testUpdateRequestWithFailure() throws Exception { BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + randomlySetIgnoredPrimaryResponse(primaryRequest); BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, @@ -535,7 +550,7 @@ public void testUpdateRequestWithConflictFailure() throws Exception { IndexRequest updateResponse = new IndexRequest("index", "_doc", 
"id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); - Exception err = new VersionConflictEngineException(shardId, "_doc", "id", + Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>"); Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0, 0); IndexShard shard = mock(IndexShard.class); @@ -552,6 +567,7 @@ public void testUpdateRequestWithConflictFailure() throws Exception { BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + randomlySetIgnoredPrimaryResponse(primaryRequest); BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, @@ -598,6 +614,7 @@ public void testUpdateRequestWithSuccess() throws Exception { BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + randomlySetIgnoredPrimaryResponse(primaryRequest); BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, @@ -643,6 +660,7 @@ public void testUpdateWithDelete() throws Exception { BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + randomlySetIgnoredPrimaryResponse(primaryRequest); BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, @@ -676,6 +694,7 @@ public void testFailureDuringUpdateProcessing() throws Exception { BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + randomlySetIgnoredPrimaryResponse(primaryRequest); BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(bulkShardRequest, shard); TransportShardBulkAction.executeBulkItemRequest(context, updateHelper, threadPool::absoluteTimeInMillis, @@ -765,7 +784,7 @@ public void testRetries() throws Exception { IndexRequest updateResponse = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); - Exception err = new VersionConflictEngineException(shardId, "_doc", "id", + Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>"); Engine.IndexResult conflictedResult = new Engine.IndexResult(err, 0, 0); Engine.IndexResult mappingUpdate = @@ -809,6 +828,14 @@ public void testRetries() throws Exception { assertThat(response.getSeqNo(), equalTo(13L)); } + private void randomlySetIgnoredPrimaryResponse(BulkItemRequest primaryRequest) { + if (randomBoolean()) { + // add a response to the request and thereby check that it is ignored for the primary. 
+ primaryRequest.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, new IndexResponse(null, "_doc", + "ignore-primary-response-on-primary", 42, 42, 42, false))); + } + } + /** * Fake IndexResult that has a settable translog location */ diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index 70f70268a0a03..16df17bef1ada 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -19,32 +19,49 @@ package org.elasticsearch.action.search; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.Transport; +import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; public class AbstractSearchAsyncActionTests extends ESTestCase { - private AbstractSearchAsyncAction createAction( - final boolean controlled, - final AtomicLong expected) { + private final List> resolvedNodes = new ArrayList<>(); + private final Set releasedContexts = new CopyOnWriteArraySet<>(); + private AbstractSearchAsyncAction createAction(SearchRequest request, + InitialSearchPhase.ArraySearchPhaseResults results, + ActionListener listener, + final boolean controlled, + final AtomicLong expected) { final Runnable runnable; final TransportSearchAction.SearchTimeProvider timeProvider; if (controlled) { @@ -61,18 +78,20 @@ private AbstractSearchAsyncAction createAction( System::nanoTime); } - final SearchRequest request = new SearchRequest(); - request.allowPartialSearchResults(true); - request.preference("_shards:1,3"); - return new AbstractSearchAsyncAction("test", null, null, null, + BiFunction nodeIdToConnection = (cluster, node) -> { + resolvedNodes.add(Tuple.tuple(cluster, node)); + return null; + }; + + return new AbstractSearchAsyncAction("test", null, null, nodeIdToConnection, Collections.singletonMap("foo", new AliasFilter(new MatchAllQueryBuilder())), Collections.singletonMap("foo", 2.0f), - Collections.singletonMap("name", Sets.newHashSet("bar", "baz")),null, request, null, + Collections.singletonMap("name", Sets.newHashSet("bar", "baz")), null, request, listener, new 
GroupShardsIterator<>( Collections.singletonList( new SearchShardIterator(null, null, Collections.emptyList(), null) ) ), timeProvider, 0, null, - new InitialSearchPhase.ArraySearchPhaseResults<>(10), request.getMaxConcurrentShardRequests(), + results, request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY) { @Override protected SearchPhase getNextPhase(final SearchPhaseResults results, final SearchPhaseContext context) { @@ -89,6 +108,11 @@ long buildTookInMillis() { runnable.run(); return super.buildTookInMillis(); } + + @Override + public void sendReleaseSearchContext(long contextId, Transport.Connection connection, OriginalIndices originalIndices) { + releasedContexts.add(contextId); + } }; } @@ -102,7 +126,8 @@ public void testTookWithRealClock() { private void runTestTook(final boolean controlled) { final AtomicLong expected = new AtomicLong(); - AbstractSearchAsyncAction action = createAction(controlled, expected); + AbstractSearchAsyncAction action = createAction(new SearchRequest(), + new InitialSearchPhase.ArraySearchPhaseResults<>(10), null, controlled, expected); final long actual = action.buildTookInMillis(); if (controlled) { // with a controlled clock, we can assert the exact took time @@ -114,8 +139,10 @@ private void runTestTook(final boolean controlled) { } public void testBuildShardSearchTransportRequest() { + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(randomBoolean()).preference("_shards:1,3"); final AtomicLong expected = new AtomicLong(); - AbstractSearchAsyncAction action = createAction(false, expected); + AbstractSearchAsyncAction action = createAction(searchRequest, + new InitialSearchPhase.ArraySearchPhaseResults<>(10), null, false, expected); String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10); SearchShardIterator iterator = new SearchShardIterator(clusterAlias, new ShardId(new Index("name", "foo"), 1), Collections.emptyList(), new OriginalIndices(new String[] {"name", "name1"}, IndicesOptions.strictExpand())); @@ -129,4 +156,114 @@ public void testBuildShardSearchTransportRequest() { assertEquals("_shards:1,3", shardSearchTransportRequest.preference()); assertEquals(clusterAlias, shardSearchTransportRequest.getClusterAlias()); } + + public void testBuildSearchResponse() { + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(randomBoolean()); + AbstractSearchAsyncAction action = createAction(searchRequest, + new InitialSearchPhase.ArraySearchPhaseResults<>(10), null, false, new AtomicLong()); + String scrollId = randomBoolean() ? 
null : randomAlphaOfLengthBetween(5, 10); + InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty(); + SearchResponse searchResponse = action.buildSearchResponse(internalSearchResponse, scrollId); + assertEquals(scrollId, searchResponse.getScrollId()); + assertSame(searchResponse.getAggregations(), internalSearchResponse.aggregations()); + assertSame(searchResponse.getSuggest(), internalSearchResponse.suggest()); + assertSame(searchResponse.getProfileResults(), internalSearchResponse.profile()); + assertSame(searchResponse.getHits(), internalSearchResponse.hits()); + } + + public void testBuildSearchResponseAllowPartialFailures() { + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + AbstractSearchAsyncAction action = createAction(searchRequest, + new InitialSearchPhase.ArraySearchPhaseResults<>(10), null, false, new AtomicLong()); + action.onShardFailure(0, new SearchShardTarget("node", new ShardId("index", "index-uuid", 0), null, OriginalIndices.NONE), + new IllegalArgumentException()); + String scrollId = randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10); + InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty(); + SearchResponse searchResponse = action.buildSearchResponse(internalSearchResponse, scrollId); + assertEquals(scrollId, searchResponse.getScrollId()); + assertSame(searchResponse.getAggregations(), internalSearchResponse.aggregations()); + assertSame(searchResponse.getSuggest(), internalSearchResponse.suggest()); + assertSame(searchResponse.getProfileResults(), internalSearchResponse.profile()); + assertSame(searchResponse.getHits(), internalSearchResponse.hits()); + } + + public void testBuildSearchResponseDisallowPartialFailures() { + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + Set requestIds = new HashSet<>(); + List> nodeLookups = new ArrayList<>(); + int numFailures = randomIntBetween(1, 5); + InitialSearchPhase.ArraySearchPhaseResults phaseResults = phaseResults(requestIds, nodeLookups, numFailures); + AbstractSearchAsyncAction action = createAction(searchRequest, phaseResults, listener, false, new AtomicLong()); + for (int i = 0; i < numFailures; i++) { + ShardId failureShardId = new ShardId("index", "index-uuid", i); + String failureClusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10); + String failureNodeId = randomAlphaOfLengthBetween(5, 10); + action.onShardFailure(i, new SearchShardTarget(failureNodeId, failureShardId, failureClusterAlias, OriginalIndices.NONE), + new IllegalArgumentException()); + } + action.buildSearchResponse(InternalSearchResponse.empty(), randomBoolean() ? 
null : randomAlphaOfLengthBetween(5, 10)); + assertThat(exception.get(), instanceOf(SearchPhaseExecutionException.class)); + SearchPhaseExecutionException searchPhaseExecutionException = (SearchPhaseExecutionException)exception.get(); + assertEquals(0, searchPhaseExecutionException.getSuppressed().length); + assertEquals(numFailures, searchPhaseExecutionException.shardFailures().length); + for (ShardSearchFailure shardSearchFailure : searchPhaseExecutionException.shardFailures()) { + assertThat(shardSearchFailure.getCause(), instanceOf(IllegalArgumentException.class)); + } + assertEquals(nodeLookups, resolvedNodes); + assertEquals(requestIds, releasedContexts); + } + + public void testOnPhaseFailure() { + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + Set requestIds = new HashSet<>(); + List> nodeLookups = new ArrayList<>(); + InitialSearchPhase.ArraySearchPhaseResults phaseResults = phaseResults(requestIds, nodeLookups, 0); + AbstractSearchAsyncAction action = createAction(searchRequest, phaseResults, listener, false, new AtomicLong()); + action.onPhaseFailure(new SearchPhase("test") { + @Override + public void run() { + + } + }, "message", null); + assertThat(exception.get(), instanceOf(SearchPhaseExecutionException.class)); + SearchPhaseExecutionException searchPhaseExecutionException = (SearchPhaseExecutionException)exception.get(); + assertEquals("message", searchPhaseExecutionException.getMessage()); + assertEquals("test", searchPhaseExecutionException.getPhaseName()); + assertEquals(0, searchPhaseExecutionException.shardFailures().length); + assertEquals(0, searchPhaseExecutionException.getSuppressed().length); + assertEquals(nodeLookups, resolvedNodes); + assertEquals(requestIds, releasedContexts); + } + + private static InitialSearchPhase.ArraySearchPhaseResults phaseResults(Set requestIds, + List> nodeLookups, + int numFailures) { + int numResults = randomIntBetween(1, 10); + InitialSearchPhase.ArraySearchPhaseResults phaseResults = + new InitialSearchPhase.ArraySearchPhaseResults<>(numResults + numFailures); + + for (int i = 0; i < numResults; i++) { + long requestId = randomLong(); + requestIds.add(requestId); + SearchPhaseResult phaseResult = new PhaseResult(requestId); + String resultClusterAlias = randomBoolean() ? 
null : randomAlphaOfLengthBetween(5, 10); + String resultNodeId = randomAlphaOfLengthBetween(5, 10); + ShardId resultShardId = new ShardId("index", "index-uuid", i); + nodeLookups.add(Tuple.tuple(resultClusterAlias, resultNodeId)); + phaseResult.setSearchShardTarget(new SearchShardTarget(resultNodeId, resultShardId, resultClusterAlias, OriginalIndices.NONE)); + phaseResult.setShardIndex(i); + phaseResults.consumeResult(phaseResult); + } + return phaseResults; + } + + private static final class PhaseResult extends SearchPhaseResult { + PhaseResult(long requestId) { + this.requestId = requestId; + } + } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java index 712d6a60440fe..1f0baa9d21f04 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java @@ -109,7 +109,14 @@ public void testMergeShardFailures() throws InterruptedException { SearchTimeProvider searchTimeProvider = new SearchTimeProvider(0, 0, () -> 0); SearchResponseMerger merger = new SearchResponseMerger(0, 0, SearchContext.TRACK_TOTAL_HITS_ACCURATE, searchTimeProvider, flag -> null); - PriorityQueue> priorityQueue = new PriorityQueue<>(Comparator.comparing(Tuple::v1)); + PriorityQueue> priorityQueue = new PriorityQueue<>(Comparator.comparing(Tuple::v1, + (o1, o2) -> { + int compareTo = o1.getShardId().compareTo(o2.getShardId()); + if (compareTo != 0) { + return compareTo; + } + return o1.getClusterAlias().compareTo(o2.getClusterAlias()); + })); int numIndices = numResponses * randomIntBetween(1, 3); Iterator> indicesPerCluster = randomRealisticIndices(numIndices, numResponses).entrySet().iterator(); for (int i = 0; i < numResponses; i++) { @@ -120,15 +127,46 @@ public void testMergeShardFailures() throws InterruptedException { ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[numFailures]; for (int j = 0; j < numFailures; j++) { ShardId shardId = new ShardId(randomFrom(indices), j); - ShardSearchFailure failure; - if (randomBoolean()) { - SearchShardTarget searchShardTarget = new SearchShardTarget(randomAlphaOfLength(6), shardId, clusterAlias, null); - failure = new ShardSearchFailure(new IllegalArgumentException(), searchShardTarget); - } else { - ElasticsearchException elasticsearchException = new ElasticsearchException(new IllegalArgumentException()); - elasticsearchException.setShard(shardId); - failure = new ShardSearchFailure(elasticsearchException); - } + SearchShardTarget searchShardTarget = new SearchShardTarget(randomAlphaOfLength(6), shardId, clusterAlias, null); + ShardSearchFailure failure = new ShardSearchFailure(new IllegalArgumentException(), searchShardTarget); + shardSearchFailures[j] = failure; + priorityQueue.add(Tuple.tuple(searchShardTarget, failure)); + } + SearchResponse searchResponse = new SearchResponse(InternalSearchResponse.empty(), null, + 1, 1, 0, 100L, shardSearchFailures, SearchResponse.Clusters.EMPTY); + addResponse(merger, searchResponse); + } + awaitResponsesAdded(); + assertEquals(numResponses, merger.numResponses()); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse mergedResponse = merger.getMergedResponse(clusters); + assertSame(clusters, mergedResponse.getClusters()); + assertEquals(numResponses, mergedResponse.getTotalShards()); + assertEquals(numResponses, 
mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertEquals(priorityQueue.size(), mergedResponse.getFailedShards()); + ShardSearchFailure[] shardFailures = mergedResponse.getShardFailures(); + assertEquals(priorityQueue.size(), shardFailures.length); + for (ShardSearchFailure shardFailure : shardFailures) { + ShardSearchFailure expected = priorityQueue.poll().v2(); + assertSame(expected, shardFailure); + } + } + + public void testMergeShardFailuresNullShardTarget() throws InterruptedException { + SearchTimeProvider searchTimeProvider = new SearchTimeProvider(0, 0, () -> 0); + SearchResponseMerger merger = new SearchResponseMerger(0, 0, SearchContext.TRACK_TOTAL_HITS_ACCURATE, + searchTimeProvider, flag -> null); + PriorityQueue> priorityQueue = new PriorityQueue<>(Comparator.comparing(Tuple::v1)); + for (int i = 0; i < numResponses; i++) { + int numFailures = randomIntBetween(1, 10); + ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[numFailures]; + for (int j = 0; j < numFailures; j++) { + String index = "index-" + i; + ShardId shardId = new ShardId(index, index + "-uuid", j); + ElasticsearchException elasticsearchException = new ElasticsearchException(new IllegalArgumentException()); + elasticsearchException.setShard(shardId); + ShardSearchFailure failure = new ShardSearchFailure(elasticsearchException); shardSearchFailures[j] = failure; priorityQueue.add(Tuple.tuple(shardId, failure)); } @@ -337,6 +375,7 @@ public void testMergeSearchHits() throws InterruptedException { float expectedMaxScore = Float.NEGATIVE_INFINITY; int numIndices = requestedSize == 0 ? 0 : randomIntBetween(1, requestedSize); Iterator> indicesIterator = randomRealisticIndices(numIndices, numResponses).entrySet().iterator(); + boolean hasHits = false; for (int i = 0; i < numResponses; i++) { Map.Entry entry = indicesIterator.next(); String clusterAlias = entry.getKey(); @@ -360,6 +399,7 @@ public void testMergeSearchHits() throws InterruptedException { float maxScore = scoreSort ? 
numDocs * scoreFactor : Float.NaN; SearchHit[] hits = randomSearchHitArray(numDocs, numResponses, clusterAlias, indices, maxScore, scoreFactor, sortFields, priorityQueue); + hasHits |= hits.length > 0; expectedMaxScore = Math.max(expectedMaxScore, maxScore); Object[] collapseValues = null; @@ -408,8 +448,14 @@ public void testMergeSearchHits() throws InterruptedException { assertNull(searchResponse.getScrollId()); SearchHits searchHits = searchResponse.getHits(); - assertArrayEquals(sortFields, searchHits.getSortFields()); - assertEquals(collapseField, searchHits.getCollapseField()); + // the sort fields and the collapse field are not returned when hits are empty + if (hasHits) { + assertArrayEquals(sortFields, searchHits.getSortFields()); + assertEquals(collapseField, searchHits.getCollapseField()); + } else { + assertNull(searchHits.getSortFields()); + assertNull(searchHits.getCollapseField()); + } if (expectedTotalHits == null) { assertNull(searchHits.getTotalHits()); } else { @@ -427,7 +473,9 @@ public void testMergeSearchHits() throws InterruptedException { priorityQueue.poll(); } SearchHit[] hits = searchHits.getHits(); - if (collapseField != null) { + if (collapseField != null + // the collapse field is not returned when hits are empty + && hasHits) { assertEquals(hits.length, searchHits.getCollapseValues().length); } else { assertNull(searchHits.getCollapseValues()); @@ -466,6 +514,72 @@ public void testMergeNoResponsesAdded() { assertEquals(0, response.getShardFailures().length); } + public void testMergeEmptySearchHitsWithNonEmpty() { + long currentRelativeTime = randomLong(); + final SearchTimeProvider timeProvider = new SearchTimeProvider(randomLong(), 0, () -> currentRelativeTime); + SearchResponseMerger merger = new SearchResponseMerger(0, 10, Integer.MAX_VALUE, timeProvider, flag -> null); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + int numFields = randomIntBetween(1, 3); + SortField[] sortFields = new SortField[numFields]; + for (int i = 0; i < numFields; i++) { + sortFields[i] = new SortField("field-" + i, SortField.Type.INT, randomBoolean()); + } + PriorityQueue priorityQueue = new PriorityQueue<>(new SearchHitComparator(sortFields)); + SearchHit[] hits = randomSearchHitArray(10, 1, "remote", new Index[]{new Index("index", "uuid")}, Float.NaN, 1, + sortFields, priorityQueue); + { + SearchHits searchHits = new SearchHits(hits, new TotalHits(10, TotalHits.Relation.EQUAL_TO), Float.NaN, sortFields, null, null); + InternalSearchResponse response = new InternalSearchResponse(searchHits, null, null, null, false, false, 1); + SearchResponse searchResponse = new SearchResponse(response, null, 1, 1, 0, 1L, + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + merger.add(searchResponse); + } + { + SearchHits empty = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN, null, null, null); + InternalSearchResponse response = new InternalSearchResponse(empty, null, null, null, false, false, 1); + SearchResponse searchResponse = new SearchResponse(response, null, 1, 1, 0, 1L, + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + merger.add(searchResponse); + } + assertEquals(2, merger.numResponses()); + SearchResponse mergedResponse = merger.getMergedResponse(clusters); + assertEquals(10, mergedResponse.getHits().getTotalHits().value); + assertEquals(10, mergedResponse.getHits().getHits().length); + assertEquals(2, mergedResponse.getTotalShards()); + assertEquals(2, 
mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertArrayEquals(sortFields, mergedResponse.getHits().getSortFields()); + assertArrayEquals(hits, mergedResponse.getHits().getHits()); + assertEquals(clusters, mergedResponse.getClusters()); + } + + public void testMergeOnlyEmptyHits() { + long currentRelativeTime = randomLong(); + final SearchTimeProvider timeProvider = new SearchTimeProvider(randomLong(), 0, () -> currentRelativeTime); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + Tuple randomTrackTotalHits = randomTrackTotalHits(); + int trackTotalHitsUpTo = randomTrackTotalHits.v1(); + TotalHits.Relation totalHitsRelation = randomTrackTotalHits.v2(); + SearchResponseMerger merger = new SearchResponseMerger(0, 10, trackTotalHitsUpTo, timeProvider, flag -> null); + int numResponses = randomIntBetween(1, 5); + TotalHits expectedTotalHits = null; + for (int i = 0; i < numResponses; i++) { + TotalHits totalHits = null; + if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED) { + totalHits = new TotalHits(randomLongBetween(0, 1000), totalHitsRelation); + long previousValue = expectedTotalHits == null ? 0 : expectedTotalHits.value; + expectedTotalHits = new TotalHits(Math.min(previousValue + totalHits.value, trackTotalHitsUpTo), totalHitsRelation); + } + SearchHits empty = new SearchHits(new SearchHit[0], totalHits, Float.NaN, null, null, null); + InternalSearchResponse response = new InternalSearchResponse(empty, null, null, null, false, false, 1); + SearchResponse searchResponse = new SearchResponse(response, null, 1, 1, 0, 1L, + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + merger.add(searchResponse); + } + SearchResponse mergedResponse = merger.getMergedResponse(clusters); + assertEquals(expectedTotalHits, mergedResponse.getHits().getTotalHits()); + } + private static Tuple randomTrackTotalHits() { switch(randomIntBetween(0, 2)) { case 0: diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 9a9524d0ff57e..2165895974e27 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -412,7 +412,8 @@ public void testCCSRemoteReduce() throws Exception { OriginalIndices localIndices = local ? new OriginalIndices(new String[]{"index"}, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; int totalClusters = numClusters + (local ? 
1 : 0); TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); - Function reduceContext = finalReduce -> null; + Function reduceContext = + finalReduce -> new InternalAggregation.ReduceContext(null, null, finalReduce); try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { service.start(); service.acceptIncomingRequests(); @@ -805,11 +806,17 @@ public void testShouldMinimizeRoundtrips() throws Exception { collapseBuilder.setInnerHits(new InnerHitBuilder("inner")); assertFalse(TransportSearchAction.shouldMinimizeRoundtrips(searchRequest)); } + { + SearchRequest searchRequest = new SearchRequest(); + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + assertFalse(TransportSearchAction.shouldMinimizeRoundtrips(searchRequest)); + } { SearchRequestTests searchRequestTests = new SearchRequestTests(); searchRequestTests.setUp(); SearchRequest searchRequest = searchRequestTests.createSearchRequest(); searchRequest.scroll((Scroll)null); + searchRequest.searchType(SearchType.QUERY_THEN_FETCH); SearchSourceBuilder source = searchRequest.source(); if (source != null) { CollapseBuilder collapse = source.collapse(); diff --git a/server/src/test/java/org/elasticsearch/action/support/ListenerTimeoutsTests.java b/server/src/test/java/org/elasticsearch/action/support/ListenerTimeoutsTests.java new file mode 100644 index 0000000000000..d5e3f0031c72f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/support/ListenerTimeoutsTests.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.coordination.DeterministicTaskQueue; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.Before; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.node.Node.NODE_NAME_SETTING; +import static org.hamcrest.core.IsInstanceOf.instanceOf; + +public class ListenerTimeoutsTests extends ESTestCase { + + private final TimeValue timeout = TimeValue.timeValueMillis(10); + private final String generic = ThreadPool.Names.GENERIC; + private DeterministicTaskQueue taskQueue; + + @Before + public void setUp() throws Exception { + super.setUp(); + Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), "node").build(); + taskQueue = new DeterministicTaskQueue(settings, random()); + } + + public void testListenerTimeout() { + AtomicBoolean success = new AtomicBoolean(false); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = wrap(success, exception); + + ActionListener wrapped = ListenerTimeouts.wrapWithTimeout(taskQueue.getThreadPool(), listener, timeout, generic, "test"); + assertTrue(taskQueue.hasDeferredTasks()); + taskQueue.advanceTime(); + taskQueue.runAllRunnableTasks(); + + wrapped.onResponse(null); + wrapped.onFailure(new IOException("incorrect exception")); + + assertFalse(success.get()); + assertThat(exception.get(), instanceOf(ElasticsearchTimeoutException.class)); + } + + public void testFinishNormallyBeforeTimeout() { + AtomicBoolean success = new AtomicBoolean(false); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = wrap(success, exception); + + ActionListener wrapped = ListenerTimeouts.wrapWithTimeout(taskQueue.getThreadPool(), listener, timeout, generic, "test"); + wrapped.onResponse(null); + wrapped.onFailure(new IOException("boom")); + wrapped.onResponse(null); + + assertTrue(taskQueue.hasDeferredTasks()); + taskQueue.advanceTime(); + taskQueue.runAllRunnableTasks(); + + assertTrue(success.get()); + assertNull(exception.get()); + } + + public void testFinishExceptionallyBeforeTimeout() { + AtomicBoolean success = new AtomicBoolean(false); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = wrap(success, exception); + + ActionListener wrapped = ListenerTimeouts.wrapWithTimeout(taskQueue.getThreadPool(), listener, timeout, generic, "test"); + wrapped.onFailure(new IOException("boom")); + + assertTrue(taskQueue.hasDeferredTasks()); + taskQueue.advanceTime(); + taskQueue.runAllRunnableTasks(); + + assertFalse(success.get()); + assertThat(exception.get(), instanceOf(IOException.class)); + } + + private ActionListener wrap(AtomicBoolean success, AtomicReference exception) { + return new ActionListener() { + + private final AtomicBoolean completed = new AtomicBoolean(); + + @Override + public void onResponse(Void aVoid) { + assertTrue(completed.compareAndSet(false, true)); + assertTrue(success.compareAndSet(false, true)); + } + + @Override + public void onFailure(Exception e) { + assertTrue(completed.compareAndSet(false, true)); + assertTrue(exception.compareAndSet(null, e)); + } + }; + } +} diff --git 
a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 8fa10c4ee26d7..02988e7981a29 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -21,14 +21,16 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; @@ -39,7 +41,10 @@ import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.SendRequestTransportException; +import org.elasticsearch.transport.TransportException; import java.util.ArrayList; import java.util.Collections; @@ -51,7 +56,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; import java.util.function.Supplier; import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state; @@ -115,10 +119,8 @@ public void testReplication() throws Exception { final TestReplicaProxy replicasProxy = new TestReplicaProxy(primaryTerm, simulatedFailures); final TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup); - final TestReplicationOperation op = new TestReplicationOperation(request, - primary, listener, replicasProxy); + final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy); op.execute(); - assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); assertThat(request.processedOnReplicas, equalTo(expectedReplicas)); assertThat(replicasProxy.failedReplicas, equalTo(simulatedFailures.keySet())); @@ -162,7 +164,7 @@ private void addTrackingInfo(IndexShardRoutingTable indexShardRoutingTable, Shar } } - public void testDemotedPrimary() throws Exception { + public void testNoLongerPrimary() throws Exception { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); @@ -198,26 +200,37 @@ public void testDemotedPrimary() throws Exception { Request request = new Request(shardId); PlainActionFuture listener = new PlainActionFuture<>(); final boolean testPrimaryDemotedOnStaleShardCopies = randomBoolean(); + final Exception shardActionFailure; + if (randomBoolean()) { + shardActionFailure = new 
NodeClosedException(new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT)); + } else if (randomBoolean()) { + shardActionFailure = new SendRequestTransportException( + new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT), ShardStateAction.SHARD_FAILED_ACTION_NAME, + new TransportException("TransportService is closed stopped can't send request")); + } else if (randomBoolean()) { + shardActionFailure = new TransportException( + "transport stopped, action: " + ShardStateAction.SHARD_FAILED_ACTION_NAME); + } else { + shardActionFailure = new ShardStateAction.NoLongerPrimaryShardException(failedReplica.shardId(), "the king is dead"); + } final TestReplicaProxy replicasProxy = new TestReplicaProxy(primaryTerm, expectedFailures) { @Override public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, - Runnable onSuccess, Consumer onPrimaryDemoted, - Consumer onIgnoredFailure) { + ActionListener shardActionListener) { if (testPrimaryDemotedOnStaleShardCopies) { - super.failShardIfNeeded(replica, message, exception, onSuccess, onPrimaryDemoted, onIgnoredFailure); + super.failShardIfNeeded(replica, message, exception, shardActionListener); } else { assertThat(replica, equalTo(failedReplica)); - onPrimaryDemoted.accept(new ElasticsearchException("the king is dead")); + shardActionListener.onFailure(shardActionFailure); } } @Override - public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, Runnable onSuccess, - Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { + public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, ActionListener shardActionListener) { if (testPrimaryDemotedOnStaleShardCopies) { - onPrimaryDemoted.accept(new ElasticsearchException("the king is dead")); + shardActionListener.onFailure(shardActionFailure); } else { - super.markShardCopyAsStaleIfNeeded(shardId, allocationId, onSuccess, onPrimaryDemoted, onIgnoredFailure); + super.markShardCopyAsStaleIfNeeded(shardId, allocationId, shardActionListener); } } }; @@ -225,6 +238,7 @@ public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, R final TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup) { @Override public void failShard(String message, Exception exception) { + assertThat(exception, instanceOf(ShardStateAction.NoLongerPrimaryShardException.class)); assertTrue(primaryFailed.compareAndSet(false, true)); } }; @@ -233,7 +247,11 @@ public void failShard(String message, Exception exception) { assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); assertTrue("listener is not marked as done", listener.isDone()); - assertTrue(primaryFailed.get()); + if (shardActionFailure instanceof ShardStateAction.NoLongerPrimaryShardException) { + assertTrue(primaryFailed.get()); + } else { + assertFalse(primaryFailed.get()); + } assertListenerThrows("should throw exception to trigger retry", listener, ReplicationOperation.RetryOnPrimaryException.class); } @@ -594,33 +612,23 @@ public void performOn( } @Override - public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, Runnable onSuccess, - Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { + public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, ActionListener listener) { if (failedReplicas.add(replica) == false) { fail("replica [" + replica + "] was failed twice"); } if (opFailures.containsKey(replica)) { - if 
(randomBoolean()) { - onSuccess.run(); - } else { - onIgnoredFailure.accept(new ElasticsearchException("simulated")); - } + listener.onResponse(null); } else { fail("replica [" + replica + "] was failed"); } } @Override - public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, Runnable onSuccess, - Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { + public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, ActionListener listener) { if (markedAsStaleCopies.add(allocationId) == false) { fail("replica [" + allocationId + "] was marked as stale twice"); } - if (randomBoolean()) { - onSuccess.run(); - } else { - onIgnoredFailure.accept(new ElasticsearchException("simulated")); - } + listener.onResponse(null); } } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 41a300c28f3a9..5614fb2fed27d 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -63,9 +63,11 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; +import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ReplicationGroup; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -221,7 +223,7 @@ protected ClusterBlockLevel globalBlockLevel() { } @Override - protected ClusterBlockLevel indexBlockLevel() { + public ClusterBlockLevel indexBlockLevel() { return globalBlock == false ? ClusterBlockLevel.WRITE : null; } }; @@ -305,7 +307,7 @@ protected ClusterBlockLevel globalBlockLevel() { } @Override - protected ClusterBlockLevel indexBlockLevel() { + public ClusterBlockLevel indexBlockLevel() { return globalBlock == false ? 
ClusterBlockLevel.WRITE : null; } }; @@ -384,6 +386,40 @@ public void testNotStartedPrimary() throws InterruptedException, ExecutionExcept assertIndexShardCounter(0); } + public void testShardNotInPrimaryMode() { + final String index = "test"; + final ShardId shardId = new ShardId(index, "_na_", 0); + final ClusterState state = state(index, true, ShardRoutingState.RELOCATING); + setState(clusterService, state); + final ReplicationTask task = maybeTask(); + final Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + final AtomicBoolean executed = new AtomicBoolean(); + + final ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard(); + final long primaryTerm = state.metaData().index(index).primaryTerm(shardId.id()); + + isPrimaryMode.set(false); + + new TestAction(Settings.EMPTY, "internal:test-action", transportService, clusterService, shardStateAction, threadPool) { + @Override + protected PrimaryResult shardOperationOnPrimary(Request shardRequest, IndexShard primary) throws Exception { + assertPhase(task, "primary"); + assertFalse(executed.getAndSet(true)); + return super.shardOperationOnPrimary(shardRequest, primary); + } + }.new AsyncPrimaryAction(request, primaryShard.allocationId().getId(), primaryTerm, createTransportChannel(listener), task).run(); + + assertFalse(executed.get()); + assertIndexShardCounter(0); // no permit should be held + + final ExecutionException e = expectThrows(ExecutionException.class, listener::get); + assertThat(e.getCause(), instanceOf(ReplicationOperation.RetryOnPrimaryException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); + assertThat(e.getCause().getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause().getCause(), hasToString(containsString("shard is not in primary mode"))); + } + /** * When relocating a primary shard, there is a cluster state update at the end of relocation where the active primary is switched from * the relocation source to the relocation target. If relocation source receives and processes this cluster state @@ -744,11 +780,9 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { } AtomicReference failure = new AtomicReference<>(); - AtomicReference ignoredFailure = new AtomicReference<>(); AtomicBoolean success = new AtomicBoolean(); proxy.failShardIfNeeded(replica, "test", new ElasticsearchException("simulated"), - () -> success.set(true), failure::set, ignoredFailure::set - ); + ActionListener.wrap(r -> success.set(true), failure::set)); CapturingTransport.CapturedRequest[] shardFailedRequests = transport.getCapturedRequestsAndClear(); // A replication action doesn't not fail the request assertEquals(0, shardFailedRequests.length); @@ -1138,6 +1172,8 @@ private void assertIndexShardCounter(int expected) { private final AtomicBoolean isRelocated = new AtomicBoolean(false); + private final AtomicBoolean isPrimaryMode = new AtomicBoolean(true); + /** * Sometimes build a ReplicationTask for tracking the phase of the * TransportReplicationAction. 
Since TransportReplicationAction has to work @@ -1277,10 +1313,16 @@ final IndexService mockIndexService(final IndexMetaData indexMetaData, ClusterSe private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService) { final IndexShard indexShard = mock(IndexShard.class); when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.state()).thenReturn(IndexShardState.STARTED); doAnswer(invocation -> { ActionListener callback = (ActionListener) invocation.getArguments()[0]; - count.incrementAndGet(); - callback.onResponse(count::decrementAndGet); + if (isPrimaryMode.get()) { + count.incrementAndGet(); + callback.onResponse(count::decrementAndGet); + + } else { + callback.onFailure(new ShardNotInPrimaryModeException(shardId, IndexShardState.STARTED)); + } return null; }).when(indexShard).acquirePrimaryOperationPermit(any(ActionListener.class), anyString(), anyObject()); doAnswer(invocation -> { diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java index 8cad76bcdfe5e..1cb1bfde34ea8 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java @@ -459,7 +459,7 @@ protected ClusterBlockLevel globalBlockLevel() { } @Override - protected ClusterBlockLevel indexBlockLevel() { + public ClusterBlockLevel indexBlockLevel() { return globalBlock == false ? ClusterBlockLevel.WRITE : super.indexBlockLevel(); } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 6e1ec3c76797d..f540374a56c20 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -311,11 +311,9 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { } AtomicReference failure = new AtomicReference<>(); - AtomicReference ignoredFailure = new AtomicReference<>(); AtomicBoolean success = new AtomicBoolean(); proxy.failShardIfNeeded(replica, "test", new ElasticsearchException("simulated"), - () -> success.set(true), failure::set, ignoredFailure::set - ); + ActionListener.wrap(r -> success.set(true), failure::set)); CapturingTransport.CapturedRequest[] shardFailedRequests = transport.getCapturedRequestsAndClear(); // A write replication action proxy should fail the shard assertEquals(1, shardFailedRequests.length); @@ -329,8 +327,6 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { transport.handleResponse(shardFailedRequest.requestId, TransportResponse.Empty.INSTANCE); assertTrue(success.get()); assertNull(failure.get()); - assertNull(ignoredFailure.get()); - } else if (randomBoolean()) { // simulate the primary has been demoted transport.handleRemoteError(shardFailedRequest.requestId, @@ -338,15 +334,12 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { "shard-failed-test")); assertFalse(success.get()); assertNotNull(failure.get()); - assertNull(ignoredFailure.get()); - } else { - // simulated an "ignored" exception + // simulated 
a node closing exception transport.handleRemoteError(shardFailedRequest.requestId, new NodeClosedException(state.nodes().getLocalNode())); assertFalse(success.get()); - assertNull(failure.get()); - assertNotNull(ignoredFailure.get()); + assertNotNull(failure.get()); } } diff --git a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java index 1a101b3340295..7ae8156088db1 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++ b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java @@ -101,7 +101,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, } else if (ClusterStateAction.NAME.equals(action)) { TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); ClusterState clusterState = getMockClusterState(node); - transportResponseHandler.handleResponse(new ClusterStateResponse(clusterName, clusterState, 0L, false)); + transportResponseHandler.handleResponse(new ClusterStateResponse(clusterName, clusterState, false)); } else if (TransportService.HANDSHAKE_ACTION_NAME.equals(action)) { TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); Version version = node.getVersion(); diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java index 3efd447c46873..e63f3a1d59a29 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java @@ -175,7 +175,7 @@ public void sendRequest(Transport.Connection conne address.address().getHostString(), address.getAddress(), address, Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT))); ((TransportResponseHandler) handler) - .handleResponse(new ClusterStateResponse(cluster1, builder.build(), 0L, false)); + .handleResponse(new ClusterStateResponse(cluster1, builder.build(), false)); clusterStateLatch.countDown(); } else if (TransportService.HANDSHAKE_ACTION_NAME .equals(action)) { ((TransportResponseHandler) handler).handleResponse( diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index 3100dcbcc66a3..6796a23ef0f26 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -427,7 +427,7 @@ public void messageReceived(ClusterStateRequest request, TransportChannel channe DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(transportService.getLocalDiscoNode()).build(); ClusterState build = ClusterState.builder(ClusterName.DEFAULT).nodes(discoveryNodes).build(); - channel.sendResponse(new ClusterStateResponse(ClusterName.DEFAULT, build, 0L, false)); + channel.sendResponse(new ClusterStateResponse(ClusterName.DEFAULT, build, false)); } void failToRespond() { diff --git a/server/src/test/java/org/elasticsearch/cluster/GetClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/GetClusterStateTests.java deleted 
file mode 100644 index 69e571980a66d..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/GetClusterStateTests.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster; - -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import static org.hamcrest.Matchers.greaterThanOrEqualTo; - -/** - * Tests for the get cluster state API. - * - * See: {@link org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction} - * {@link org.elasticsearch.rest.action.admin.cluster.RestClusterStateAction} - */ -public class GetClusterStateTests extends ESSingleNodeTestCase { - - public void testGetClusterState() { - ClusterStateResponse response = client().admin().cluster().prepareState().get(); - assertNotNull(response.getState()); - assertNotNull(response.getClusterName()); - // assume the cluster state size is 50 bytes or more, just so we aren't testing against size of 0 - assertThat(response.getTotalCompressedSize().getBytes(), greaterThanOrEqualTo(50L)); - } - - public void testSizeDerivedFromFullClusterState() { - ClusterStateResponse response = client().admin().cluster().prepareState().get(); - final ClusterState clusterState = response.getState(); - final long totalCompressedSize = response.getTotalCompressedSize().getBytes(); - // exclude the nodes from being returned, the total size returned should still be - // the same as when no filtering was applied to the cluster state retrieved - response = client().admin().cluster().prepareState().setNodes(false).get(); - assertEquals(totalCompressedSize, response.getTotalCompressedSize().getBytes()); - assertNotEquals(clusterState, response.getState()); - assertEquals(0, response.getState().nodes().getSize()); - } -} diff --git a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index ad1a0516e4430..8f395c2d137a0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -60,7 +60,7 @@ import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) -@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") +@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.cluster.coordination:TRACE") public class MinimumMasterNodesIT extends ESIntegTestCase { @Override @@ -202,6 +202,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { } } + @AwaitsFix(bugUrl = 
"https://github.com/elastic/elasticsearch/issues/39172") public void testThreeNodesNoMasterBlock() throws Exception { internalCluster().setBootstrapMasterNodeIndex(2); @@ -284,7 +285,7 @@ public void testCannotCommitStateThreeNodes() throws Exception { .build(); internalCluster().startNodes(3, settings); - ensureGreen(); // ensure cluster state is recovered before we disrupt things + ensureStableCluster(3); final String master = internalCluster().getMasterName(); Set otherNodes = new HashSet<>(Arrays.asList(internalCluster().getNodeNames())); diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 071c8a0195531..5ce996a2e77fd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -84,6 +84,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38331") public void testElectOnlyBetweenMasterNodes() throws Exception { internalCluster().setBootstrapMasterNodeIndex(0); logger.info("--> start data node / non master node"); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java index 7f32c086f975f..70c6f5d71bfab 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -23,6 +23,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransport; import org.elasticsearch.transport.TransportRequest; @@ -491,4 +493,52 @@ public void testDoesNotIncludeExtraNodes() { deterministicTaskQueue.runAllTasks(); assertTrue(bootstrapped.get()); } + + public void testBootstrapsAutomaticallyWithSingleNodeDiscovery() { + final Settings.Builder settings = Settings.builder() + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) + .put(NODE_NAME_SETTING.getKey(), localNode.getName()); + final AtomicBoolean bootstrapped = new AtomicBoolean(); + + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(settings.build(), + transportService, () -> emptyList(), () -> false, vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), hasSize(1)); + assertThat(vc.getNodeIds(), hasItem(localNode.getId())); + assertTrue(vc.hasQuorum(singletonList(localNode.getId()))); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); + + bootstrapped.set(false); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertFalse(bootstrapped.get()); // should only bootstrap once + } + + public void testFailBootstrapWithBothSingleNodeDiscoveryAndInitialMasterNodes() { + final Settings.Builder settings = 
Settings.builder() + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) + .put(NODE_NAME_SETTING.getKey(), localNode.getName()) + .put(INITIAL_MASTER_NODES_SETTING.getKey(), "test"); + + assertThat(expectThrows(IllegalArgumentException.class, () -> new ClusterBootstrapService(settings.build(), + transportService, () -> emptyList(), () -> false, vc -> fail())).getMessage(), + containsString("setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + "] is not allowed when [discovery.type] is set " + + "to [single-node]")); + } + + public void testFailBootstrapNonMasterEligibleNodeWithSingleNodeDiscovery() { + final Settings.Builder settings = Settings.builder() + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) + .put(NODE_NAME_SETTING.getKey(), localNode.getName()) + .put(Node.NODE_MASTER_SETTING.getKey(), false); + + assertThat(expectThrows(IllegalArgumentException.class, () -> new ClusterBootstrapService(settings.build(), + transportService, () -> emptyList(), () -> false, vc -> fail())).getMessage(), + containsString("node with [discovery.type] set to [single-node] must be master-eligible")); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java index 6e90aed5f74bf..8b08c9c3fc01e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -65,13 +65,14 @@ public void testScheduling() { = new DeterministicTaskQueue(Settings.builder().put(NODE_NAME_SETTING.getKey(), "node").build(), random()); final AtomicLong warningCount = new AtomicLong(); + final AtomicLong logLastFailedJoinAttemptWarningCount = new AtomicLong(); final ClusterFormationFailureHelper clusterFormationFailureHelper = new ClusterFormationFailureHelper(settingsBuilder.build(), () -> { warningCount.incrementAndGet(); return new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), emptyList(), 0L); }, - deterministicTaskQueue.getThreadPool()); + deterministicTaskQueue.getThreadPool(), () -> logLastFailedJoinAttemptWarningCount.incrementAndGet()); deterministicTaskQueue.runAllTasks(); assertThat("should not schedule anything yet", warningCount.get(), is(0L)); @@ -105,8 +106,10 @@ public void testScheduling() { deterministicTaskQueue.runAllTasksInTimeOrder(); assertThat(warningCount.get(), is(5L)); + assertThat(logLastFailedJoinAttemptWarningCount.get(), is(5L)); warningCount.set(0); + logLastFailedJoinAttemptWarningCount.set(0); clusterFormationFailureHelper.start(); clusterFormationFailureHelper.stop(); clusterFormationFailureHelper.start(); @@ -127,6 +130,7 @@ public void testScheduling() { deterministicTaskQueue.runAllTasksInTimeOrder(); assertThat(warningCount.get(), is(5L)); + assertThat(logLastFailedJoinAttemptWarningCount.get(), is(5L)); } public void testDescriptionOnMasterIneligibleNodes() { diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index d481cfc5dcfcd..3952c4d00ea41 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -26,8 
+26,10 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.NodeConnectionsService; @@ -37,6 +39,8 @@ import org.elasticsearch.cluster.coordination.CoordinationState.PersistedState; import org.elasticsearch.cluster.coordination.Coordinator.Mode; import org.elasticsearch.cluster.coordination.CoordinatorTests.Cluster.ClusterNode; +import org.elasticsearch.cluster.coordination.LinearizabilityChecker.History; +import org.elasticsearch.cluster.coordination.LinearizabilityChecker.SequentialSpec; import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -47,10 +51,12 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -60,8 +66,10 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.discovery.zen.PublishClusterStateStats; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.SeedHostsProvider.HostsResolver; +import org.elasticsearch.discovery.zen.PublishClusterStateStats; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.MockGatewayMetaState; @@ -90,6 +98,7 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -103,8 +112,6 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.CoordinationStateTests.clusterState; -import static org.elasticsearch.cluster.coordination.CoordinationStateTests.setValue; -import static org.elasticsearch.cluster.coordination.CoordinationStateTests.value; import static org.elasticsearch.cluster.coordination.Coordinator.Mode.CANDIDATE; import static org.elasticsearch.cluster.coordination.Coordinator.Mode.FOLLOWER; import static org.elasticsearch.cluster.coordination.Coordinator.Mode.LEADER; @@ -119,11 +126,11 @@ import static org.elasticsearch.cluster.coordination.LeaderChecker.LEADER_CHECK_INTERVAL_SETTING; import static 
org.elasticsearch.cluster.coordination.LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING; import static org.elasticsearch.cluster.coordination.LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING; -import static org.elasticsearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; import static org.elasticsearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_ALL; import static org.elasticsearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_ID; import static org.elasticsearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_SETTING; import static org.elasticsearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_WRITES; +import static org.elasticsearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; import static org.elasticsearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; @@ -134,6 +141,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; @@ -204,7 +212,7 @@ public void testCanUpdateClusterStateAfterStabilisation() { } public void testDoesNotElectNonMasterNode() { - final Cluster cluster = new Cluster(randomIntBetween(1, 5), false); + final Cluster cluster = new Cluster(randomIntBetween(1, 5), false, Settings.EMPTY); cluster.runRandomly(); cluster.stabilise(); @@ -882,12 +890,12 @@ public void testIncompatibleDiffResendsFullState() { final PublishClusterStateStats prePublishStats = follower.coordinator.stats().getPublishStats(); logger.info("--> submitting first value to {}", leader); leader.submitValue(randomLong()); - cluster.runFor(DEFAULT_CLUSTER_STATE_UPDATE_DELAY + defaultMillis(PUBLISH_TIMEOUT_SETTING), "publish first state"); + cluster.runFor(DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "publish first state"); logger.info("--> healing {}", follower); follower.heal(); logger.info("--> submitting second value to {}", leader); leader.submitValue(randomLong()); - cluster.stabilise(DEFAULT_CLUSTER_STATE_UPDATE_DELAY); + cluster.stabilise(); final PublishClusterStateStats postPublishStats = follower.coordinator.stats().getPublishStats(); assertEquals(prePublishStats.getFullClusterStateReceivedCount() + 1, postPublishStats.getFullClusterStateReceivedCount()); @@ -904,7 +912,7 @@ public void testIncompatibleDiffResendsFullState() { * and join the leader again. 
*/ public void testStayCandidateAfterReceivingFollowerCheckFromKnownMaster() { - final Cluster cluster = new Cluster(2, false); + final Cluster cluster = new Cluster(2, false, Settings.EMPTY); cluster.runRandomly(); cluster.stabilise(); @@ -916,8 +924,9 @@ public void testStayCandidateAfterReceivingFollowerCheckFromKnownMaster() { nonLeader.coordinator.becomeCandidate("forced"); } logger.debug("simulate follower check coming through from {} to {}", leader.getId(), nonLeader.getId()); - nonLeader.coordinator.onFollowerCheckRequest(new FollowersChecker.FollowerCheckRequest(leader.coordinator.getCurrentTerm(), - leader.getLocalNode())); + expectThrows(CoordinationStateRejectedException.class, () -> nonLeader.coordinator.onFollowerCheckRequest( + new FollowersChecker.FollowerCheckRequest(leader.coordinator.getCurrentTerm(), leader.getLocalNode()))); + assertThat(nonLeader.coordinator.getMode(), equalTo(CANDIDATE)); }).run(); cluster.stabilise(); } @@ -944,7 +953,7 @@ private void testAppliesNoMasterBlock(String noMasterBlockSetting, ClusterBlock final Builder settingsBuilder = Settings.builder().put(cs.metaData().persistentSettings()); settingsBuilder.put(NO_MASTER_BLOCK_SETTING.getKey(), noMasterBlockSetting); return ClusterState.builder(cs).metaData(MetaData.builder(cs.metaData()).persistentSettings(settingsBuilder.build())).build(); - }); + }, (source, e) -> {}); cluster.runFor(DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "committing setting update"); leader.disconnect(); @@ -1028,7 +1037,7 @@ public void testCannotJoinClusterWithDifferentUUID() throws IllegalAccessExcepti final ClusterNode shiftedNode = randomFrom(cluster2.clusterNodes).restartedNode(); final ClusterNode newNode = cluster1.new ClusterNode(nextNodeIndex.getAndIncrement(), - shiftedNode.getLocalNode(), n -> shiftedNode.persistedState); + shiftedNode.getLocalNode(), n -> shiftedNode.persistedState, shiftedNode.nodeSettings); cluster1.clusterNodes.add(newNode); MockLogAppender mockAppender = new MockLogAppender(); @@ -1052,7 +1061,7 @@ public void testCannotJoinClusterWithDifferentUUID() throws IllegalAccessExcepti final ClusterNode detachedNode = newNode.restartedNode( metaData -> DetachClusterCommand.updateMetaData(metaData), - term -> DetachClusterCommand.updateCurrentTerm()); + term -> DetachClusterCommand.updateCurrentTerm(), newNode.nodeSettings); cluster1.clusterNodes.replaceAll(cn -> cn == newNode ? 
detachedNode : cn); cluster1.stabilise(); } @@ -1078,6 +1087,136 @@ public void testDiscoveryUsesNodesFromLastClusterState() { cluster.stabilise(); } + public void testFollowerRemovedIfUnableToSendRequestsToMaster() { + final Cluster cluster = new Cluster(3); + cluster.runRandomly(); + cluster.stabilise(); + + final ClusterNode leader = cluster.getAnyLeader(); + final ClusterNode otherNode = cluster.getAnyNodeExcept(leader); + + cluster.blackholeConnectionsFrom(otherNode, leader); + + cluster.runFor( + (defaultMillis(FOLLOWER_CHECK_INTERVAL_SETTING) + defaultMillis(FOLLOWER_CHECK_TIMEOUT_SETTING)) + * defaultInt(FOLLOWER_CHECK_RETRY_COUNT_SETTING) + + (defaultMillis(LEADER_CHECK_INTERVAL_SETTING) + DEFAULT_DELAY_VARIABILITY) + * defaultInt(LEADER_CHECK_RETRY_COUNT_SETTING) + + DEFAULT_CLUSTER_STATE_UPDATE_DELAY, + "awaiting removal of asymmetrically-partitioned node"); + + assertThat(leader.getLastAppliedClusterState().nodes().toString(), + leader.getLastAppliedClusterState().nodes().getSize(), equalTo(2)); + + cluster.clearBlackholedConnections(); + + cluster.stabilise( + // time for the disconnected node to find the master again + defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2 + // time for joining + + 4 * DEFAULT_DELAY_VARIABILITY + // Then a commit of the updated cluster state + + DEFAULT_CLUSTER_STATE_UPDATE_DELAY); + } + + public void testSingleNodeDiscoveryWithoutQuorum() { + final Cluster cluster = new Cluster(3); + cluster.runRandomly(); + cluster.stabilise(); + + final ClusterNode clusterNode = cluster.getAnyNode(); + logger.debug("rebooting [{}]", clusterNode.getId()); + clusterNode.close(); + cluster.clusterNodes.forEach( + cn -> cluster.deterministicTaskQueue.scheduleNow(cn.onNode( + new Runnable() { + @Override + public void run() { + cn.transportService.disconnectFromNode(clusterNode.getLocalNode()); + } + + @Override + public String toString() { + return "disconnect from " + clusterNode.getLocalNode() + " after shutdown"; + } + }))); + IllegalStateException ise = expectThrows(IllegalStateException.class, + () -> cluster.clusterNodes.replaceAll(cn -> cn == clusterNode ? 
+ cn.restartedNode(Function.identity(), Function.identity(), Settings.builder() + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE).build()) : + cn)); + assertThat(ise.getMessage(), containsString("cannot start with [discovery.type] set to [single-node] when local node")); + assertThat(ise.getMessage(), containsString("does not have quorum in voting configuration")); + } + + public void testSingleNodeDiscoveryWithQuorum() { + final Cluster cluster = new Cluster(1, randomBoolean(), Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE).build()); + cluster.runRandomly(); + cluster.stabilise(); + } + + private static class BrokenCustom extends AbstractDiffable implements ClusterState.Custom { + + static final String EXCEPTION_MESSAGE = "simulated"; + + @Override + public String getWriteableName() { + return "broken"; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new ElasticsearchException(EXCEPTION_MESSAGE); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + } + + public void testClusterRecoversAfterExceptionDuringSerialization() { + final Cluster cluster = new Cluster(randomIntBetween(2, 5)); // 1-node cluster doesn't do any serialization + cluster.runRandomly(); + cluster.stabilise(); + + final ClusterNode leader1 = cluster.getAnyLeader(); + + logger.info("--> submitting broken task to [{}]", leader1); + + final AtomicBoolean failed = new AtomicBoolean(); + leader1.submitUpdateTask("broken-task", + cs -> ClusterState.builder(cs).putCustom("broken", new BrokenCustom()).build(), + (source, e) -> { + assertThat(e.getCause(), instanceOf(ElasticsearchException.class)); + assertThat(e.getCause().getMessage(), equalTo(BrokenCustom.EXCEPTION_MESSAGE)); + failed.set(true); + }); + cluster.runFor(DEFAULT_DELAY_VARIABILITY + 1, "processing broken task"); + assertTrue(failed.get()); + + cluster.stabilise(); + + final ClusterNode leader2 = cluster.getAnyLeader(); + long finalValue = randomLong(); + + logger.info("--> submitting value [{}] to [{}]", finalValue, leader2); + leader2.submitValue(finalValue); + cluster.stabilise(DEFAULT_CLUSTER_STATE_UPDATE_DELAY); + + for (final ClusterNode clusterNode : cluster.clusterNodes) { + final String nodeId = clusterNode.getId(); + final ClusterState appliedState = clusterNode.getLastAppliedClusterState(); + assertThat(nodeId + " has the applied value", value(appliedState), is(finalValue)); + } + } + private static long defaultMillis(Setting setting) { return setting.get(Settings.EMPTY).millis() + Cluster.DEFAULT_DELAY_VARIABILITY; } @@ -1140,7 +1279,10 @@ class Cluster { private final Set disconnectedNodes = new HashSet<>(); private final Set blackholedNodes = new HashSet<>(); + private final Set> blackholedConnections = new HashSet<>(); private final Map committedStatesByVersion = new HashMap<>(); + private final LinearizabilityChecker linearizabilityChecker = new LinearizabilityChecker(); + private final History history = new History(); private final Function defaultPersistedStateSupplier = MockPersistedState::new; @@ -1148,10 +1290,10 @@ class Cluster { private List seedHostsList; Cluster(int initialNodeCount) { - this(initialNodeCount, true); + this(initialNodeCount, true, Settings.EMPTY); } - Cluster(int initialNodeCount, boolean 
allNodesMasterEligible) { + Cluster(int initialNodeCount, boolean allNodesMasterEligible, Settings nodeSettings) { deterministicTaskQueue.setExecutionDelayVariabilityMillis(DEFAULT_DELAY_VARIABILITY); assertThat(initialNodeCount, greaterThan(0)); @@ -1160,7 +1302,7 @@ class Cluster { clusterNodes = new ArrayList<>(initialNodeCount); for (int i = 0; i < initialNodeCount; i++) { final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(), - allNodesMasterEligible || i == 0 || randomBoolean()); + allNodesMasterEligible || i == 0 || randomBoolean(), nodeSettings); clusterNodes.add(clusterNode); if (clusterNode.getLocalNode().isMasterNode()) { masterEligibleNodeIds.add(clusterNode.getId()); @@ -1193,7 +1335,7 @@ List addNodes(int newNodesCount) { final List addedNodes = new ArrayList<>(); for (int i = 0; i < newNodesCount; i++) { - final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(), true); + final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(), true, Settings.EMPTY); addedNodes.add(clusterNode); } clusterNodes.addAll(addedNodes); @@ -1220,6 +1362,7 @@ void runRandomly(boolean allowReboots) { cleanupActions.add(() -> disruptStorage = false); final int randomSteps = scaledRandomIntBetween(10, 10000); + final int keyRange = randomSteps / 50; // for randomized writes and reads logger.info("--> start of safety phase of at least [{}] steps", randomSteps); deterministicTaskQueue.setExecutionDelayVariabilityMillis(EXTREME_DELAY_VARIABILITY); @@ -1238,13 +1381,22 @@ void runRandomly(boolean allowReboots) { } try { - if (rarely()) { + if (finishTime == -1 && randomBoolean() && randomBoolean() && randomBoolean()) { final ClusterNode clusterNode = getAnyNodePreferringLeaders(); + final int key = randomIntBetween(0, keyRange); final int newValue = randomInt(); clusterNode.onNode(() -> { logger.debug("----> [runRandomly {}] proposing new value [{}] to [{}]", thisStep, newValue, clusterNode.getId()); - clusterNode.submitValue(newValue); + clusterNode.submitValue(key, newValue); + }).run(); + } else if (finishTime == -1 && randomBoolean() && randomBoolean() && randomBoolean()) { + final ClusterNode clusterNode = getAnyNodePreferringLeaders(); + final int key = randomIntBetween(0, keyRange); + clusterNode.onNode(() -> { + logger.debug("----> [runRandomly {}] reading value from [{}]", + thisStep, clusterNode.getId()); + clusterNode.readValue(key); }).run(); } else if (rarely()) { final ClusterNode clusterNode = getAnyNodePreferringLeaders(); @@ -1365,6 +1517,10 @@ void stabilise(long stabilisationDurationMillis) { final ClusterNode leader = getAnyLeader(); final long leaderTerm = leader.coordinator.getCurrentTerm(); + + final int pendingTaskCount = leader.masterService.getFakeMasterServicePendingTaskCount(); + runFor((pendingTaskCount + 1) * DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "draining task queue"); + final Matcher isEqualToLeaderVersion = equalTo(leader.coordinator.getLastAcceptedState().getVersion()); final String leaderId = leader.getId(); @@ -1377,6 +1533,8 @@ void stabilise(long stabilisationDurationMillis) { assertFalse(nodeId + " should not have an active publication", clusterNode.coordinator.publicationInProgress()); if (clusterNode == leader) { + assertThat(nodeId + " is still the leader", clusterNode.coordinator.getMode(), is(LEADER)); + assertThat(nodeId + " did not change term", clusterNode.coordinator.getCurrentTerm(), is(leaderTerm)); continue; } @@ -1425,6 +1583,11 @@ void stabilise(long stabilisationDurationMillis) { 
lastAcceptedState.getLastCommittedConfiguration(), equalTo(lastAcceptedState.getLastAcceptedConfiguration())); assertThat("current configuration is already optimal", leader.improveConfiguration(lastAcceptedState), sameInstance(lastAcceptedState)); + + logger.info("checking linearizability of history with size {}: {}", history.size(), history); + // See https://github.com/elastic/elasticsearch/issues/39437 + //assertTrue("history not linearizable: " + history, linearizabilityChecker.isLinearizable(spec, history, i -> null)); + logger.info("linearizability check completed"); } void bootstrapIfNecessary() { @@ -1490,6 +1653,8 @@ private ConnectionStatus getConnectionStatus(DiscoveryNode sender, DiscoveryNode connectionStatus = ConnectionStatus.BLACK_HOLE; } else if (disconnectedNodes.contains(sender.getId()) || disconnectedNodes.contains(destination.getId())) { connectionStatus = ConnectionStatus.DISCONNECTED; + } else if (blackholedConnections.contains(Tuple.tuple(sender.getId(), destination.getId()))) { + connectionStatus = ConnectionStatus.BLACK_HOLE_REQUESTS_ONLY; } else if (nodeExists(sender) && nodeExists(destination)) { connectionStatus = ConnectionStatus.CONNECTED; } else { @@ -1540,6 +1705,14 @@ void setEmptySeedHostsList() { seedHostsList = emptyList(); } + void blackholeConnectionsFrom(ClusterNode sender, ClusterNode destination) { + blackholedConnections.add(Tuple.tuple(sender.getId(), destination.getId())); + } + + void clearBlackholedConnections() { + blackholedConnections.clear(); + } + class MockPersistedState implements PersistedState { private final PersistedState delegate; private final NodeEnvironment nodeEnvironment; @@ -1641,6 +1814,7 @@ class ClusterNode { private Coordinator coordinator; private final DiscoveryNode localNode; private final MockPersistedState persistedState; + private final Settings nodeSettings; private AckedFakeThreadPoolMasterService masterService; private DisruptableClusterApplierService clusterApplierService; private ClusterService clusterService; @@ -1648,13 +1822,15 @@ class ClusterNode { private DisruptableMockTransport mockTransport; private List> extraJoinValidators = new ArrayList<>(); - ClusterNode(int nodeIndex, boolean masterEligible) { - this(nodeIndex, createDiscoveryNode(nodeIndex, masterEligible), defaultPersistedStateSupplier); + ClusterNode(int nodeIndex, boolean masterEligible, Settings nodeSettings) { + this(nodeIndex, createDiscoveryNode(nodeIndex, masterEligible), defaultPersistedStateSupplier, nodeSettings); } - ClusterNode(int nodeIndex, DiscoveryNode localNode, Function persistedStateSupplier) { + ClusterNode(int nodeIndex, DiscoveryNode localNode, Function persistedStateSupplier, + Settings nodeSettings) { this.nodeIndex = nodeIndex; this.localNode = localNode; + this.nodeSettings = nodeSettings; persistedState = persistedStateSupplier.apply(localNode); onNodeLog(localNode, this::setUp).run(); } @@ -1678,7 +1854,8 @@ protected Optional getDisruptableMockTransport(Transpo } }; - final Settings settings = Settings.builder() + final Settings settings = nodeSettings.hasValue(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey()) ? 
+ nodeSettings : Settings.builder().put(nodeSettings) .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)).build(); // suppress auto-bootstrap transportService = mockTransport.createTransportService( @@ -1726,17 +1903,18 @@ void close() { } ClusterNode restartedNode() { - return restartedNode(Function.identity(), Function.identity()); + return restartedNode(Function.identity(), Function.identity(), nodeSettings); } - ClusterNode restartedNode(Function adaptGlobalMetaData, Function adaptCurrentTerm) { + ClusterNode restartedNode(Function adaptGlobalMetaData, Function adaptCurrentTerm, + Settings nodeSettings) { final TransportAddress address = randomBoolean() ? buildNewFakeTransportAddress() : localNode.getAddress(); final DiscoveryNode newLocalNode = new DiscoveryNode(localNode.getName(), localNode.getId(), UUIDs.randomBase64UUID(random()), // generated deterministically for repeatable tests address.address().getHostString(), address.getAddress(), address, Collections.emptyMap(), localNode.isMasterNode() ? EnumSet.allOf(Role.class) : emptySet(), Version.CURRENT); return new ClusterNode(nodeIndex, newLocalNode, - node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetaData, adaptCurrentTerm)); + node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetaData, adaptCurrentTerm), nodeSettings); } private PersistedState getPersistedState() { @@ -1801,14 +1979,55 @@ void submitSetAutoShrinkVotingConfiguration(final boolean autoShrinkVotingConfig .put(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.getKey(), autoShrinkVotingConfiguration) .build()) .build()) - .build()); + .build(), (source, e) -> {}); } AckCollector submitValue(final long value) { - return submitUpdateTask("new value [" + value + "]", cs -> setValue(cs, value)); + return submitValue(0, value); + } + + AckCollector submitValue(final int key, final long value) { + final int eventId = history.invoke(new Tuple<>(key, value)); + return submitUpdateTask("new value [" + value + "]", cs -> setValue(cs, key, value), new ClusterStateTaskListener() { + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + history.respond(eventId, value(oldState, key)); + } + + @Override + public void onNoLongerMaster(String source) { + // in this case, we know for sure that event was not processed by the system and will not change history + // remove event to help avoid bloated history and state space explosion in linearizability checker + history.remove(eventId); + } + + @Override + public void onFailure(String source, Exception e) { + // do not remove event from history, the write might still take place + // instead, complete history when checking for linearizability + } + }); + } + + void readValue(int key) { + final int eventId = history.invoke(new Tuple<>(key, null)); + submitUpdateTask("read value", cs -> ClusterState.builder(cs).build(), new ClusterStateTaskListener() { + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + history.respond(eventId, value(newState, key)); + } + + @Override + public void onFailure(String source, Exception e) { + // reads do not change state + // remove event to help avoid bloated history and state space explosion in linearizability checker + history.remove(eventId); + } + }); } - AckCollector submitUpdateTask(String source, UnaryOperator clusterStateUpdate) { + AckCollector 
submitUpdateTask(String source, UnaryOperator clusterStateUpdate, + ClusterStateTaskListener taskListener) { final AckCollector ackCollector = new AckCollector(); onNode(() -> { logger.trace("[{}] submitUpdateTask: enqueueing [{}]", localNode.getId(), source); @@ -1825,6 +2044,13 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { logger.debug(() -> new ParameterizedMessage("failed to publish: [{}]", source), e); + taskListener.onFailure(source, e); + } + + @Override + public void onNoLongerMaster(String source) { + logger.trace("no longer master: [{}]", source); + taskListener.onNoLongerMaster(source); } @Override @@ -1832,8 +2058,9 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS updateCommittedStates(); ClusterState state = committedStatesByVersion.get(newState.version()); assertNotNull("State not committed : " + newState.toString(), state); - assertEquals(value(state), value(newState)); + assertStateEquals(state, newState); logger.trace("successfully published: [{}]", newState); + taskListener.clusterStateProcessed(source, oldState, newState); } }); }).run(); @@ -2067,4 +2294,85 @@ enum ClusterStateApplyResponse { HANG, } + public ClusterState setValue(ClusterState clusterState, int key, long value) { + return ClusterState.builder(clusterState).metaData( + MetaData.builder(clusterState.metaData()) + .persistentSettings(Settings.builder() + .put(clusterState.metaData().persistentSettings()) + .put("value_" + key, value) + .build()) + .build()) + .build(); + } + + public long value(ClusterState clusterState) { + return value(clusterState, 0); + } + + public long value(ClusterState clusterState, int key) { + return clusterState.metaData().persistentSettings().getAsLong("value_" + key, 0L); + } + + public void assertStateEquals(ClusterState clusterState1, ClusterState clusterState2) { + assertEquals(clusterState1.version(), clusterState2.version()); + assertEquals(clusterState1.term(), clusterState2.term()); + assertEquals(keySet(clusterState1), keySet(clusterState2)); + for (int key : keySet(clusterState1)) { + assertEquals(value(clusterState1, key), value(clusterState2, key)); + } + } + + public Set keySet(ClusterState clusterState) { + return clusterState.metaData().persistentSettings().keySet().stream() + .filter(s -> s.startsWith("value_")).map(s -> Integer.valueOf(s.substring("value_".length()))).collect(Collectors.toSet()); + } + + /** + * Simple register model. Writes are modeled by providing an integer input. Reads are modeled by providing null as input. + * Responses that time out are modeled by returning null. Successful writes return the previous value of the register. 
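(Editorial aside, not part of this change: a minimal sketch of how the test harness records such operations in the checker's History, mirroring submitValue and readValue above; the key 3, the values 0L and 7L, and the class name RegisterHistorySketch are made up for illustration.)

import org.elasticsearch.cluster.coordination.LinearizabilityChecker.History;
import org.elasticsearch.common.collect.Tuple;

public class RegisterHistorySketch {
    public static void main(String[] args) {
        final History history = new History();

        // a write of 7 to key 3 is invoked...
        final int writeId = history.invoke(new Tuple<>(3, 7L));
        // ...and, once the publication is processed, responds with the previous value of key 3
        history.respond(writeId, 0L);

        // a read of key 3 is modelled as a null input...
        final int readId = history.invoke(new Tuple<>(3, null));
        // ...and responds with the value observed in the applied cluster state
        history.respond(readId, 7L);

        // operations that never hear back are left open; when checking, the history is
        // completed with null responses, which the spec below treats as possible timeouts
        System.out.println(history.size()); // prints 4: two invocations, two responses
    }
}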
+ */ + private final SequentialSpec spec = new LinearizabilityChecker.KeyedSpec() { + @Override + public Object getKey(Object value) { + return ((Tuple) value).v1(); + } + + @Override + public Object getValue(Object value) { + return ((Tuple) value).v2(); + } + + @Override + public Object initialState() { + return 0L; + } + + @Override + public Optional nextState(Object currentState, Object input, Object output) { + // null input is read, non-null is write + if (input == null) { + // history is completed with null, simulating timeout, which assumes that read went through + if (output == null || currentState.equals(output)) { + return Optional.of(currentState); + } + return Optional.empty(); + } else { + if (output == null || currentState.equals(output)) { + // history is completed with null, simulating timeout, which assumes that write went through + return Optional.of(input); + } + return Optional.empty(); + } + } + }; + + public void testRegisterSpecConsistency() { + assertThat(spec.initialState(), equalTo(0L)); + assertThat(spec.nextState(7, 42, 7), equalTo(Optional.of(42))); // successful write 42 returns previous value 7 + assertThat(spec.nextState(7, 42, null), equalTo(Optional.of(42))); // write 42 times out + assertThat(spec.nextState(7, null, 7), equalTo(Optional.of(7))); // successful read + assertThat(spec.nextState(7, null, null), equalTo(Optional.of(7))); // read times out + assertThat(spec.nextState(7, null, 42), equalTo(Optional.empty())); + } + } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java index 4f1016847c887..a57810b88e500 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java @@ -33,7 +33,6 @@ import org.elasticsearch.test.EqualsHashCodeTestUtils.CopyFunction; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.test.transport.MockTransport; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportException; @@ -73,7 +72,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsInstanceOf.instanceOf; -import static org.mockito.Mockito.mock; public class FollowersCheckerTests extends ESTestCase { @@ -548,14 +546,16 @@ public String executor() { } } - private void testPreferMasterNodes() { + public void testPreferMasterNodes() { List nodes = randomNodes(10); DiscoveryNodes.Builder discoNodesBuilder = DiscoveryNodes.builder(); nodes.forEach(dn -> discoNodesBuilder.add(dn)); DiscoveryNodes discoveryNodes = discoNodesBuilder.localNodeId(nodes.get(0).getId()).build(); CapturingTransport capturingTransport = new CapturingTransport(); - TransportService transportService = capturingTransport.createTransportService(Settings.EMPTY, mock(ThreadPool.class), - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> nodes.get(0), null, emptySet()); + final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), nodes.get(0).getName()).build(); + final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); + TransportService transportService = capturingTransport.createTransportService(Settings.EMPTY, + 
deterministicTaskQueue.getThreadPool(), TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> nodes.get(0), null, emptySet()); final FollowersChecker followersChecker = new FollowersChecker(Settings.EMPTY, transportService, fcr -> { assert false : fcr; }, (node, reason) -> { diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java index 4361660876c7a..d354c1d46b2b3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java @@ -18,12 +18,23 @@ */ package org.elasticsearch.cluster.coordination; +import org.apache.logging.log4j.Level; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.NotMasterException; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.zen.MembershipAction; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.test.transport.CapturingTransport.CapturedRequest; +import org.elasticsearch.test.transport.MockTransport; +import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; @@ -31,7 +42,9 @@ import java.util.Optional; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.core.Is.is; public class JoinHelperTests extends ESTestCase { @@ -51,6 +64,8 @@ public void testJoinDeduplication() { DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT); + assertFalse(joinHelper.isJoinPending()); + // check that sending a join to node1 works Optional optionalJoin1 = randomBoolean() ? Optional.empty() : Optional.of(new Join(localNode, node1, randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong())); @@ -60,6 +75,8 @@ public void testJoinDeduplication() { CapturedRequest capturedRequest1 = capturedRequests1[0]; assertEquals(node1, capturedRequest1.node); + assertTrue(joinHelper.isJoinPending()); + // check that sending a join to node2 works Optional optionalJoin2 = randomBoolean() ? 
Optional.empty() : Optional.of(new Join(localNode, node2, randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong())); @@ -95,5 +112,75 @@ public void testJoinDeduplication() { assertThat(capturedRequests2a.length, equalTo(1)); CapturedRequest capturedRequest2a = capturedRequests2a[0]; assertEquals(node2, capturedRequest2a.node); + + // complete all the joins and check that isJoinPending is updated + assertTrue(joinHelper.isJoinPending()); + capturingTransport.handleRemoteError(capturedRequest2.requestId, new CoordinationStateRejectedException("dummy")); + capturingTransport.handleRemoteError(capturedRequest1a.requestId, new CoordinationStateRejectedException("dummy")); + capturingTransport.handleRemoteError(capturedRequest2a.requestId, new CoordinationStateRejectedException("dummy")); + assertFalse(joinHelper.isJoinPending()); + } + + public void testFailedJoinAttemptLogLevel() { + assertThat(JoinHelper.FailedJoinAttempt.getLogLevel(new TransportException("generic transport exception")), is(Level.INFO)); + + assertThat(JoinHelper.FailedJoinAttempt.getLogLevel( + new RemoteTransportException("remote transport exception with generic cause", new Exception())), is(Level.INFO)); + + assertThat(JoinHelper.FailedJoinAttempt.getLogLevel( + new RemoteTransportException("caused by CoordinationStateRejectedException", + new CoordinationStateRejectedException("test"))), is(Level.DEBUG)); + + assertThat(JoinHelper.FailedJoinAttempt.getLogLevel( + new RemoteTransportException("caused by FailedToCommitClusterStateException", + new FailedToCommitClusterStateException("test"))), is(Level.DEBUG)); + + assertThat(JoinHelper.FailedJoinAttempt.getLogLevel( + new RemoteTransportException("caused by NotMasterException", + new NotMasterException("test"))), is(Level.DEBUG)); + } + + public void testZen1JoinValidationRejectsMismatchedClusterUUID() { + assertJoinValidationRejectsMismatchedClusterUUID(MembershipAction.DISCOVERY_JOIN_VALIDATE_ACTION_NAME, + "mixed-version cluster join validation on cluster state with a different cluster uuid"); + } + + public void testJoinValidationRejectsMismatchedClusterUUID() { + assertJoinValidationRejectsMismatchedClusterUUID(JoinHelper.VALIDATE_JOIN_ACTION_NAME, + "join validation on cluster state with a different cluster uuid"); + } + + private void assertJoinValidationRejectsMismatchedClusterUUID(String actionName, String expectedMessage) { + DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue( + Settings.builder().put(NODE_NAME_SETTING.getKey(), "node0").build(), random()); + MockTransport mockTransport = new MockTransport(); + DiscoveryNode localNode = new DiscoveryNode("node0", buildNewFakeTransportAddress(), Version.CURRENT); + + final ClusterState localClusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(MetaData.builder() + .generateClusterUuidIfNeeded().clusterUUIDCommitted(true)).build(); + + TransportService transportService = mockTransport.createTransportService(Settings.EMPTY, + deterministicTaskQueue.getThreadPool(), TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> localNode, null, Collections.emptySet()); + new JoinHelper(Settings.EMPTY, null, null, transportService, () -> 0L, () -> localClusterState, + (joinRequest, joinCallback) -> { throw new AssertionError(); }, startJoinRequest -> { throw new AssertionError(); }, + Collections.emptyList()); // registers request handler + transportService.start(); + transportService.acceptIncomingRequests(); + + final ClusterState otherClusterState = 
ClusterState.builder(ClusterName.DEFAULT).metaData(MetaData.builder() + .generateClusterUuidIfNeeded()).build(); + + final PlainActionFuture future = new PlainActionFuture<>(); + transportService.sendRequest(localNode, actionName, + new ValidateJoinRequest(otherClusterState), + new ActionListenerResponseHandler<>(future, in -> TransportResponse.Empty.INSTANCE)); + deterministicTaskQueue.runAllTasks(); + + final CoordinationStateRejectedException coordinationStateRejectedException + = expectThrows(CoordinationStateRejectedException.class, future::actionGet); + assertThat(coordinationStateRejectedException.getMessage(), containsString(expectedMessage)); + assertThat(coordinationStateRejectedException.getMessage(), containsString(localClusterState.metaData().clusterUUID())); + assertThat(coordinationStateRejectedException.getMessage(), containsString(otherClusterState.metaData().clusterUUID())); } } diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java rename to server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java index cafe050726c54..35fa5786bbda3 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.discovery.zen; +package org.elasticsearch.cluster.coordination; import org.elasticsearch.Version; import org.elasticsearch.cluster.coordination.JoinTaskExecutor; @@ -36,7 +36,7 @@ import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; -public class MembershipActionTests extends ESTestCase { +public class JoinTaskExecutorTests extends ESTestCase { public void testPreventJoinClusterWithNewerIndices() { Settings.builder().build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java b/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java new file mode 100644 index 0000000000000..94188c0fa5a45 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java @@ -0,0 +1,376 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.cluster.coordination; + +import org.apache.lucene.util.FixedBitSet; +import org.elasticsearch.common.collect.Tuple; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Deque; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.Function; + +/** + * Basic implementation of the Wing and Gong Graph Search Algorithm, following the descriptions in + * Gavin Lowe: Testing for linearizability + * Concurrency and Computation: Practice and Experience 29, 4 (2017). http://dx.doi.org/10.1002/cpe.3928 + * Alex Horn and Daniel Kroening: Faster linearizability checking via P-compositionality + * FORTE (2015). http://dx.doi.org/10.1007/978-3-319-19195-9_4 + */ +public class LinearizabilityChecker { + + /** + * Sequential specification of a datatype. Used as input for the linearizability checker. + * All parameter and return values should be immutable and have proper equals / hashCode implementations + */ + public interface SequentialSpec { + /** + * Returns the initial state of the datatype + */ + Object initialState(); + + /** + * Next-state function, checking whether transitioning the datatype in the given state under the provided input and output is valid. + * + * @param currentState the current state of the datatype + * @param input the input, associated with the given invocation event + * @param output the output, associated with the corresponding response event + * @return the next state, if the given current state, input and output are a valid transition, or Optional.empty() otherwise + */ + Optional nextState(Object currentState, Object input, Object output); + + /** + * For compositional checking, the history can be partitioned into sub-histories + * + * @param events the history of events to partition + * @return the partitioned history + */ + default Collection> partition(List events) { + return Collections.singleton(events); + } + } + + /** + * Sequential specification of a datatype that allows for keyed access, + * providing compositional checking (see {@link SequentialSpec#partition(List)}). + */ + public interface KeyedSpec extends SequentialSpec { + /** + * extracts the key from the given keyed invocation input value + */ + Object getKey(Object value); + + /** + * extracts the key-less value from the given keyed invocation input value + */ + Object getValue(Object value); + + @Override + default Collection> partition(List events) { + final Map> keyedPartitions = new HashMap<>(); + final Map matches = new HashMap<>(); + for (Event event : events) { + if (event.type == EventType.INVOCATION) { + final Object key = getKey(event.value); + final Object val = getValue(event.value); + final Event unfoldedEvent = new Event(EventType.INVOCATION, val, event.id); + keyedPartitions.computeIfAbsent(key, k -> new ArrayList<>()).add(unfoldedEvent); + matches.put(event.id, key); + } else { + final Object key = matches.get(event.id); + keyedPartitions.get(key).add(event); + } + } + return keyedPartitions.values(); + } + } + + /** + * Sequence of invocations and responses, recording the run of a concurrent system. 
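(Editorial aside, not part of the new file: a minimal end-to-end sketch of how a History and a SequentialSpec are intended to be used together; the last-write-wins register, the values, and the class name LinearizabilitySketch are illustrative, and nextState is assumed to return Optional<Object>, a generic parameter this rendering does not show.)

import java.util.Optional;

import org.elasticsearch.cluster.coordination.LinearizabilityChecker;
import org.elasticsearch.cluster.coordination.LinearizabilityChecker.History;
import org.elasticsearch.cluster.coordination.LinearizabilityChecker.SequentialSpec;

public class LinearizabilitySketch {
    public static void main(String[] args) {
        // trivial register: writes are non-null inputs, reads are null inputs returning the value
        final SequentialSpec registerSpec = new SequentialSpec() {
            @Override
            public Object initialState() {
                return 0; // register starts at 0
            }

            @Override
            public Optional<Object> nextState(Object currentState, Object input, Object output) {
                if (input != null) {
                    return Optional.of(input); // write: next state is the written value
                }
                // read: only valid if it observed the current state
                return currentState.equals(output) ? Optional.of(currentState) : Optional.empty();
            }
        };

        final History history = new History();
        final int write = history.invoke(42);  // write(42) invoked
        final int read = history.invoke(null); // concurrent read invoked
        history.respond(read, 42);             // read observed 42
        history.respond(write, null);          // write acknowledged

        // linearizable: the write can be ordered before the overlapping read
        System.out.println(new LinearizabilityChecker().isLinearizable(registerSpec, history));
    }
}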
+ */ + public static class History { + private final List events; + private int nextId; + + public History() { + events = new ArrayList<>(); + nextId = 0; + } + + /** + * Appends a new invocation event to the history + * + * @param input the input value associated with the invocation event + * @return an id that can be used to record the corresponding response event + */ + public int invoke(Object input) { + final int id = nextId++; + events.add(new Event(EventType.INVOCATION, input, id)); + return id; + } + + /** + * Appends a new response event to the history + * + * @param id the id of the corresponding invocation event + * @param output the output value associated with the response event + */ + public void respond(int id, Object output) { + events.add(new Event(EventType.RESPONSE, output, id)); + } + + /** + * Removes the events with the corresponding id from the history + * + * @param id the value of the id to remove + */ + public void remove(int id) { + events.removeIf(e -> e.id == id); + } + + /** + * Completes the history with response events for invocations that are missing corresponding responses + * + * @param missingResponseGenerator a function from invocation input to response output, used to generate the corresponding response + */ + public void complete(Function missingResponseGenerator) { + final Map uncompletedInvocations = new HashMap<>(); + for (Event event : events) { + if (event.type == EventType.INVOCATION) { + uncompletedInvocations.put(event.id, event); + } else { + final Event removed = uncompletedInvocations.remove(event.id); + if (removed == null) { + throw new IllegalArgumentException("history not well-formed: " + events); + } + } + } + for (Map.Entry entry : uncompletedInvocations.entrySet()) { + events.add(new Event(EventType.RESPONSE, missingResponseGenerator.apply(entry.getValue().value), entry.getKey())); + } + } + + @Override + public History clone() { + final History history = new History(); + history.events.addAll(events); + history.nextId = nextId; + return history; + } + + /** + * Returns the number of recorded events + */ + public int size() { + return events.size(); + } + + @Override + public String toString() { + return "History{" + + "events=" + events + + ", nextId=" + nextId + + '}'; + } + } + + /** + * Checks whether the provided history is linearizable with respect to the given sequential specification + * + * @param spec the sequential specification of the datatype + * @param history the history of events to check for linearizability + * @param missingResponseGenerator used to complete the history with missing responses + * @return true iff the history is linearizable w.r.t. 
the given spec + */ + public boolean isLinearizable(SequentialSpec spec, History history, Function missingResponseGenerator) { + history = history.clone(); // clone history before completing it + history.complete(missingResponseGenerator); // complete history + final Collection> partitions = spec.partition(history.events); + return partitions.stream().allMatch(h -> isLinearizable(spec, h)); + } + + private boolean isLinearizable(SequentialSpec spec, List history) { + Object state = spec.initialState(); // the current state of the datatype + final FixedBitSet linearized = new FixedBitSet(history.size() / 2); // the linearized prefix of the history + + final Set> cache = new HashSet<>(); // cache of explored pairs + final Deque> calls = new LinkedList<>(); // path we're currently exploring + + final Entry headEntry = createLinkedEntries(history); + Entry entry = headEntry.next; // current entry + + while (headEntry.next != null) { + if (entry.match != null) { + final Optional maybeNextState = spec.nextState(state, entry.event.value, entry.match.event.value); + boolean shouldExploreNextState = false; + if (maybeNextState.isPresent()) { + // check if we have already explored this linearization + final FixedBitSet updatedLinearized = linearized.clone(); + updatedLinearized.set(entry.id); + shouldExploreNextState = cache.add(new Tuple<>(maybeNextState.get(), updatedLinearized)); + } + if (shouldExploreNextState) { + calls.push(new Tuple<>(entry, state)); + state = maybeNextState.get(); + linearized.set(entry.id); + entry.lift(); + entry = headEntry.next; + } else { + entry = entry.next; + } + } else { + if (calls.isEmpty()) { + return false; + } + final Tuple top = calls.pop(); + entry = top.v1(); + state = top.v2(); + linearized.clear(entry.id); + entry.unlift(); + entry = entry.next; + } + } + return true; + } + + /** + * Convenience method for {@link #isLinearizable(SequentialSpec, History, Function)} that requires the history to be complete + */ + public boolean isLinearizable(SequentialSpec spec, History history) { + return isLinearizable(spec, history, o -> { + throw new IllegalArgumentException("history is not complete"); + }); + } + + /** + * Creates the internal linked data structure used by the linearizability checker. + * Generates contiguous internal ids for the events so that they can be efficiently recorded in bit sets. 
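(Editorial aside, not part of the new file: during the search, a tentatively linearized invocation is spliced out of this linked structure and spliced back in when the search backtracks; the real Entry.lift() additionally unlinks the matching response entry. A stand-alone sketch of the splice/unsplice idea, using hypothetical names such as SpliceNode:)

final class SpliceNode {
    final String label;
    SpliceNode prev;
    SpliceNode next;

    SpliceNode(String label) {
        this.label = label;
    }

    // splice this node out of the list, keeping its own prev/next links intact
    void lift() {
        prev.next = next;
        if (next != null) {
            next.prev = prev;
        }
    }

    // splice this node back in at the position remembered by its own links
    void unlift() {
        prev.next = this;
        if (next != null) {
            next.prev = this;
        }
    }

    public static void main(String[] args) {
        SpliceNode head = new SpliceNode("sentinel");
        SpliceNode a = new SpliceNode("a");
        SpliceNode b = new SpliceNode("b");
        head.next = a; a.prev = head;
        a.next = b;    b.prev = a;

        a.lift();                            // remaining list: head -> b
        System.out.println(head.next.label); // prints "b"
        a.unlift();                          // backtrack: head -> a -> b
        System.out.println(head.next.label); // prints "a"
    }
}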
+ */ + private static Entry createLinkedEntries(List history) { + if (history.size() % 2 != 0) { + throw new IllegalArgumentException("mismatch between number of invocations and responses"); + } + + // first, create entries and link response events to invocation events + final Map matches = new HashMap<>(); // map from event id to matching response entry + final Entry[] entries = new Entry[history.size()]; + int nextInternalId = (history.size() / 2) - 1; + for (int i = history.size() - 1; i >= 0; i--) { + final Event elem = history.get(i); + if (elem.type == EventType.RESPONSE) { + final Entry entry = entries[i] = new Entry(elem, null, nextInternalId--); + final Entry prev = matches.put(elem.id, entry); + if (prev != null) { + throw new IllegalArgumentException("duplicate response with id " + elem.id); + } + } else { + final Entry matchingResponse = matches.get(elem.id); + if (matchingResponse == null) { + throw new IllegalArgumentException("no matching response found for " + elem); + } + entries[i] = new Entry(elem, matchingResponse, matchingResponse.id); + } + } + + // sanity check + if (nextInternalId != -1) { + throw new IllegalArgumentException("id mismatch"); + } + + // now link entries together in history order, and add a sentinel node at the beginning + Entry first = new Entry(null, null, -1); + Entry lastEntry = first; + for (Entry entry : entries) { + lastEntry.next = entry; + entry.prev = lastEntry; + lastEntry = entry; + } + + return first; + } + + enum EventType { + INVOCATION, + RESPONSE + } + + public static class Event { + public final EventType type; + public final Object value; + public final int id; + + public Event(EventType type, Object value, int id) { + this.type = type; + this.value = value; + this.id = id; + } + + @Override + public String toString() { + return "Event{" + + "type=" + type + + ", value=" + value + + ", id=" + id + + '}'; + } + } + + static class Entry { + final Event event; + final Entry match; // null if current entry is a response, non-null if it's an invocation + final int id; // internal id, distinct from Event.id + Entry prev; + Entry next; + + Entry(Event event, Entry match, int id) { + this.event = event; + this.match = match; + this.id = id; + } + + // removes this entry from the surrounding structures + void lift() { + prev.next = next; + next.prev = prev; + match.prev.next = match.next; + if (match.next != null) { + match.next.prev = match.prev; + } + } + + // reinserts this entry into the surrounding structures + void unlift() { + match.prev.next = match; + if (match.next != null) { + match.next.prev = match; + } + prev.next = this; + next.prev = this; + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityCheckerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityCheckerTests.java new file mode 100644 index 0000000000000..4b2acd72877dc --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityCheckerTests.java @@ -0,0 +1,271 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.coordination; + +import org.elasticsearch.cluster.coordination.LinearizabilityChecker.History; +import org.elasticsearch.cluster.coordination.LinearizabilityChecker.KeyedSpec; +import org.elasticsearch.cluster.coordination.LinearizabilityChecker.SequentialSpec; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.test.ESTestCase; + +import java.util.Optional; + +import static org.hamcrest.Matchers.equalTo; + +public class LinearizabilityCheckerTests extends ESTestCase { + + final LinearizabilityChecker checker = new LinearizabilityChecker(); + + /** + * Simple specification of a lock that can be exactly locked once. There is no unlocking. + * Input is always null (and represents lock acquisition), output is a boolean whether lock was acquired. + */ + final SequentialSpec lockSpec = new SequentialSpec() { + + @Override + public Object initialState() { + return false; + } + + @Override + public Optional nextState(Object currentState, Object input, Object output) { + if (input != null) { + throw new AssertionError("invalid history: input must be null"); + } + if (output instanceof Boolean == false) { + throw new AssertionError("invalid history: output must be boolean"); + } + if (false == (boolean) currentState) { + if (false == (boolean) output) { + return Optional.empty(); + } + return Optional.of(true); + } else if (false == (boolean) output) { + return Optional.of(currentState); + } + return Optional.empty(); + } + }; + + public void testLockConsistent() { + assertThat(lockSpec.initialState(), equalTo(false)); + assertThat(lockSpec.nextState(false, null, true), equalTo(Optional.of(true))); + assertThat(lockSpec.nextState(false, null, false), equalTo(Optional.empty())); + assertThat(lockSpec.nextState(true, null, false), equalTo(Optional.of(true))); + assertThat(lockSpec.nextState(true, null, true), equalTo(Optional.empty())); + } + + public void testLockWithLinearizableHistory1() { + final History history = new History(); + int call0 = history.invoke(null); // 0: acquire lock + history.respond(call0, true); // 0: lock acquisition succeeded + int call1 = history.invoke(null); // 1: acquire lock + history.respond(call1, false); // 0: lock acquisition failed + assertTrue(checker.isLinearizable(lockSpec, history)); + } + + public void testLockWithLinearizableHistory2() { + final History history = new History(); + int call0 = history.invoke(null); // 0: acquire lock + int call1 = history.invoke(null); // 1: acquire lock + history.respond(call0, false); // 0: lock acquisition failed + history.respond(call1, true); // 0: lock acquisition succeeded + assertTrue(checker.isLinearizable(lockSpec, history)); + } + + public void testLockWithLinearizableHistory3() { + final History history = new History(); + int call0 = history.invoke(null); // 0: acquire lock + int call1 = history.invoke(null); // 1: acquire lock + history.respond(call0, true); // 0: lock acquisition succeeded + history.respond(call1, false); // 0: lock acquisition failed + assertTrue(checker.isLinearizable(lockSpec, history)); + } + + public void 
testLockWithNonLinearizableHistory() { + final History history = new History(); + int call0 = history.invoke(null); // 0: acquire lock + history.respond(call0, false); // 0: lock acquisition failed + int call1 = history.invoke(null); // 1: acquire lock + history.respond(call1, true); // 0: lock acquisition succeeded + assertFalse(checker.isLinearizable(lockSpec, history)); + } + + /** + * Simple specification of a read/write register. + * Writes are modeled as integer inputs (with corresponding null responses) and + * reads are modeled as null inputs with integer outputs. + */ + final SequentialSpec registerSpec = new SequentialSpec() { + + @Override + public Object initialState() { + return 0; + } + + @Override + public Optional nextState(Object currentState, Object input, Object output) { + if ((input == null) == (output == null)) { + throw new AssertionError("invalid history: exactly one of input or output must be null"); + } + if (input != null) { + return Optional.of(input); + } else if (output.equals(currentState)) { + return Optional.of(currentState); + } + return Optional.empty(); + } + }; + + public void testRegisterConsistent() { + assertThat(registerSpec.initialState(), equalTo(0)); + assertThat(registerSpec.nextState(7, 42, null), equalTo(Optional.of(42))); + assertThat(registerSpec.nextState(7, null, 7), equalTo(Optional.of(7))); + assertThat(registerSpec.nextState(7, null, 42), equalTo(Optional.empty())); + } + + public void testRegisterWithLinearizableHistory() { + final History history = new History(); + int call0 = history.invoke(42); // 0: invoke write 42 + int call1 = history.invoke(null); // 1: invoke read + int call2 = history.invoke(null); // 2: invoke read + history.respond(call2, 0); // 2: read returns 0 + history.respond(call1, 42); // 1: read returns 42 + + expectThrows(IllegalArgumentException.class, () -> checker.isLinearizable(registerSpec, history)); + assertTrue(checker.isLinearizable(registerSpec, history, i -> null)); + + history.respond(call0, null); // 0: write returns + assertTrue(checker.isLinearizable(registerSpec, history)); + } + + public void testRegisterWithNonLinearizableHistory() { + final History history = new History(); + int call0 = history.invoke(42); // 0: invoke write 42 + int call1 = history.invoke(null); // 1: invoke read + history.respond(call1, 42); // 1: read returns 42 + int call2 = history.invoke(null); // 2: invoke read + history.respond(call2, 0); // 2: read returns 0, not allowed + + expectThrows(IllegalArgumentException.class, () -> checker.isLinearizable(registerSpec, history)); + assertFalse(checker.isLinearizable(registerSpec, history, i -> null)); + + history.respond(call0, null); // 0: write returns + assertFalse(checker.isLinearizable(registerSpec, history)); + } + + public void testRegisterObservedSequenceOfUpdatesWitLinearizableHistory() { + final History history = new History(); + int call0 = history.invoke(42); // 0: invoke write 42 + int call1 = history.invoke(43); // 1: invoke write 43 + int call2 = history.invoke(null); // 2: invoke read + history.respond(call2, 42); // 1: read returns 42 + int call3 = history.invoke(null); // 3: invoke read + history.respond(call3, 43); // 3: read returns 43 + int call4 = history.invoke(null); // 4: invoke read + history.respond(call4, 43); // 4: read returns 43 + + history.respond(call0, null); // 0: write returns + history.respond(call1, null); // 1: write returns + + assertTrue(checker.isLinearizable(registerSpec, history)); + } + + public void 
+    public void testRegisterObservedSequenceOfUpdatesWithNonLinearizableHistory() {
+        final History history = new History();
+        int call0 = history.invoke(42); // 0: invoke write 42
+        int call1 = history.invoke(43); // 1: invoke write 43
+        int call2 = history.invoke(null); // 2: invoke read
+        history.respond(call2, 42); // 2: read returns 42
+        int call3 = history.invoke(null); // 3: invoke read
+        history.respond(call3, 43); // 3: read returns 43
+        int call4 = history.invoke(null); // 4: invoke read
+        history.respond(call4, 42); // 4: read returns 42, not allowed
+
+        history.respond(call0, null); // 0: write returns
+        history.respond(call1, null); // 1: write returns
+
+        assertFalse(checker.isLinearizable(registerSpec, history));
+    }
+
+    final SequentialSpec multiRegisterSpec = new KeyedSpec() {
+
+        @Override
+        public Object getKey(Object value) {
+            return ((Tuple) value).v1();
+        }
+
+        @Override
+        public Object getValue(Object value) {
+            return ((Tuple) value).v2();
+        }
+
+        @Override
+        public Object initialState() {
+            return registerSpec.initialState();
+        }
+
+        @Override
+        public Optional nextState(Object currentState, Object input, Object output) {
+            return registerSpec.nextState(currentState, input, output);
+        }
+    };
+
+    public void testMultiRegisterWithLinearizableHistory() {
+        final History history = new History();
+        int callX0 = history.invoke(new Tuple<>("x", 42)); // 0: invoke write 42 on key x
+        int callX1 = history.invoke(new Tuple<>("x", null)); // 1: invoke read on key x
+        int callY0 = history.invoke(new Tuple<>("y", 42)); // 0: invoke write 42 on key y
+        int callY1 = history.invoke(new Tuple<>("y", null)); // 1: invoke read on key y
+        int callX2 = history.invoke(new Tuple<>("x", null)); // 2: invoke read on key x
+        int callY2 = history.invoke(new Tuple<>("y", null)); // 2: invoke read on key y
+        history.respond(callX2, 0); // 2: read returns 0 on key x
+        history.respond(callY2, 0); // 2: read returns 0 on key y
+        history.respond(callY1, 42); // 1: read returns 42 on key y
+        history.respond(callX1, 42); // 1: read returns 42 on key x
+
+        expectThrows(IllegalArgumentException.class, () -> checker.isLinearizable(multiRegisterSpec, history));
+        assertTrue(checker.isLinearizable(multiRegisterSpec, history, i -> null));
+
+        history.respond(callX0, null); // 0: write returns on key x
+        history.respond(callY0, null); // 0: write returns on key y
+        assertTrue(checker.isLinearizable(multiRegisterSpec, history));
+    }
+
+    public void testMultiRegisterWithNonLinearizableHistory() {
+        final History history = new History();
+        int callX0 = history.invoke(new Tuple<>("x", 42)); // 0: invoke write 42 on key x
+        int callX1 = history.invoke(new Tuple<>("x", null)); // 1: invoke read on key x
+        int callY0 = history.invoke(new Tuple<>("y", 42)); // 0: invoke write 42 on key y
+        int callY1 = history.invoke(new Tuple<>("y", null)); // 1: invoke read on key y
+        int callX2 = history.invoke(new Tuple<>("x", null)); // 2: invoke read on key x
+        history.respond(callY1, 42); // 1: read returns 42 on key y
+        int callY2 = history.invoke(new Tuple<>("y", null)); // 2: invoke read on key y
+        history.respond(callX2, 0); // 2: read returns 0 on key x
+        history.respond(callY2, 0); // 2: read returns 0 on key y, not allowed
+        history.respond(callX1, 42); // 1: read returns 42 on key x
+
+        expectThrows(IllegalArgumentException.class, () -> checker.isLinearizable(multiRegisterSpec, history));
+        assertFalse(checker.isLinearizable(multiRegisterSpec, history, i -> null));
+
+        history.respond(callX0, null); // 0: write returns on key x
+
history.respond(callY0, null); // 0: write returns on key y + assertFalse(checker.isLinearizable(multiRegisterSpec, history)); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java index d332888c185ac..d4cab5110eef2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java @@ -25,11 +25,12 @@ import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.zen.PublishClusterStateActionTests.AssertingAckListener; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponse; @@ -45,6 +46,8 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -57,6 +60,7 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; public class PublicationTests extends ESTestCase { @@ -478,4 +482,43 @@ private static DiscoveryNode newNode(int nodeId, Map attributes, return ts.stream(); }); } + + public static class AssertingAckListener implements Discovery.AckListener { + private final List> errors = new CopyOnWriteArrayList<>(); + private final Set successfulAcks = Collections.synchronizedSet(new HashSet<>()); + private final CountDownLatch countDown; + private final CountDownLatch commitCountDown; + + public AssertingAckListener(int nodeCount) { + countDown = new CountDownLatch(nodeCount); + commitCountDown = new CountDownLatch(1); + } + + @Override + public void onCommit(TimeValue commitTime) { + commitCountDown.countDown(); + } + + @Override + public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { + if (e != null) { + errors.add(new Tuple<>(node, e)); + } else { + successfulAcks.add(node); + } + countDown.countDown(); + } + + public Set await(long timeout, TimeUnit unit) throws InterruptedException { + assertThat(awaitErrors(timeout, unit), emptyIterable()); + assertTrue(commitCountDown.await(timeout, unit)); + return new HashSet<>(successfulAcks); + } + + public List> awaitErrors(long timeout, TimeUnit unit) throws InterruptedException { + countDown.await(timeout, unit); + return errors; + } + + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java similarity index 98% rename from server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandIT.java rename to 
server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index f6d67ed73a433..460f5133bf14d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -49,8 +49,8 @@ import static org.hamcrest.Matchers.equalTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) -@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") -public class ElasticsearchNodeCommandIT extends ESIntegTestCase { +@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.cluster.coordination:TRACE") +public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase { private MockTerminal executeCommand(ElasticsearchNodeCommand command, Environment environment, int nodeOrdinal, boolean abort) throws Exception { @@ -267,8 +267,12 @@ public void test3MasterNodes2Failed() throws Exception { logger.info("--> start 2nd and 3rd master-eligible nodes and bootstrap"); masterNodes.addAll(internalCluster().startMasterOnlyNodes(2)); // node ordinals 2 and 3 + logger.info("--> wait for all nodes to join the cluster"); + ensureStableCluster(4); + logger.info("--> create index test"); createIndex("test"); + ensureGreen("test"); logger.info("--> stop 2nd and 3d master eligible node"); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(1))); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/Zen1IT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/Zen1IT.java index 3ba9f58523d16..d508853a90173 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/Zen1IT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/Zen1IT.java @@ -25,10 +25,10 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.routing.UnassignedInfo; @@ -50,6 +50,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.StreamSupport; @@ -61,6 +62,7 @@ import static org.elasticsearch.cluster.coordination.PublicationTransportHandler.PUBLISH_STATE_ACTION_NAME; import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING; +import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.elasticsearch.test.InternalTestCluster.REMOVED_MINIMUM_MASTER_NODES; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -311,8 +313,9 @@ public void testMultipleNodeMigrationFromZen1ToZen2WithThreeNodes() throws Excep public void testFreshestMasterElectedAfterFullClusterRestart() throws Exception { final List nodeNames = internalCluster().startNodes(3, ZEN1_SETTINGS); + // Set setting to a non-default value on all nodes. assertTrue(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.ALL)).get().isAcknowledged()); + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NEW_PRIMARIES)).get().isAcknowledged()); final List nodeEnvironments = StreamSupport.stream(internalCluster().getDataOrMasterNodeInstances(NodeEnvironment.class).spliterator(), false) @@ -335,6 +338,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { .waitForNoRelocatingShards(true) .waitForNodes("2")).actionGet().isTimedOut()); + // Set setting to a different non-default value on two of the three remaining nodes. assertTrue(client.admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE)).get().isAcknowledged()); } @@ -361,13 +365,14 @@ public Settings onNodeStopped(String nodeName) throws Exception { } }); - assertFalse(client().admin().cluster().health(Requests.clusterHealthRequest() - .waitForEvents(Priority.LANGUID) - .waitForNoRelocatingShards(true) - .waitForNodes("3")).actionGet().isTimedOut()); + final AtomicReference clusterState = new AtomicReference<>(); + assertBusy(() -> { + clusterState.set(client().admin().cluster().prepareState().get().getState()); + assertFalse(clusterState.get().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)); + }); - assertThat(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get( - client().admin().cluster().state(new ClusterStateRequest()).get().getState().metaData().settings()), - equalTo(Allocation.NONE)); + final Settings clusterSettings = clusterState.get().metaData().settings(); + assertTrue(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.exists(clusterSettings)); + assertThat(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get(clusterSettings), equalTo(Allocation.NONE)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverAliasIterationTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverAliasIterationTests.java new file mode 100644 index 0000000000000..13d3cfd6cea95 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverAliasIterationTests.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.metadata; + +public class IndexNameExpressionResolverAliasIterationTests extends IndexNameExpressionResolverTests { + + protected IndexNameExpressionResolver getIndexNameExpressionResolver() { + return new IndexNameExpressionResolver() { + @Override + boolean iterateIndexAliases(int indexAliasesSize, int resolvedExpressionsSize) { + return true; + } + }; + } + +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverExpressionsIterationTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverExpressionsIterationTests.java new file mode 100644 index 0000000000000..00d46aad0e8cd --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverExpressionsIterationTests.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.metadata; + +public class IndexNameExpressionResolverExpressionsIterationTests extends IndexNameExpressionResolverTests { + + protected IndexNameExpressionResolver getIndexNameExpressionResolver() { + return new IndexNameExpressionResolver() { + @Override + boolean iterateIndexAliases(int indexAliasesSize, int resolvedExpressionsSize) { + return false; + } + }; + } + +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index 5c96f6f1cbcd4..a8326e3ce401d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -46,6 +46,7 @@ import java.util.EnumSet; import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.function.Function; import static org.elasticsearch.common.util.set.Sets.newHashSet; @@ -60,7 +61,17 @@ import static org.hamcrest.Matchers.notNullValue; public class IndexNameExpressionResolverTests extends ESTestCase { - private final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(); + private IndexNameExpressionResolver indexNameExpressionResolver; + + protected IndexNameExpressionResolver getIndexNameExpressionResolver() { + return new IndexNameExpressionResolver(); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + indexNameExpressionResolver = getIndexNameExpressionResolver(); + } public void testIndexOptionsStrict() { MetaData.Builder mdBuilder = MetaData.builder() @@ -1002,20 +1013,39 @@ public void testFilterClosedIndicesOnAliases() { assertArrayEquals(new String[] {"test-0"}, strings); } + public void 
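exampleResolveThenFilter() { // editor's illustrative sketch: hypothetical helper, not part of the original change
+        // With this change, callers resolve wildcard expressions once via resolveExpressions and pass
+        // the resulting set to filteringAliases/indexAliases, rather than passing raw expressions each time.
+        MetaData.Builder mdBuilder = MetaData.builder()
+            .put(indexBuilder("test-0").state(State.OPEN).putAlias(AliasMetaData.builder("alias-0").filter("{ \"term\": \"foo\"}")));
+        ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
+        Set<String> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "alias-*");
+        assertArrayEquals(new String[] {"alias-0"}, indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions));
+    }
+
+    public void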
testResolveExpressions() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("test-0").state(State.OPEN).putAlias(AliasMetaData.builder("alias-0").filter("{ \"term\": \"foo\"}"))) + .put(indexBuilder("test-1").state(State.OPEN).putAlias(AliasMetaData.builder("alias-1"))); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + + assertEquals(new HashSet<>(Arrays.asList("alias-0", "alias-1")), + indexNameExpressionResolver.resolveExpressions(state, "alias-*")); + assertEquals(new HashSet<>(Arrays.asList("test-0", "alias-0", "alias-1")), + indexNameExpressionResolver.resolveExpressions(state, "test-0", "alias-*")); + assertEquals(new HashSet<>(Arrays.asList("test-0", "test-1", "alias-0", "alias-1")), + indexNameExpressionResolver.resolveExpressions(state, "test-*", "alias-*")); + assertEquals(new HashSet<>(Arrays.asList("test-1", "alias-1")), + indexNameExpressionResolver.resolveExpressions(state, "*-1")); + } + public void testFilteringAliases() { MetaData.Builder mdBuilder = MetaData.builder() .put(indexBuilder("test-0").state(State.OPEN).putAlias(AliasMetaData.builder("alias-0").filter("{ \"term\": \"foo\"}"))) .put(indexBuilder("test-1").state(State.OPEN).putAlias(AliasMetaData.builder("alias-1"))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); - String[] strings = indexNameExpressionResolver.filteringAliases(state, "test-0", "alias-*"); + Set resolvedExpressions = new HashSet<>(Arrays.asList("alias-0", "alias-1")); + String[] strings = indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions); assertArrayEquals(new String[] {"alias-0"}, strings); // concrete index supersedes filtering alias - strings = indexNameExpressionResolver.filteringAliases(state, "test-0", "test-0,alias-*"); + resolvedExpressions = new HashSet<>(Arrays.asList("test-0", "alias-0", "alias-1")); + strings = indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions); assertNull(strings); - strings = indexNameExpressionResolver.filteringAliases(state, "test-0", "test-*,alias-*"); + resolvedExpressions = new HashSet<>(Arrays.asList("test-0", "test-1", "alias-0", "alias-1")); + strings = indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions); assertNull(strings); } @@ -1027,9 +1057,36 @@ public void testIndexAliases() { .putAlias(AliasMetaData.builder("test-alias-non-filtering")) ); ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); - String[] strings = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, true, "test-*"); + Set resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "test-*"); + + String[] strings = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, true, resolvedExpressions); Arrays.sort(strings); assertArrayEquals(new String[] {"test-alias-0", "test-alias-1", "test-alias-non-filtering"}, strings); + + strings = indexNameExpressionResolver.indexAliases(state, "test-0", x -> x.alias().equals("test-alias-1"), true, + resolvedExpressions); + assertArrayEquals(null, strings); + } + + public void testIndexAliasesSkipIdentity() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("test-0").state(State.OPEN) + .putAlias(AliasMetaData.builder("test-alias")) + .putAlias(AliasMetaData.builder("other-alias")) + ); + ClusterState state = ClusterState.builder(new 
ClusterName("_name")).metaData(mdBuilder).build(); + + Set resolvedExpressions = new HashSet<>(Arrays.asList("test-0", "test-alias")); + String[] aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, false, resolvedExpressions); + assertNull(aliases); + aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, true, resolvedExpressions); + assertArrayEquals(new String[] {"test-alias"}, aliases); + + resolvedExpressions = Collections.singleton("other-alias"); + aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, false, resolvedExpressions); + assertArrayEquals(new String[] {"other-alias"}, aliases); + aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, true, resolvedExpressions); + assertArrayEquals(new String[] {"other-alias"}, aliases); } public void testConcreteWriteIndexSuccessful() { @@ -1039,7 +1096,7 @@ public void testConcreteWriteIndexSuccessful() { .putAlias(AliasMetaData.builder("test-alias").writeIndex(testZeroWriteIndex ? true : null))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); String[] strings = indexNameExpressionResolver - .indexAliases(state, "test-0", x -> true, true, "test-*"); + .indexAliases(state, "test-0", x -> true, true, new HashSet<>(Arrays.asList("test-0", "test-alias"))); Arrays.sort(strings); assertArrayEquals(new String[] {"test-alias"}, strings); IndicesRequest request = new IndicesRequest() { @@ -1100,7 +1157,7 @@ public void testConcreteWriteIndexWithWildcardExpansion() { .putAlias(AliasMetaData.builder("test-alias").writeIndex(testZeroWriteIndex ? randomFrom(false, null) : true))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); String[] strings = indexNameExpressionResolver - .indexAliases(state, "test-0", x -> true, true, "test-*"); + .indexAliases(state, "test-0", x -> true, true, new HashSet<>(Arrays.asList("test-0", "test-1", "test-alias"))); Arrays.sort(strings); assertArrayEquals(new String[] {"test-alias"}, strings); IndicesRequest request = new IndicesRequest() { @@ -1128,7 +1185,7 @@ public void testConcreteWriteIndexWithNoWriteIndexWithSingleIndex() { .putAlias(AliasMetaData.builder("test-alias").writeIndex(false))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); String[] strings = indexNameExpressionResolver - .indexAliases(state, "test-0", x -> true, true, "test-*"); + .indexAliases(state, "test-0", x -> true, true, new HashSet<>(Arrays.asList("test-0", "test-alias"))); Arrays.sort(strings); assertArrayEquals(new String[] {"test-alias"}, strings); DocWriteRequest request = randomFrom(new IndexRequest("test-alias"), @@ -1148,7 +1205,7 @@ public void testConcreteWriteIndexWithNoWriteIndexWithMultipleIndices() { .putAlias(AliasMetaData.builder("test-alias").writeIndex(randomFrom(false, null)))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); String[] strings = indexNameExpressionResolver - .indexAliases(state, "test-0", x -> true, true, "test-*"); + .indexAliases(state, "test-0", x -> true, true, new HashSet<>(Arrays.asList("test-0", "test-1", "test-alias"))); Arrays.sort(strings); assertArrayEquals(new String[] {"test-alias"}, strings); DocWriteRequest request = randomFrom(new IndexRequest("test-alias"), @@ -1169,7 +1226,7 @@ public void testAliasResolutionNotAllowingMultipleIndices() { 
.putAlias(AliasMetaData.builder("test-alias").writeIndex(randomFrom(!test0WriteIndex, null)))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); String[] strings = indexNameExpressionResolver - .indexAliases(state, "test-0", x -> true, true, "test-*"); + .indexAliases(state, "test-0", x -> true, true, new HashSet<>(Arrays.asList("test-0", "test-1", "test-alias"))); Arrays.sort(strings); assertArrayEquals(new String[] {"test-alias"}, strings); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java index c1e341fd5bc2f..50166bd42b37d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java @@ -28,12 +28,12 @@ import java.util.Collections; +import static org.hamcrest.Matchers.equalTo; + public class MetaDataIndexUpgradeServiceTests extends ESTestCase { public void testArchiveBrokenIndexSettings() { - MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(), - new MapperRegistry(Collections.emptyMap(), Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER), - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, Collections.emptyList()); + MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService(); IndexMetaData src = newIndexMeta("foo", Settings.EMPTY); IndexMetaData indexMetaData = service.archiveBrokenIndexSettings(src); assertSame(indexMetaData, src); @@ -58,10 +58,20 @@ public void testArchiveBrokenIndexSettings() { assertSame(indexMetaData, src); } + public void testAlreadyUpgradedIndexArchivesBrokenIndexSettings() { + final MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService(); + final IndexMetaData initial = newIndexMeta( + "foo", + Settings.builder().put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT).put("index.refresh_interval", "-200").build()); + assertTrue(service.isUpgraded(initial)); + final IndexMetaData after = service.upgradeIndexMetaData(initial, Version.CURRENT.minimumIndexCompatibilityVersion()); + // the index does not need to be upgraded, but checking that it does should archive any broken settings + assertThat(after.getSettings().get("archived.index.refresh_interval"), equalTo("-200")); + assertNull(after.getSettings().get("index.refresh_interval")); + } + public void testUpgrade() { - MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(), - new MapperRegistry(Collections.emptyMap(), Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER), - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, Collections.emptyList()); + MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService(); IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build()); assertFalse(service.isUpgraded(src)); src = service.upgradeIndexMetaData(src, Version.CURRENT.minimumIndexCompatibilityVersion()); @@ -72,9 +82,7 @@ public void testUpgrade() { } public void testIsUpgraded() { - MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(), - new MapperRegistry(Collections.emptyMap(), Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER), - 
IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, Collections.emptyList()); + MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService(); IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build()); assertFalse(service.isUpgraded(src)); Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion()); @@ -85,9 +93,7 @@ public void testIsUpgraded() { } public void testFailUpgrade() { - MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, xContentRegistry(), - new MapperRegistry(Collections.emptyMap(), Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER), - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, Collections.emptyList()); + MetaDataIndexUpgradeService service = getMetaDataIndexUpgradeService(); Version minCompat = Version.CURRENT.minimumIndexCompatibilityVersion(); Version indexUpgraded = VersionUtils.randomVersionBetween(random(), minCompat, VersionUtils.getPreviousVersion(Version.CURRENT)); Version indexCreated = Version.fromString((minCompat.major - 1) + "." + randomInt(5) + "." + randomInt(5)); @@ -141,6 +147,15 @@ public void testPluginUpgradeFailure() { assertEquals(message, "Cannot upgrade index foo"); } + private MetaDataIndexUpgradeService getMetaDataIndexUpgradeService() { + return new MetaDataIndexUpgradeService( + Settings.EMPTY, + xContentRegistry(), + new MapperRegistry(Collections.emptyMap(), Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + Collections.emptyList()); + } + public static IndexMetaData newIndexMeta(String name, Settings indexSettings) { Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) @@ -152,4 +167,5 @@ public static IndexMetaData newIndexMeta(String name, Settings indexSettings) { .build(); return IndexMetaData.builder(name).settings(build).build(); } + } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 9a7e25d29bb08..f4b834e4d29a6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -46,6 +46,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; @@ -84,7 +85,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { // disruption tests need MockTransportService - return Arrays.asList(MockTransportService.TestPlugin.class); + return Arrays.asList(MockTransportService.TestPlugin.class, InternalSettingsPlugin.class); } public void testBulkWeirdScenario() throws Exception { @@ -92,7 +93,9 @@ public void testBulkWeirdScenario() throws Exception { internalCluster().startDataOnlyNodes(2); assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() - .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get()); + .put("index.number_of_shards", 1).put("index.number_of_replicas", 1) 
+ .put("index.global_checkpoint_sync.interval", "1s")) + .get()); ensureGreen(); BulkResponse bulkResponse = client().prepareBulk() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java new file mode 100644 index 0000000000000..9fcd3d97f1fbe --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java @@ -0,0 +1,233 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.routing.allocation.decider; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.ClusterPlugin; +import org.elasticsearch.test.gateway.TestGatewayAllocator; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public class EnableAllocationShortCircuitTests extends ESAllocationTestCase { + + private static ClusterState createClusterStateWithAllShardsAssigned() { + AllocationService allocationService = createAllocationService(Settings.EMPTY); + + final int numberOfNodes = randomIntBetween(1, 5); + final DiscoveryNodes.Builder discoveryNodesBuilder = 
DiscoveryNodes.builder(); + for (int i = 0; i < numberOfNodes; i++) { + discoveryNodesBuilder.add(newNode("node" + i)); + } + + final MetaData.Builder metadataBuilder = MetaData.builder(); + final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + for (int i = randomIntBetween(1, 10); i >= 0; i--) { + final IndexMetaData indexMetaData = IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(randomIntBetween(0, numberOfNodes - 1)).build(); + metadataBuilder.put(indexMetaData, true); + routingTableBuilder.addAsNew(indexMetaData); + } + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(Settings.EMPTY)) + .nodes(discoveryNodesBuilder).metaData(metadataBuilder).routingTable(routingTableBuilder.build()).build(); + + while (clusterState.getRoutingNodes().hasUnassignedShards() + || clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).isEmpty() == false) { + clusterState = allocationService.applyStartedShards(clusterState, + clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING)); + clusterState = allocationService.reroute(clusterState, "reroute"); + } + + return clusterState; + } + + public void testRebalancingAttemptedIfPermitted() { + ClusterState clusterState = createClusterStateWithAllShardsAssigned(); + + final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), + randomFrom(EnableAllocationDecider.Rebalance.ALL, + EnableAllocationDecider.Rebalance.PRIMARIES, + EnableAllocationDecider.Rebalance.REPLICAS).name()), + plugin); + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.rebalanceAttempts, greaterThan(0)); + } + + public void testRebalancingSkippedIfDisabled() { + ClusterState clusterState = createClusterStateWithAllShardsAssigned(); + + final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE.name()), + plugin); + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.rebalanceAttempts, equalTo(0)); + } + + public void testRebalancingSkippedIfDisabledIncludingOnSpecificIndices() { + ClusterState clusterState = createClusterStateWithAllShardsAssigned(); + final IndexMetaData indexMetaData = randomFrom(clusterState.metaData().indices().values().toArray(IndexMetaData.class)); + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()) + .put(IndexMetaData.builder(indexMetaData).settings(Settings.builder().put(indexMetaData.getSettings()) + .put(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()))).build()).build(); + + final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()), + plugin); + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.rebalanceAttempts, equalTo(0)); + } + + public void testRebalancingAttemptedIfDisabledButOverridenOnSpecificIndices() { + ClusterState clusterState = 
createClusterStateWithAllShardsAssigned(); + final IndexMetaData indexMetaData = randomFrom(clusterState.metaData().indices().values().toArray(IndexMetaData.class)); + clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()) + .put(IndexMetaData.builder(indexMetaData).settings(Settings.builder().put(indexMetaData.getSettings()) + .put(INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), + randomFrom(EnableAllocationDecider.Rebalance.ALL, + EnableAllocationDecider.Rebalance.PRIMARIES, + EnableAllocationDecider.Rebalance.REPLICAS).name()))).build()).build(); + + final RebalanceShortCircuitPlugin plugin = new RebalanceShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()), + plugin); + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.rebalanceAttempts, greaterThan(0)); + } + + public void testAllocationSkippedIfDisabled() { + final AllocateShortCircuitPlugin plugin = new AllocateShortCircuitPlugin(); + AllocationService allocationService = createAllocationService(Settings.builder() + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE.name()), + plugin); + + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) + .build(); + + RoutingTable routingTable = RoutingTable.builder() + .addAsNew(metaData.index("test")) + .build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(routingTable).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + + allocationService.reroute(clusterState, "reroute").routingTable(); + assertThat(plugin.canAllocateAttempts, equalTo(0)); + } + + private static AllocationService createAllocationService(Settings.Builder settings, ClusterPlugin plugin) { + final ClusterSettings emptyClusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + List deciders = new ArrayList<>(ClusterModule.createAllocationDeciders(settings.build(), emptyClusterSettings, + Collections.singletonList(plugin))); + return new MockAllocationService( + new AllocationDeciders(deciders), + new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); + } + + private static class RebalanceShortCircuitPlugin implements ClusterPlugin { + int rebalanceAttempts; + + @Override + public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { + return Collections.singletonList(new RebalanceShortCircuitAllocationDecider()); + } + + private class RebalanceShortCircuitAllocationDecider extends AllocationDecider { + + @Override + public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { + rebalanceAttempts++; + return super.canRebalance(shardRouting, allocation); + } + + @Override + public Decision canRebalance(RoutingAllocation allocation) { + rebalanceAttempts++; + return super.canRebalance(allocation); + } + } + } + + private static class AllocateShortCircuitPlugin implements ClusterPlugin { + int canAllocateAttempts; + + @Override + public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { + return Collections.singletonList(new 
AllocateShortCircuitAllocationDecider()); + } + + private class AllocateShortCircuitAllocationDecider extends AllocationDecider { + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + canAllocateAttempts++; + return super.canAllocate(shardRouting, node, allocation); + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { + canAllocateAttempts++; + return super.canAllocate(shardRouting, allocation); + } + + @Override + public Decision canAllocate(IndexMetaData indexMetaData, RoutingNode node, RoutingAllocation allocation) { + canAllocateAttempts++; + return super.canAllocate(indexMetaData, node, allocation); + } + + @Override + public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) { + canAllocateAttempts++; + return super.canAllocate(node, allocation); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index a8fa5e4de14c0..e10a2798e1642 100644 --- a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -20,11 +20,14 @@ package org.elasticsearch.cluster.serialization; import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterState.Custom; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -39,7 +42,9 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.VersionUtils; @@ -47,6 +52,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.List; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -218,4 +224,142 @@ public void testObjectReuseWhenApplyingClusterStateDiff() throws Exception { assertSame("template", serializedClusterState2.metaData().templates().get("test-template"), serializedClusterState3.metaData().templates().get("test-template")); } + + public static class TestCustomOne extends AbstractNamedDiffable implements Custom { + + public static final String TYPE = "test_custom_one"; + private final String strObject; + + public TestCustomOne(String strObject) { + this.strObject = strObject; + } + + public TestCustomOne(StreamInput in) throws IOException { + this.strObject = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(strObject); + } + + @Override + public XContentBuilder 
toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("custom_string_object", strObject); + } + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(Custom.class, TYPE, in); + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + + } + + public static class TestCustomTwo extends AbstractNamedDiffable implements Custom { + + public static final String TYPE = "test_custom_two"; + private final Integer intObject; + + public TestCustomTwo(Integer intObject) { + this.intObject = intObject; + } + + public TestCustomTwo(StreamInput in) throws IOException { + this.intObject = in.readInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(intObject); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("custom_integer_object", intObject); + } + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(Custom.class, TYPE, in); + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT.minimumCompatibilityVersion(); + } + + } + + public void testCustomSerialization() throws Exception { + ClusterState.Builder builder = ClusterState.builder(ClusterState.EMPTY_STATE) + .putCustom(TestCustomOne.TYPE, new TestCustomOne("test_custom_one")) + .putCustom(TestCustomTwo.TYPE, new TestCustomTwo(10)); + + ClusterState clusterState = builder.incrementVersion().build(); + + Diff diffs = clusterState.diff(ClusterState.EMPTY_STATE); + + // Add the new customs to named writeables + final List entries = ClusterModule.getNamedWriteables(); + entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TestCustomOne.TYPE, TestCustomOne::new)); + entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, TestCustomOne.TYPE, TestCustomOne::readDiffFrom)); + entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TestCustomTwo.TYPE, TestCustomTwo::new)); + entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, TestCustomTwo.TYPE, TestCustomTwo::readDiffFrom)); + + // serialize with current version + BytesStreamOutput outStream = new BytesStreamOutput(); + Version version = Version.CURRENT; + outStream.setVersion(version); + diffs.writeTo(outStream); + StreamInput inStream = outStream.bytes().streamInput(); + + inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(entries)); + inStream.setVersion(version); + Diff serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode()); + ClusterState stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE); + + // Current version - Both the customs are non null + assertThat(stateAfterDiffs.custom(TestCustomOne.TYPE), notNullValue()); + assertThat(stateAfterDiffs.custom(TestCustomTwo.TYPE), notNullValue()); + + // serialize with minimum compatibile version + outStream = new BytesStreamOutput(); + version = Version.CURRENT.minimumCompatibilityVersion(); + outStream.setVersion(version); + diffs.writeTo(outStream); + inStream = outStream.bytes().streamInput(); + + inStream = new 
NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(entries)); + inStream.setVersion(version); + serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode()); + stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE); + + // Old version - TestCustomOne is null and TestCustomTwo is not null + assertThat(stateAfterDiffs.custom(TestCustomOne.TYPE), nullValue()); + assertThat(stateAfterDiffs.custom(TestCustomTwo.TYPE), notNullValue()); + } + } diff --git a/server/src/test/java/org/elasticsearch/common/NumbersTests.java b/server/src/test/java/org/elasticsearch/common/NumbersTests.java index 46378ccc9e9fb..4cab3206b7fd0 100644 --- a/server/src/test/java/org/elasticsearch/common/NumbersTests.java +++ b/server/src/test/java/org/elasticsearch/common/NumbersTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.common; +import com.carrotsearch.randomizedtesting.annotations.Timeout; import org.elasticsearch.test.ESTestCase; import java.math.BigDecimal; @@ -27,19 +28,26 @@ public class NumbersTests extends ESTestCase { + @Timeout(millis = 10000) public void testToLong() { assertEquals(3L, Numbers.toLong("3", false)); assertEquals(3L, Numbers.toLong("3.1", true)); assertEquals(9223372036854775807L, Numbers.toLong("9223372036854775807.00", false)); assertEquals(-9223372036854775808L, Numbers.toLong("-9223372036854775808.00", false)); + assertEquals(9223372036854775807L, Numbers.toLong("9223372036854775807.00", true)); + assertEquals(-9223372036854775808L, Numbers.toLong("-9223372036854775808.00", true)); + assertEquals(9223372036854775807L, Numbers.toLong("9223372036854775807.99", true)); + assertEquals(-9223372036854775808L, Numbers.toLong("-9223372036854775808.99", true)); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> Numbers.toLong("9223372036854775808", false)); - assertEquals("Value [9223372036854775808] is out of range for a long", e.getMessage()); + assertEquals("Value [9223372036854775808] is out of range for a long", expectThrows(IllegalArgumentException.class, + () -> Numbers.toLong("9223372036854775808", false)).getMessage()); + assertEquals("Value [-9223372036854775809] is out of range for a long", expectThrows(IllegalArgumentException.class, + () -> Numbers.toLong("-9223372036854775809", false)).getMessage()); - e = expectThrows(IllegalArgumentException.class, - () -> Numbers.toLong("-9223372036854775809", false)); - assertEquals("Value [-9223372036854775809] is out of range for a long", e.getMessage()); + assertEquals("Value [1e99999999] is out of range for a long", expectThrows(IllegalArgumentException.class, + () -> Numbers.toLong("1e99999999", false)).getMessage()); + assertEquals("Value [-1e99999999] is out of range for a long", expectThrows(IllegalArgumentException.class, + () -> Numbers.toLong("-1e99999999", false)).getMessage()); } public void testToLongExact() { diff --git a/server/src/test/java/org/elasticsearch/common/RoundingTests.java b/server/src/test/java/org/elasticsearch/common/RoundingTests.java index a809131b932e2..bee3f57764f32 100644 --- a/server/src/test/java/org/elasticsearch/common/RoundingTests.java +++ b/server/src/test/java/org/elasticsearch/common/RoundingTests.java @@ -57,6 +57,30 @@ public void testUTCTimeUnitRounding() { tzRounding = Rounding.builder(Rounding.DateTimeUnit.WEEK_OF_WEEKYEAR).build(); assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-09T00:00:00.000Z"), tz)); 
assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-16T00:00:00.000Z"), tz)); + + tzRounding = Rounding.builder(Rounding.DateTimeUnit.QUARTER_OF_YEAR).build(); + assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-01T00:00:00.000Z"), tz)); + assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-04-01T00:00:00.000Z"), tz)); + + tzRounding = Rounding.builder(Rounding.DateTimeUnit.HOUR_OF_DAY).build(); + assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:00:00.000Z"), tz)); + assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T01:00:00.000Z"), tz)); + + tzRounding = Rounding.builder(Rounding.DateTimeUnit.DAY_OF_MONTH).build(); + assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T00:00:00.000Z"), tz)); + assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-10T00:00:00.000Z"), tz)); + + tzRounding = Rounding.builder(Rounding.DateTimeUnit.YEAR_OF_CENTURY).build(); + assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-01T00:00:00.000Z"), tz)); + assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2013-01-01T00:00:00.000Z"), tz)); + + tzRounding = Rounding.builder(Rounding.DateTimeUnit.MINUTES_OF_HOUR).build(); + assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:01:00.000Z"), tz)); + assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T00:01:00.000Z"), tz)); + + tzRounding = Rounding.builder(Rounding.DateTimeUnit.SECOND_OF_MINUTE).build(); + assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:01:01.000Z"), tz)); + assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T00:00:01.000Z"), tz)); } public void testUTCIntervalRounding() { @@ -667,7 +691,7 @@ private void assertInterval(long rounded, long nextRoundingValue, Rounding round } /** - * perform a number on assertions and checks on {@link org.elasticsearch.common.rounding.Rounding.TimeUnitRounding} intervals + * perform a number on assertions and checks on {@link org.elasticsearch.common.Rounding.TimeUnitRounding} intervals * @param rounded the expected low end of the rounding interval * @param unrounded a date in the interval to be checked for rounding * @param nextRoundingValue the expected upper end of the rounding interval diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java index 9230cded82b1d..7bd24aec8de90 100644 --- a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java +++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java @@ -37,6 +37,6 @@ protected BlobStore newBlobStore() throws IOException { } else { settings = Settings.EMPTY; } - return new FsBlobStore(settings, createTempDir()); + return new FsBlobStore(settings, createTempDir(), false); } } diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java index 59e4ffd7927ca..4a1b1e1016fb9 100644 --- 
a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java +++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java @@ -42,15 +42,14 @@ protected BlobStore newBlobStore() throws IOException { } else { settings = Settings.EMPTY; } - return new FsBlobStore(settings, createTempDir()); + return new FsBlobStore(settings, createTempDir(), false); } public void testReadOnly() throws Exception { - Settings settings = Settings.builder().put("readonly", true).build(); Path tempDir = createTempDir(); Path path = tempDir.resolve("bar"); - try (FsBlobStore store = new FsBlobStore(settings, path)) { + try (FsBlobStore store = new FsBlobStore(Settings.EMPTY, path, true)) { assertFalse(Files.exists(path)); BlobPath blobPath = BlobPath.cleanPath().add("foo"); store.blobContainer(blobPath); @@ -61,8 +60,7 @@ public void testReadOnly() throws Exception { assertFalse(Files.exists(storePath)); } - settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("readonly", false).build(); - try (FsBlobStore store = new FsBlobStore(settings, path)) { + try (FsBlobStore store = new FsBlobStore(Settings.EMPTY, path, false)) { assertTrue(Files.exists(path)); BlobPath blobPath = BlobPath.cleanPath().add("foo"); BlobContainer container = store.blobContainer(blobPath); diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index cd92061ae25d5..c3a541fe87ec2 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.test.ESTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; +import org.joda.time.format.ISODateTimeFormat; import java.time.ZoneOffset; import java.time.ZonedDateTime; @@ -99,6 +100,7 @@ public void testDuellingFormatsValidParsing() { assertSameDate("20181126T121212+01:00", "basic_date_time_no_millis"); assertSameDate("20181126T121212+0100", "basic_date_time_no_millis"); assertSameDate("2018363", "basic_ordinal_date"); + assertSameDate("2018363T121212.1Z", "basic_ordinal_date_time"); assertSameDate("2018363T121212.123Z", "basic_ordinal_date_time"); assertSameDate("2018363T121212.123456789Z", "basic_ordinal_date_time"); assertSameDate("2018363T121212.123+0100", "basic_ordinal_date_time"); @@ -106,15 +108,19 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018363T121212Z", "basic_ordinal_date_time_no_millis"); assertSameDate("2018363T121212+0100", "basic_ordinal_date_time_no_millis"); assertSameDate("2018363T121212+01:00", "basic_ordinal_date_time_no_millis"); + assertSameDate("121212.1Z", "basic_time"); assertSameDate("121212.123Z", "basic_time"); assertSameDate("121212.123456789Z", "basic_time"); + assertSameDate("121212.1+0100", "basic_time"); assertSameDate("121212.123+0100", "basic_time"); assertSameDate("121212.123+01:00", "basic_time"); assertSameDate("121212Z", "basic_time_no_millis"); assertSameDate("121212+0100", "basic_time_no_millis"); assertSameDate("121212+01:00", "basic_time_no_millis"); + assertSameDate("T121212.1Z", "basic_t_time"); assertSameDate("T121212.123Z", "basic_t_time"); assertSameDate("T121212.123456789Z", "basic_t_time"); + assertSameDate("T121212.1+0100", "basic_t_time"); assertSameDate("T121212.123+0100", "basic_t_time"); assertSameDate("T121212.123+01:00", "basic_t_time"); 
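+        // Editor's note (not part of the original change): the cases added in this file with a single
+        // fractional digit, e.g. "121212.1Z" or "T121212.1+0100", complement the existing three- and
+        // nine-digit cases, so the Joda and java.time formatters are duelled on variable-length
+        // fractional seconds as well.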
assertSameDate("T121212Z", "basic_t_time_no_millis"); @@ -123,6 +129,7 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018W313", "basic_week_date"); assertSameDate("1W313", "basic_week_date"); assertSameDate("18W313", "basic_week_date"); + assertSameDate("2018W313T121212.1Z", "basic_week_date_time"); assertSameDate("2018W313T121212.123Z", "basic_week_date_time"); assertSameDate("2018W313T121212.123456789Z", "basic_week_date_time"); assertSameDate("2018W313T121212.123+0100", "basic_week_date_time"); @@ -144,8 +151,10 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018-12-31T12:12:12", "date_hour_minute_second"); assertSameDate("2018-12-31T12:12:1", "date_hour_minute_second"); + assertSameDate("2018-12-31T12:12:12.1", "date_hour_minute_second_fraction"); assertSameDate("2018-12-31T12:12:12.123", "date_hour_minute_second_fraction"); assertSameDate("2018-12-31T12:12:12.123456789", "date_hour_minute_second_fraction"); + assertSameDate("2018-12-31T12:12:12.1", "date_hour_minute_second_millis"); assertSameDate("2018-12-31T12:12:12.123", "date_hour_minute_second_millis"); assertParseException("2018-12-31T12:12:12.123456789", "date_hour_minute_second_millis"); assertSameDate("2018-12-31T12:12:12.1", "date_hour_minute_second_millis"); @@ -156,11 +165,14 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018-05-30T20", "date_optional_time"); assertSameDate("2018-05-30T20:21", "date_optional_time"); assertSameDate("2018-05-30T20:21:23", "date_optional_time"); + assertSameDate("2018-05-30T20:21:23.1", "date_optional_time"); assertSameDate("2018-05-30T20:21:23.123", "date_optional_time"); assertSameDate("2018-05-30T20:21:23.123456789", "date_optional_time"); assertSameDate("2018-05-30T20:21:23.123Z", "date_optional_time"); assertSameDate("2018-05-30T20:21:23.123456789Z", "date_optional_time"); + assertSameDate("2018-05-30T20:21:23.1+0100", "date_optional_time"); assertSameDate("2018-05-30T20:21:23.123+0100", "date_optional_time"); + assertSameDate("2018-05-30T20:21:23.1+01:00", "date_optional_time"); assertSameDate("2018-05-30T20:21:23.123+01:00", "date_optional_time"); assertSameDate("2018-12-1", "date_optional_time"); assertSameDate("2018-12-31T10:15:30", "date_optional_time"); @@ -168,17 +180,23 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018-12-31T10:5:30", "date_optional_time"); assertSameDate("2018-12-31T1:15:30", "date_optional_time"); + assertSameDate("2018-12-31T10:15:30.1Z", "date_time"); assertSameDate("2018-12-31T10:15:30.123Z", "date_time"); assertSameDate("2018-12-31T10:15:30.123456789Z", "date_time"); + assertSameDate("2018-12-31T10:15:30.1+0100", "date_time"); assertSameDate("2018-12-31T10:15:30.123+0100", "date_time"); assertSameDate("2018-12-31T10:15:30.123+01:00", "date_time"); + assertSameDate("2018-12-31T10:15:30.1+01:00", "date_time"); assertSameDate("2018-12-31T10:15:30.11Z", "date_time"); assertSameDate("2018-12-31T10:15:30.11+0100", "date_time"); assertSameDate("2018-12-31T10:15:30.11+01:00", "date_time"); + assertSameDate("2018-12-31T10:15:3.1Z", "date_time"); assertSameDate("2018-12-31T10:15:3.123Z", "date_time"); assertSameDate("2018-12-31T10:15:3.123456789Z", "date_time"); + assertSameDate("2018-12-31T10:15:3.1+0100", "date_time"); assertSameDate("2018-12-31T10:15:3.123+0100", "date_time"); assertSameDate("2018-12-31T10:15:3.123+01:00", "date_time"); + assertSameDate("2018-12-31T10:15:3.1+01:00", "date_time"); assertSameDate("2018-12-31T10:15:30Z", "date_time_no_millis"); 
assertSameDate("2018-12-31T10:15:30+0100", "date_time_no_millis"); @@ -217,10 +235,12 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018-128", "ordinal_date"); assertSameDate("2018-1", "ordinal_date"); + assertSameDate("2018-128T10:15:30.1Z", "ordinal_date_time"); assertSameDate("2018-128T10:15:30.123Z", "ordinal_date_time"); assertSameDate("2018-128T10:15:30.123456789Z", "ordinal_date_time"); assertSameDate("2018-128T10:15:30.123+0100", "ordinal_date_time"); assertSameDate("2018-128T10:15:30.123+01:00", "ordinal_date_time"); + assertSameDate("2018-1T10:15:30.1Z", "ordinal_date_time"); assertSameDate("2018-1T10:15:30.123Z", "ordinal_date_time"); assertSameDate("2018-1T10:15:30.123456789Z", "ordinal_date_time"); assertSameDate("2018-1T10:15:30.123+0100", "ordinal_date_time"); @@ -233,16 +253,20 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018-1T10:15:30+0100", "ordinal_date_time_no_millis"); assertSameDate("2018-1T10:15:30+01:00", "ordinal_date_time_no_millis"); + assertSameDate("10:15:30.1Z", "time"); assertSameDate("10:15:30.123Z", "time"); assertSameDate("10:15:30.123456789Z", "time"); assertSameDate("10:15:30.123+0100", "time"); assertSameDate("10:15:30.123+01:00", "time"); + assertSameDate("1:15:30.1Z", "time"); assertSameDate("1:15:30.123Z", "time"); assertSameDate("1:15:30.123+0100", "time"); assertSameDate("1:15:30.123+01:00", "time"); + assertSameDate("10:1:30.1Z", "time"); assertSameDate("10:1:30.123Z", "time"); assertSameDate("10:1:30.123+0100", "time"); assertSameDate("10:1:30.123+01:00", "time"); + assertSameDate("10:15:3.1Z", "time"); assertSameDate("10:15:3.123Z", "time"); assertSameDate("10:15:3.123+0100", "time"); assertSameDate("10:15:3.123+01:00", "time"); @@ -266,10 +290,13 @@ public void testDuellingFormatsValidParsing() { assertSameDate("10:15:3+01:00", "time_no_millis"); assertParseException("10:15:3", "time_no_millis"); + assertSameDate("T10:15:30.1Z", "t_time"); assertSameDate("T10:15:30.123Z", "t_time"); assertSameDate("T10:15:30.123456789Z", "t_time"); + assertSameDate("T10:15:30.1+0100", "t_time"); assertSameDate("T10:15:30.123+0100", "t_time"); assertSameDate("T10:15:30.123+01:00", "t_time"); + assertSameDate("T10:15:30.1+01:00", "t_time"); assertSameDate("T1:15:30.123Z", "t_time"); assertSameDate("T1:15:30.123+0100", "t_time"); assertSameDate("T1:15:30.123+01:00", "t_time"); @@ -304,12 +331,18 @@ public void testDuellingFormatsValidParsing() { "Cannot parse \"2012-W1-8\": Value 8 for dayOfWeek must be in the range [1,7]"); assertJavaTimeParseException("2012-W1-8", "week_date"); + assertSameDate("2012-W48-6T10:15:30.1Z", "week_date_time"); assertSameDate("2012-W48-6T10:15:30.123Z", "week_date_time"); assertSameDate("2012-W48-6T10:15:30.123456789Z", "week_date_time"); + assertSameDate("2012-W48-6T10:15:30.1+0100", "week_date_time"); assertSameDate("2012-W48-6T10:15:30.123+0100", "week_date_time"); + assertSameDate("2012-W48-6T10:15:30.1+01:00", "week_date_time"); assertSameDate("2012-W48-6T10:15:30.123+01:00", "week_date_time"); + assertSameDate("2012-W1-6T10:15:30.1Z", "week_date_time"); assertSameDate("2012-W1-6T10:15:30.123Z", "week_date_time"); + assertSameDate("2012-W1-6T10:15:30.1+0100", "week_date_time"); assertSameDate("2012-W1-6T10:15:30.123+0100", "week_date_time"); + assertSameDate("2012-W1-6T10:15:30.1+01:00", "week_date_time"); assertSameDate("2012-W1-6T10:15:30.123+01:00", "week_date_time"); assertSameDate("2012-W48-6T10:15:30Z", "week_date_time_no_millis"); @@ -342,12 +375,26 @@ public void 
testDuellingFormatsValidParsing() { assertSameDate("2012-W1-1", "weekyear_week_day"); } + public void testCompositeParsing(){ + //in all these examples the second pattern will be used + assertSameDate("2014-06-06T12:01:02.123", "yyyy-MM-dd'T'HH:mm:ss||yyyy-MM-dd'T'HH:mm:ss.SSS"); + assertSameDate("2014-06-06T12:01:02.123", "strictDateTimeNoMillis||yyyy-MM-dd'T'HH:mm:ss.SSS"); + assertSameDate("2014-06-06T12:01:02.123", "yyyy-MM-dd'T'HH:mm:ss+HH:MM||yyyy-MM-dd'T'HH:mm:ss.SSS"); + } + + public void testExceptionWhenCompositeParsingFails(){ + assertParseException("2014-06-06T12:01:02.123", "yyyy-MM-dd'T'HH:mm:ss||yyyy-MM-dd'T'HH:mm:ss.SS"); + } + public void testDuelingStrictParsing() { assertSameDate("2018W313", "strict_basic_week_date"); assertParseException("18W313", "strict_basic_week_date"); + assertSameDate("2018W313T121212.1Z", "strict_basic_week_date_time"); assertSameDate("2018W313T121212.123Z", "strict_basic_week_date_time"); assertSameDate("2018W313T121212.123456789Z", "strict_basic_week_date_time"); + assertSameDate("2018W313T121212.1+0100", "strict_basic_week_date_time"); assertSameDate("2018W313T121212.123+0100", "strict_basic_week_date_time"); + assertSameDate("2018W313T121212.1+01:00", "strict_basic_week_date_time"); assertSameDate("2018W313T121212.123+01:00", "strict_basic_week_date_time"); assertParseException("2018W313T12128.123Z", "strict_basic_week_date_time"); assertParseException("2018W313T12128.123456789Z", "strict_basic_week_date_time"); @@ -375,6 +422,7 @@ public void testDuelingStrictParsing() { assertParseException("2018-12-31T8:3", "strict_date_hour_minute"); assertSameDate("2018-12-31T12:12:12", "strict_date_hour_minute_second"); assertParseException("2018-12-31T12:12:1", "strict_date_hour_minute_second"); + assertSameDate("2018-12-31T12:12:12.1", "strict_date_hour_minute_second_fraction"); assertSameDate("2018-12-31T12:12:12.123", "strict_date_hour_minute_second_fraction"); assertSameDate("2018-12-31T12:12:12.123456789", "strict_date_hour_minute_second_fraction"); assertSameDate("2018-12-31T12:12:12.123", "strict_date_hour_minute_second_millis"); @@ -395,9 +443,12 @@ public void testDuelingStrictParsing() { assertParseException("2018-12-31T10:5:30", "strict_date_optional_time"); assertParseException("2018-12-31T9:15:30", "strict_date_optional_time"); assertSameDate("2015-01-04T00:00Z", "strict_date_optional_time"); + assertSameDate("2018-12-31T10:15:30.1Z", "strict_date_time"); assertSameDate("2018-12-31T10:15:30.123Z", "strict_date_time"); assertSameDate("2018-12-31T10:15:30.123456789Z", "strict_date_time"); + assertSameDate("2018-12-31T10:15:30.1+0100", "strict_date_time"); assertSameDate("2018-12-31T10:15:30.123+0100", "strict_date_time"); + assertSameDate("2018-12-31T10:15:30.1+01:00", "strict_date_time"); assertSameDate("2018-12-31T10:15:30.123+01:00", "strict_date_time"); assertSameDate("2018-12-31T10:15:30.11Z", "strict_date_time"); assertSameDate("2018-12-31T10:15:30.11+0100", "strict_date_time"); @@ -430,9 +481,12 @@ public void testDuelingStrictParsing() { assertSameDate("2018-128", "strict_ordinal_date"); assertParseException("2018-1", "strict_ordinal_date"); + assertSameDate("2018-128T10:15:30.1Z", "strict_ordinal_date_time"); assertSameDate("2018-128T10:15:30.123Z", "strict_ordinal_date_time"); assertSameDate("2018-128T10:15:30.123456789Z", "strict_ordinal_date_time"); + assertSameDate("2018-128T10:15:30.1+0100", "strict_ordinal_date_time"); assertSameDate("2018-128T10:15:30.123+0100", "strict_ordinal_date_time"); + 
assertSameDate("2018-128T10:15:30.1+01:00", "strict_ordinal_date_time"); assertSameDate("2018-128T10:15:30.123+01:00", "strict_ordinal_date_time"); assertParseException("2018-1T10:15:30.123Z", "strict_ordinal_date_time"); @@ -441,6 +495,7 @@ public void testDuelingStrictParsing() { assertSameDate("2018-128T10:15:30+01:00", "strict_ordinal_date_time_no_millis"); assertParseException("2018-1T10:15:30Z", "strict_ordinal_date_time_no_millis"); + assertSameDate("10:15:30.1Z", "strict_time"); assertSameDate("10:15:30.123Z", "strict_time"); assertSameDate("10:15:30.123456789Z", "strict_time"); assertSameDate("10:15:30.123+0100", "strict_time"); @@ -462,9 +517,12 @@ public void testDuelingStrictParsing() { assertParseException("10:15:3Z", "strict_time_no_millis"); assertParseException("10:15:3", "strict_time_no_millis"); + assertSameDate("T10:15:30.1Z", "strict_t_time"); assertSameDate("T10:15:30.123Z", "strict_t_time"); assertSameDate("T10:15:30.123456789Z", "strict_t_time"); + assertSameDate("T10:15:30.1+0100", "strict_t_time"); assertSameDate("T10:15:30.123+0100", "strict_t_time"); + assertSameDate("T10:15:30.1+01:00", "strict_t_time"); assertSameDate("T10:15:30.123+01:00", "strict_t_time"); assertParseException("T1:15:30.123Z", "strict_t_time"); assertParseException("T10:1:30.123Z", "strict_t_time"); @@ -493,9 +551,12 @@ public void testDuelingStrictParsing() { "Cannot parse \"2012-W01-8\": Value 8 for dayOfWeek must be in the range [1,7]"); assertJavaTimeParseException("2012-W01-8", "strict_week_date"); + assertSameDate("2012-W48-6T10:15:30.1Z", "strict_week_date_time"); assertSameDate("2012-W48-6T10:15:30.123Z", "strict_week_date_time"); assertSameDate("2012-W48-6T10:15:30.123456789Z", "strict_week_date_time"); + assertSameDate("2012-W48-6T10:15:30.1+0100", "strict_week_date_time"); assertSameDate("2012-W48-6T10:15:30.123+0100", "strict_week_date_time"); + assertSameDate("2012-W48-6T10:15:30.1+01:00", "strict_week_date_time"); assertSameDate("2012-W48-6T10:15:30.123+01:00", "strict_week_date_time"); assertParseException("2012-W1-6T10:15:30.123Z", "strict_week_date_time"); @@ -665,6 +726,29 @@ public void testSeveralTimeFormats() { } } + // the iso 8601 parser is available via Joda.forPattern(), so we have to test this slightly differently + public void testIso8601Parsers() { + String format = "iso8601"; + org.joda.time.format.DateTimeFormatter isoFormatter = ISODateTimeFormat.dateTimeParser().withZone(DateTimeZone.UTC); + JodaDateFormatter jodaFormatter = new JodaDateFormatter(format, isoFormatter, isoFormatter); + DateFormatter javaFormatter = DateFormatter.forPattern(format); + + assertSameDate("2018-10-10T", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11:12", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11:12.123", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11:12.123Z", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11:12,123", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11:12,123Z", format, jodaFormatter, javaFormatter); + } + + public void testParsingMissingTimezone() { + long millisJava = DateFormatter.forPattern("8yyyy-MM-dd HH:mm:ss").parseMillis("2018-02-18 17:47:17"); + long millisJoda = DateFormatter.forPattern("yyyy-MM-dd HH:mm:ss").parseMillis("2018-02-18 17:47:17"); + assertThat(millisJava, 
is(millisJoda)); + } + private void assertSamePrinterOutput(String format, ZonedDateTime javaDate, DateTime jodaDate) { assertThat(jodaDate.getMillis(), is(javaDate.toInstant().toEpochMilli())); String javaTimeOut = DateFormatter.forPattern(format).format(javaDate); diff --git a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java index d72b598f02865..e8c658a40c3e1 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java @@ -33,7 +33,8 @@ import static org.hamcrest.Matchers.equalTo; public class JsonThrowablePatternConverterTests extends ESTestCase { - JsonThrowablePatternConverter converter = JsonThrowablePatternConverter.newInstance(null, null); + private static final String LINE_SEPARATOR = System.lineSeparator(); + private JsonThrowablePatternConverter converter = JsonThrowablePatternConverter.newInstance(null, null); public void testNoStacktrace() throws IOException { LogEvent event = Log4jLogEvent.newBuilder() @@ -50,15 +51,15 @@ public void testNoStacktrace() throws IOException { public void testStacktraceWithJson() throws IOException { LogManager.getLogger().info("asdf"); - String json = "{\n" + - " \"terms\" : {\n" + - " \"user\" : [\n" + - " \"u1\",\n" + - " \"u2\",\n" + - " \"u3\"\n" + - " ],\n" + - " \"boost\" : 1.0\n" + - " }\n" + + String json = "{" + LINE_SEPARATOR + + " \"terms\" : {" + LINE_SEPARATOR + + " \"user\" : [" + LINE_SEPARATOR + + " \"u1\"," + LINE_SEPARATOR + + " \"u2\"," + LINE_SEPARATOR + + " \"u3\"" + LINE_SEPARATOR + + " ]," + LINE_SEPARATOR + + " \"boost\" : 1.0" + LINE_SEPARATOR + + " }" + LINE_SEPARATOR + "}"; Exception thrown = new Exception(json); LogEvent event = Log4jLogEvent.newBuilder() @@ -74,7 +75,7 @@ public void testStacktraceWithJson() throws IOException { .findFirst() .orElseThrow(() -> new AssertionError("no logs parsed")); - int jsonLength = json.split("\n").length; + int jsonLength = json.split(LINE_SEPARATOR).length; int stacktraceLength = thrown.getStackTrace().length; assertThat("stacktrace should formatted in multiple lines", jsonLogLine.stacktrace().size(), equalTo(jsonLength + stacktraceLength)); diff --git a/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java b/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java index f188eb4cac6f4..6c92d3d1f9a19 100644 --- a/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java +++ b/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java @@ -19,11 +19,6 @@ package org.elasticsearch.common.rounding; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTimeZone; - -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZonedDateTime; import static org.elasticsearch.common.rounding.DateTimeUnit.DAY_OF_MONTH; import static org.elasticsearch.common.rounding.DateTimeUnit.HOUR_OF_DAY; @@ -33,7 +28,6 @@ import static org.elasticsearch.common.rounding.DateTimeUnit.SECOND_OF_MINUTE; import static org.elasticsearch.common.rounding.DateTimeUnit.WEEK_OF_WEEKYEAR; import static org.elasticsearch.common.rounding.DateTimeUnit.YEAR_OF_CENTURY; -import static org.hamcrest.Matchers.is; public class DateTimeUnitTests extends ESTestCase { @@ -65,17 +59,4 @@ public void testEnumIds() { assertEquals(8, 
SECOND_OF_MINUTE.id()); assertEquals(SECOND_OF_MINUTE, DateTimeUnit.resolve((byte) 8)); } - - public void testConversion() { - long millis = randomLongBetween(0, Instant.now().toEpochMilli()); - DateTimeZone zone = randomDateTimeZone(); - ZoneId zoneId = zone.toTimeZone().toZoneId(); - - int offsetSeconds = zoneId.getRules().getOffset(Instant.ofEpochMilli(millis)).getTotalSeconds(); - long parsedMillisJavaTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), zoneId) - .minusSeconds(offsetSeconds).toInstant().toEpochMilli(); - - long parsedMillisJodaTime = zone.convertLocalToUTC(millis, true); - assertThat(parsedMillisJavaTime, is(parsedMillisJodaTime)); - } } diff --git a/server/src/test/java/org/elasticsearch/common/rounding/RoundingDuelTests.java b/server/src/test/java/org/elasticsearch/common/rounding/RoundingDuelTests.java index 3ee4ce0e7d7bf..b791d0e3ce9dd 100644 --- a/server/src/test/java/org/elasticsearch/common/rounding/RoundingDuelTests.java +++ b/server/src/test/java/org/elasticsearch/common/rounding/RoundingDuelTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; +import org.joda.time.DateTimeZone; import java.time.ZoneOffset; @@ -56,6 +57,26 @@ public void testSerialization() throws Exception { assertThat(roundingJoda.nextRoundingValue(randomInt), is(roundingJavaTime.nextRoundingValue(randomInt))); } + public void testDuellingImplementations() { + org.elasticsearch.common.Rounding.DateTimeUnit randomDateTimeUnit = + randomFrom(org.elasticsearch.common.Rounding.DateTimeUnit.values()); + org.elasticsearch.common.Rounding rounding; + Rounding roundingJoda; + + if (randomBoolean()) { + rounding = org.elasticsearch.common.Rounding.builder(randomDateTimeUnit).timeZone(ZoneOffset.UTC).build(); + DateTimeUnit dateTimeUnit = DateTimeUnit.resolve(randomDateTimeUnit.getId()); + roundingJoda = Rounding.builder(dateTimeUnit).timeZone(DateTimeZone.UTC).build(); + } else { + TimeValue interval = timeValue(); + rounding = org.elasticsearch.common.Rounding.builder(interval).timeZone(ZoneOffset.UTC).build(); + roundingJoda = Rounding.builder(interval).timeZone(DateTimeZone.UTC).build(); + } + + long roundValue = randomLong(); + assertThat(roundingJoda.round(roundValue), is(rounding.round(roundValue))); + } + static TimeValue timeValue() { return TimeValue.parseTimeValue(randomIntBetween(1, 1000) + randomFrom(ALLOWED_TIME_SUFFIXES), "settingName"); } diff --git a/server/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/server/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java index e49f25772a726..029eb3b041d3d 100644 --- a/server/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java +++ b/server/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java @@ -56,6 +56,30 @@ public void testUTCTimeUnitRounding() { tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build(); assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-09T00:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-16T00:00:00.000Z"), tz)); + + tzRounding = Rounding.builder(DateTimeUnit.QUARTER).build(); + assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-01T00:00:00.000Z"), tz)); + assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-04-01T00:00:00.000Z"), tz)); + 
+ tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).build(); + assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:00:00.000Z"), tz)); + assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T01:00:00.000Z"), tz)); + + tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).build(); + assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T00:00:00.000Z"), tz)); + assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-10T00:00:00.000Z"), tz)); + + tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).build(); + assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-01T00:00:00.000Z"), tz)); + assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2013-01-01T00:00:00.000Z"), tz)); + + tzRounding = Rounding.builder(DateTimeUnit.MINUTES_OF_HOUR).build(); + assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:01:00.000Z"), tz)); + assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T00:01:00.000Z"), tz)); + + tzRounding = Rounding.builder(DateTimeUnit.SECOND_OF_MINUTE).build(); + assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:01:01.000Z"), tz)); + assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T00:00:01.000Z"), tz)); } public void testUTCIntervalRounding() { diff --git a/server/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java b/server/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java index 07ce84b0b7599..45b333ded0a80 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java @@ -22,6 +22,7 @@ import java.io.ByteArrayInputStream; import java.io.InputStream; import java.nio.charset.StandardCharsets; +import java.util.Locale; import java.util.Map; import org.elasticsearch.cli.Command; @@ -30,6 +31,7 @@ import org.elasticsearch.env.Environment; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; public class AddStringKeyStoreCommandTests extends KeyStoreCommandTestCase { InputStream input; @@ -139,6 +141,16 @@ public void testMissingSettingName() throws Exception { assertThat(e.getMessage(), containsString("The setting name can not be null")); } + public void testUpperCaseInName() throws Exception { + createKeystore(""); + terminal.addSecretInput("value"); + final String key = randomAlphaOfLength(4) + randomAlphaOfLength(1).toUpperCase(Locale.ROOT) + randomAlphaOfLength(4); + final UserException e = expectThrows(UserException.class, () -> execute(key)); + assertThat( + e, + hasToString(containsString("Setting name [" + key + "] does not match the allowed setting name pattern [[a-z0-9_\\-.]+]"))); + } + void setInput(String inputStr) { input = new ByteArrayInputStream(inputStr.getBytes(StandardCharsets.UTF_8)); } diff --git a/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java b/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java index bb2b1df7f8c03..68f434b1796b2 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java +++ 
b/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java @@ -19,6 +19,18 @@ package org.elasticsearch.common.settings; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.SimpleFSDirectory; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; + import javax.crypto.Cipher; import javax.crypto.CipherOutputStream; import javax.crypto.SecretKey; @@ -26,36 +38,30 @@ import javax.crypto.spec.GCMParameterSpec; import javax.crypto.spec.PBEKeySpec; import javax.crypto.spec.SecretKeySpec; + import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.EOFException; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.nio.charset.StandardCharsets; import java.nio.file.FileSystem; +import java.nio.file.Files; import java.nio.file.Path; +import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.SecureRandom; import java.util.ArrayList; +import java.util.Arrays; import java.util.Base64; +import java.util.HashSet; import java.util.List; -import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.store.SimpleFSDirectory; -import org.elasticsearch.common.Randomness; -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.env.Environment; -import org.elasticsearch.test.ESTestCase; -import org.hamcrest.Matchers; -import org.junit.After; -import org.junit.Before; - import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; public class KeyStoreWrapperTests extends ESTestCase { @@ -386,4 +392,56 @@ public void testBackcompatV2() throws Exception { assertEquals(-1, fileInput.read()); } } + + public void testStringAndFileDistinction() throws Exception { + final KeyStoreWrapper wrapper = KeyStoreWrapper.create(); + wrapper.setString("string_setting", "string_value".toCharArray()); + final Path temp = createTempDir(); + Files.write(temp.resolve("file_setting"), "file_value".getBytes(StandardCharsets.UTF_8)); + wrapper.setFile("file_setting", Files.readAllBytes(temp.resolve("file_setting"))); + wrapper.save(env.configFile(), new char[0]); + wrapper.close(); + + final KeyStoreWrapper afterSave = KeyStoreWrapper.load(env.configFile()); + assertNotNull(afterSave); + afterSave.decrypt(new char[0]); + assertThat(afterSave.getSettingNames(), equalTo(new HashSet<>(Arrays.asList("keystore.seed", "string_setting", "file_setting")))); + assertThat(afterSave.getString("string_setting"), equalTo("string_value")); + assertThat(toByteArray(afterSave.getFile("string_setting")), equalTo("string_value".getBytes(StandardCharsets.UTF_8))); + assertThat(afterSave.getString("file_setting"), equalTo("file_value")); + assertThat(toByteArray(afterSave.getFile("file_setting")), equalTo("file_value".getBytes(StandardCharsets.UTF_8))); + } + + public void testLegacyV3() throws GeneralSecurityException, IOException { + final Path configDir = createTempDir(); + final Path keystore = 
configDir.resolve("elasticsearch.keystore"); + try (InputStream is = KeyStoreWrapperTests.class.getResourceAsStream("/format-v3-elasticsearch.keystore"); + OutputStream os = Files.newOutputStream(keystore)) { + final byte[] buffer = new byte[4096]; + int readBytes; + while ((readBytes = is.read(buffer)) > 0) { + os.write(buffer, 0, readBytes); + } + } + final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir); + assertNotNull(wrapper); + wrapper.decrypt(new char[0]); + assertThat(wrapper.getFormatVersion(), equalTo(3)); + assertThat(wrapper.getSettingNames(), equalTo(new HashSet<>(Arrays.asList("keystore.seed", "string_setting", "file_setting")))); + assertThat(wrapper.getString("string_setting"), equalTo("string_value")); + assertThat(toByteArray(wrapper.getFile("string_setting")), equalTo("string_value".getBytes(StandardCharsets.UTF_8))); + assertThat(wrapper.getString("file_setting"), equalTo("file_value")); + assertThat(toByteArray(wrapper.getFile("file_setting")), equalTo("file_value".getBytes(StandardCharsets.UTF_8))); + } + + private byte[] toByteArray(final InputStream is) throws IOException { + final ByteArrayOutputStream os = new ByteArrayOutputStream(); + final byte[] buffer = new byte[1024]; + int readBytes; + while ((readBytes = is.read(buffer)) > 0) { + os.write(buffer, 0, readBytes); + } + return os.toByteArray(); + } + } diff --git a/server/src/test/java/org/elasticsearch/common/settings/UpgradeKeyStoreCommandTests.java b/server/src/test/java/org/elasticsearch/common/settings/UpgradeKeyStoreCommandTests.java new file mode 100644 index 0000000000000..ec9a1432539d4 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/settings/UpgradeKeyStoreCommandTests.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.settings; + +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.env.Environment; + +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasToString; + +public class UpgradeKeyStoreCommandTests extends KeyStoreCommandTestCase { + + @Override + protected Command newCommand() { + return new UpgradeKeyStoreCommand() { + + @Override + protected Environment createEnv(final Map settings) { + return env; + } + + }; + } + + public void testKeystoreUpgrade() throws Exception { + final Path keystore = KeyStoreWrapper.keystorePath(env.configFile()); + try (InputStream is = KeyStoreWrapperTests.class.getResourceAsStream("/format-v3-elasticsearch.keystore"); + OutputStream os = Files.newOutputStream(keystore)) { + final byte[] buffer = new byte[4096]; + int read; + while ((read = is.read(buffer, 0, buffer.length)) >= 0) { + os.write(buffer, 0, read); + } + } + try (KeyStoreWrapper beforeUpgrade = KeyStoreWrapper.load(env.configFile())) { + assertNotNull(beforeUpgrade); + assertThat(beforeUpgrade.getFormatVersion(), equalTo(3)); + } + execute(); + try (KeyStoreWrapper afterUpgrade = KeyStoreWrapper.load(env.configFile())) { + assertNotNull(afterUpgrade); + assertThat(afterUpgrade.getFormatVersion(), equalTo(KeyStoreWrapper.FORMAT_VERSION)); + afterUpgrade.decrypt(new char[0]); + assertThat(afterUpgrade.getSettingNames(), hasItem(KeyStoreWrapper.SEED_SETTING.getKey())); + } + } + + public void testKeystoreDoesNotExist() { + final UserException e = expectThrows(UserException.class, this::execute); + assertThat(e, hasToString(containsString("keystore does not exist at [" + KeyStoreWrapper.keystorePath(env.configFile()) + "]"))); + } + +} diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index acb202f80fc55..23f08cf8ddfdf 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -21,8 +21,10 @@ import org.elasticsearch.test.ESTestCase; +import java.time.Clock; import java.time.Instant; import java.time.ZoneId; +import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.time.temporal.ChronoField; @@ -96,6 +98,8 @@ public void testEpochSecondParserWithFraction() { assertThat(e.getMessage(), is("failed to parse date field [1234.1234567890] with format [epoch_second]")); } + + public void testEpochMilliParsersWithDifferentFormatters() { DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time||epoch_millis"); TemporalAccessor accessor = formatter.parse("123"); @@ -248,4 +252,52 @@ public void testRoundupFormatterLocale() { assertThat(roundupParser.getLocale(), is(locale)); assertThat(formatter.locale(), is(locale)); } + + public void test0MillisAreFormatted() { + DateFormatter formatter = DateFormatter.forPattern("strict_date_time"); + Clock clock = Clock.fixed(ZonedDateTime.of(2019, 02, 8, 11, 43, 00, 0, + ZoneOffset.UTC).toInstant(), ZoneOffset.UTC); + String formatted = formatter.formatMillis(clock.millis()); + assertThat(formatted, 
is("2019-02-08T11:43:00.000Z")); + } + + public void testFractionalSeconds() { + DateFormatter formatter = DateFormatters.forPattern("strict_date_optional_time"); + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.1Z")); + assertThat(instant.getNano(), is(100_000_000)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.12Z")); + assertThat(instant.getNano(), is(120_000_000)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.123Z")); + assertThat(instant.getNano(), is(123_000_000)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.1234Z")); + assertThat(instant.getNano(), is(123_400_000)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.12345Z")); + assertThat(instant.getNano(), is(123_450_000)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.123456Z")); + assertThat(instant.getNano(), is(123_456_000)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.1234567Z")); + assertThat(instant.getNano(), is(123_456_700)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.12345678Z")); + assertThat(instant.getNano(), is(123_456_780)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.123456789Z")); + assertThat(instant.getNano(), is(123_456_789)); + } + } } diff --git a/server/src/test/java/org/elasticsearch/common/time/DateUtilsRoundingTests.java b/server/src/test/java/org/elasticsearch/common/time/DateUtilsRoundingTests.java new file mode 100644 index 0000000000000..4ec1c261a2ace --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/time/DateUtilsRoundingTests.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.time; + +import org.elasticsearch.test.ESTestCase; + +import java.time.ZoneOffset; +import java.time.ZonedDateTime; + +import static org.hamcrest.Matchers.equalTo; + +public class DateUtilsRoundingTests extends ESTestCase { + + public void testDateUtilsRounding() { + for (int year = -1000; year < 3000; year++) { + final long startOfYear = DateUtilsRounding.utcMillisAtStartOfYear(year); + assertThat(startOfYear, equalTo(ZonedDateTime.of(year, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant().toEpochMilli())); + assertThat(DateUtilsRounding.getYear(startOfYear), equalTo(year)); + assertThat(DateUtilsRounding.getYear(startOfYear - 1), equalTo(year - 1)); + assertThat(DateUtilsRounding.getMonthOfYear(startOfYear, year), equalTo(1)); + assertThat(DateUtilsRounding.getMonthOfYear(startOfYear - 1, year - 1), equalTo(12)); + for (int month = 1; month <= 12; month++) { + final long startOfMonth = ZonedDateTime.of(year, month, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant().toEpochMilli(); + assertThat(DateUtilsRounding.getMonthOfYear(startOfMonth, year), equalTo(month)); + if (month > 1) { + assertThat(DateUtilsRounding.getYear(startOfMonth - 1), equalTo(year)); + assertThat(DateUtilsRounding.getMonthOfYear(startOfMonth - 1, year), equalTo(month - 1)); + } + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java b/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java index e35d8df1b9c06..e9d0a5a5b9a33 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java @@ -23,8 +23,14 @@ import org.joda.time.DateTimeZone; import java.time.Instant; +import java.time.LocalDate; +import java.time.Month; +import java.time.Year; +import java.time.YearMonth; import java.time.ZoneId; +import java.time.ZoneOffset; import java.time.ZonedDateTime; +import java.time.temporal.ChronoField; import java.util.Arrays; import java.util.HashSet; import java.util.Set; @@ -114,4 +120,82 @@ private Instant createRandomInstant() { long nanos = randomLongBetween(0, 999_999_999L); return Instant.ofEpochSecond(seconds, nanos); } + + public void testRoundFloor() { + assertThat(DateUtils.roundFloor(0, randomLongBetween(0, Long.MAX_VALUE)), is(0L)); + + ChronoField randomChronoField = + randomFrom(ChronoField.DAY_OF_MONTH, ChronoField.HOUR_OF_DAY, ChronoField.MINUTE_OF_HOUR, ChronoField.SECOND_OF_MINUTE); + long unitMillis = randomChronoField.getBaseUnit().getDuration().toMillis(); + + int year = randomIntBetween(-3000, 3000); + int month = randomIntBetween(1, 12); + int day = randomIntBetween(1, YearMonth.of(year, month).lengthOfMonth()); + int hour = randomIntBetween(1, 23); + int minute = randomIntBetween(1, 59); + int second = randomIntBetween(1, 59); + int nanos = randomIntBetween(1, 999_999_999); + + ZonedDateTime randomDate = ZonedDateTime.of(year, month, day, hour, minute, second, nanos, ZoneOffset.UTC); + + ZonedDateTime result = randomDate; + switch (randomChronoField) { + case SECOND_OF_MINUTE: + result = result.withNano(0); + break; + case MINUTE_OF_HOUR: + result = result.withNano(0).withSecond(0); + break; + case HOUR_OF_DAY: + result = result.withNano(0).withSecond(0).withMinute(0); + break; + case DAY_OF_MONTH: + result = result.withNano(0).withSecond(0).withMinute(0).withHour(0); + break; + } + + long rounded = DateUtils.roundFloor(randomDate.toInstant().toEpochMilli(), unitMillis); + assertThat(rounded, 
is(result.toInstant().toEpochMilli())); + } + + public void testRoundQuarterOfYear() { + assertThat(DateUtils.roundQuarterOfYear(0), is(0L)); + long lastQuarter1969 = ZonedDateTime.of(1969, 10, 1, 0, 0, 0, 0, ZoneOffset.UTC) + .toInstant().toEpochMilli(); + assertThat(DateUtils.roundQuarterOfYear(-1), is(lastQuarter1969)); + + int year = randomIntBetween(1970, 2040); + int month = randomIntBetween(1, 12); + int day = randomIntBetween(1, YearMonth.of(year, month).lengthOfMonth()); + + ZonedDateTime randomZonedDateTime = ZonedDateTime.of(year, month, day, + randomIntBetween(0, 23), randomIntBetween(0, 59), randomIntBetween(0, 59), 999_999_999, ZoneOffset.UTC); + long quarterInMillis = Year.of(randomZonedDateTime.getYear()).atMonth(Month.of(month).firstMonthOfQuarter()).atDay(1) + .atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli(); + long result = DateUtils.roundQuarterOfYear(randomZonedDateTime.toInstant().toEpochMilli()); + assertThat(result, is(quarterInMillis)); + } + + public void testRoundMonthOfYear() { + assertThat(DateUtils.roundMonthOfYear(0), is(0L)); + assertThat(DateUtils.roundMonthOfYear(1), is(0L)); + long dec1969 = LocalDate.of(1969, 12, 1).atStartOfDay().toInstant(ZoneOffset.UTC).toEpochMilli(); + assertThat(DateUtils.roundMonthOfYear(-1), is(dec1969)); + } + + public void testRoundYear() { + assertThat(DateUtils.roundYear(0), is(0L)); + assertThat(DateUtils.roundYear(1), is(0L)); + long startOf1969 = ZonedDateTime.of(1969, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) + .toInstant().toEpochMilli(); + assertThat(DateUtils.roundYear(-1), is(startOf1969)); + long endOf1970 = ZonedDateTime.of(1970, 12, 31, 23, 59, 59, 999_999_999, ZoneOffset.UTC) + .toInstant().toEpochMilli(); + assertThat(DateUtils.roundYear(endOf1970), is(0L)); + // test with some leapyear + long endOf1996 = ZonedDateTime.of(1996, 12, 31, 23, 59, 59, 999_999_999, ZoneOffset.UTC) + .toInstant().toEpochMilli(); + long startOf1996 = Year.of(1996).atDay(1).atStartOfDay().toInstant(ZoneOffset.UTC).toEpochMilli(); + assertThat(DateUtils.roundYear(endOf1996), is(startOf1996)); + } } diff --git a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java index 5259cad23e8fb..a374f468a138b 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java +++ b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java @@ -169,4 +169,28 @@ private static Fuzziness doSerializeRoundtrip(Fuzziness in) throws IOException { StreamInput streamInput = output.bytes().streamInput(); return new Fuzziness(streamInput); } + + public void testAsDistanceString() { + Fuzziness fuzziness = Fuzziness.build("0"); + assertEquals(0, fuzziness.asDistance(randomAlphaOfLengthBetween(0, 10))); + fuzziness = Fuzziness.build("1"); + assertEquals(1, fuzziness.asDistance(randomAlphaOfLengthBetween(0, 10))); + fuzziness = Fuzziness.build("2"); + assertEquals(2, fuzziness.asDistance(randomAlphaOfLengthBetween(0, 10))); + + fuzziness = Fuzziness.build("AUTO"); + assertEquals(0, fuzziness.asDistance("")); + assertEquals(0, fuzziness.asDistance("ab")); + assertEquals(1, fuzziness.asDistance("abc")); + assertEquals(1, fuzziness.asDistance("abcde")); + assertEquals(2, fuzziness.asDistance("abcdef")); + + fuzziness = Fuzziness.build("AUTO:5,7"); + assertEquals(0, fuzziness.asDistance("")); + assertEquals(0, fuzziness.asDistance("abcd")); + assertEquals(1, fuzziness.asDistance("abcde")); + assertEquals(1, fuzziness.asDistance("abcdef")); + 
assertEquals(2, fuzziness.asDistance("abcdefg")); + + } } diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index cd1a209878346..bafe3b7403d6f 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -525,11 +525,11 @@ public void testJavaTime() throws Exception { assertResult("{'date':null}", () -> builder().startObject().timeField("date", (LocalDateTime) null).endObject()); assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((LocalDateTime) null).endObject()); assertResult("{'date':null}", () -> builder().startObject().field("date", (LocalDateTime) null).endObject()); - assertResult("{'d1':'2016-01-01T00:00:00.000'}", + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().timeField("d1", d1.toLocalDateTime()).endObject()); - assertResult("{'d1':'2016-01-01T00:00:00.000'}", + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1").timeValue(d1.toLocalDateTime()).endObject()); - assertResult("{'d1':'2016-01-01T00:00:00.000'}", () -> builder().startObject().field("d1", d1.toLocalDateTime()).endObject()); + assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1", d1.toLocalDateTime()).endObject()); // LocalDate (no time, no time zone) assertResult("{'date':null}", () -> builder().startObject().timeField("date", (LocalDate) null).endObject()); diff --git a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java index 97ba76b822020..bd89ceb64e6df 100644 --- a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java @@ -30,8 +30,10 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.Bridge; @@ -65,6 +67,13 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(DEFAULT_SETTINGS).build(); } + @Override + public Settings indexSettings() { + return Settings.builder().put(super.indexSettings()) + // sync global checkpoint quickly so we can verify seq_no_stats aligned between all copies after tests. 
+ .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s").build(); + } + @Override protected int numberOfShards() { return 3; @@ -128,7 +137,7 @@ List startCluster(int numberOfNodes) { @Override protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); + return Arrays.asList(MockTransportService.TestPlugin.class, InternalSettingsPlugin.class); } ClusterState getNodeClusterState(String node) { diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index d8262dc4f576d..f5d27ea09b631 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; @@ -36,7 +37,12 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.NetworkDisruption; @@ -50,6 +56,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -64,7 +71,10 @@ import static org.elasticsearch.action.DocWriteResponse.Result.UPDATED; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.isOneOf; import static org.hamcrest.Matchers.not; @@ -75,6 +85,18 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class ClusterDisruptionIT extends AbstractDisruptionTestCase { + private enum ConflictMode { + none, + external, + create; + + + static ConflictMode randomMode() { + ConflictMode[] values = values(); + return values[randomInt(values.length-1)]; + } + } + /** * Test that we do not loose document whose indexing request was successful, under a randomly selected disruption scheme * We also collect & report the type of indexing failures that occur. 
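As a rough, standalone illustration of what the ConflictMode values introduced above translate into, the sketch below builds one index request per mode using the same transport-client calls this test already relies on (prepareIndex, setSource, setVersion/setVersionType, setCreate). It is not part of the patch; the index name, type, id and the fixed external version are placeholder values.

import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;

// Illustrative sketch only (not part of this change): one request shape per conflict mode.
class ConflictModeRequestExamples {

    static IndexRequestBuilder plainIndex(Client client, String id) {
        // ConflictMode.none: a plain index request, last write for this id wins
        return client.prepareIndex("test", "type", id).setSource("{}", XContentType.JSON);
    }

    static IndexRequestBuilder externallyVersioned(Client client, String id, long version) {
        // ConflictMode.external: the caller owns the version; equal or lower versions are rejected
        return client.prepareIndex("test", "type", id)
            .setSource("{}", XContentType.JSON)
            .setVersion(version)
            .setVersionType(VersionType.EXTERNAL);
    }

    static IndexRequestBuilder createOnly(Client client, String id) {
        // ConflictMode.create: fails with a version conflict if a document with this id already exists
        return client.prepareIndex("test", "type", id)
            .setSource("{}", XContentType.JSON)
            .setCreate(true);
    }
}

With VersionType.EXTERNAL a write succeeds only if the supplied version is higher than the stored one, and with create the write fails if the id already exists; the none mode is a plain last-write-wins index request.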
@@ -94,6 +116,7 @@ public void testAckedIndexing() throws Exception { assertAcked(prepareCreate("test") .setSettings(Settings.builder() + .put(indexSettings()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(2)) )); @@ -111,7 +134,9 @@ public void testAckedIndexing() throws Exception { final AtomicReference countDownLatchRef = new AtomicReference<>(); final List exceptedExceptions = new CopyOnWriteArrayList<>(); - logger.info("starting indexers"); + final ConflictMode conflictMode = ConflictMode.randomMode(); + + logger.info("starting indexers using conflict mode " + conflictMode); try { for (final String node : nodes) { final Semaphore semaphore = new Semaphore(0); @@ -131,11 +156,17 @@ public void testAckedIndexing() throws Exception { id = Integer.toString(idGenerator.incrementAndGet()); int shard = Math.floorMod(Murmur3HashFunction.hash(id), numPrimaries); logger.trace("[{}] indexing id [{}] through node [{}] targeting shard [{}]", name, id, node, shard); - IndexResponse response = - client.prepareIndex("test", "type", id) - .setSource("{}", XContentType.JSON) - .setTimeout(timeout) - .get(timeout); + IndexRequestBuilder indexRequestBuilder = client.prepareIndex("test", "type", id) + .setSource("{}", XContentType.JSON) + .setTimeout(timeout); + + if (conflictMode == ConflictMode.external) { + indexRequestBuilder.setVersion(randomIntBetween(1,10)).setVersionType(VersionType.EXTERNAL); + } else if (conflictMode == ConflictMode.create) { + indexRequestBuilder.setCreate(true); + } + + IndexResponse response = indexRequestBuilder.get(timeout); assertThat(response.getResult(), isOneOf(CREATED, UPDATED)); ackedDocs.put(id, node); logger.trace("[{}] indexed id [{}] through node [{}], response [{}]", name, id, node, response); @@ -414,4 +445,48 @@ public void testIndicesDeleted() throws Exception { assertFalse(client().admin().indices().prepareExists(idxName).get().isExists()); } + public void testRestartNodeWhileIndexing() throws Exception { + startCluster(3); + String index = "restart_while_indexing"; + assertAcked(client().admin().indices().prepareCreate(index).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, between(1, 2)))); + AtomicBoolean stopped = new AtomicBoolean(); + Thread[] threads = new Thread[between(1, 4)]; + AtomicInteger docID = new AtomicInteger(); + Set ackedDocs = ConcurrentCollections.newConcurrentSet(); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + while (stopped.get() == false && docID.get() < 5000) { + String id = Integer.toString(docID.incrementAndGet()); + try { + IndexResponse response = client().prepareIndex(index, "_doc", id).setSource("{}", XContentType.JSON).get(); + assertThat(response.getResult(), isOneOf(CREATED, UPDATED)); + logger.info("--> index id={} seq_no={}", response.getId(), response.getSeqNo()); + ackedDocs.add(response.getId()); + } catch (ElasticsearchException ignore) { + logger.info("--> fail to index id={}", id); + } + } + }); + threads[i].start(); + } + ensureGreen(index); + assertBusy(() -> assertThat(docID.get(), greaterThanOrEqualTo(100))); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback()); + ensureGreen(index); + assertBusy(() -> assertThat(docID.get(), greaterThanOrEqualTo(200))); + stopped.set(true); + for (Thread thread : threads) { + thread.join(); + } + ClusterState clusterState = 
internalCluster().clusterService().state(); + for (ShardRouting shardRouting : clusterState.routingTable().allShards(index)) { + String nodeName = clusterState.nodes().get(shardRouting.currentNodeId()).getName(); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName); + IndexShard shard = indicesService.getShardOrNull(shardRouting.shardId()); + Set docs = IndexShardTestCase.getShardDocUIDs(shard); + assertThat("shard [" + shard.routingEntry() + "] docIds [" + docs + "] vs " + " acked docIds [" + ackedDocs + "]", + ackedDocs, everyItem(isIn(docs))); + } + } } diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index 30b0448e09fad..fe74a736fe3ba 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.test.ESTestCase; @@ -194,15 +193,14 @@ public void testLazyConstructionSeedsProvider() { public void testJoinValidator() { BiConsumer consumer = (a, b) -> {}; - // TODO: move to zen2 once join validators are implemented DiscoveryModule module = newModule(Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), - DiscoveryModule.ZEN_DISCOVERY_TYPE).build(), Collections.singletonList(new DiscoveryPlugin() { + DiscoveryModule.ZEN2_DISCOVERY_TYPE).build(), Collections.singletonList(new DiscoveryPlugin() { @Override public BiConsumer getJoinValidator() { return consumer; } })); - ZenDiscovery discovery = (ZenDiscovery) module.getDiscovery(); + Coordinator discovery = (Coordinator) module.getDiscovery(); Collection> onJoinValidators = discovery.getOnJoinValidators(); assertEquals(2, onJoinValidators.size()); assertTrue(onJoinValidators.contains(consumer)); diff --git a/server/src/test/java/org/elasticsearch/discovery/FileBasedSeedHostsProviderTests.java b/server/src/test/java/org/elasticsearch/discovery/FileBasedSeedHostsProviderTests.java index 4df4fc53affd3..fc89bd1a2f30b 100644 --- a/server/src/test/java/org/elasticsearch/discovery/FileBasedSeedHostsProviderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/FileBasedSeedHostsProviderTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -117,7 +116,7 @@ public void testEmptyUnicastHostsFile() throws Exception { public void testUnicastHostsDoesNotExist() { final FileBasedSeedHostsProvider provider = new FileBasedSeedHostsProvider(createTempDir().toAbsolutePath()); final List addresses = provider.getSeedAddresses((hosts, limitPortCounts) -> - UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, + SeedHostsResolver.resolveHostsLists(executorService, logger, hosts, limitPortCounts, 
transportService, TimeValue.timeValueSeconds(10))); assertEquals(0, addresses.size()); } @@ -147,7 +146,7 @@ private List setupAndRunHostProvider(final List hostEn } return new FileBasedSeedHostsProvider(configPath).getSeedAddresses((hosts, limitPortCounts) -> - UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, + SeedHostsResolver.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, TimeValue.timeValueSeconds(10))); } } diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index a345859ad9eb3..fed9b9552af8c 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -19,28 +19,20 @@ package org.elasticsearch.discovery; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.coordination.NoMasterBlockService; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.BlockMasterServiceOnMaster; import org.elasticsearch.test.disruption.IntermittentLongGCDisruption; -import org.elasticsearch.test.disruption.LongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; @@ -48,21 +40,14 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; -import java.util.Objects; import java.util.Set; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; /** * Tests relating to the loss of the master. @@ -71,121 +56,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class MasterDisruptionIT extends AbstractDisruptionTestCase { - /** - * Tests that emulates a frozen elected master node that unfreezes and pushes his cluster state to other nodes - * that already are following another elected master node. These nodes should reject this cluster state and prevent - * them from following the stale master. 
- */ - @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.test.disruption:TRACE") - public void testStaleMasterNotHijackingMajority() throws Exception { - final List nodes = startCluster(3); - - // Save the current master node as old master node, because that node will get frozen - final String oldMasterNode = internalCluster().getMasterName(); - for (String node : nodes) { - ensureStableCluster(3, node); - } - assertMaster(oldMasterNode, nodes); - - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); - - // Save the majority side - final List majoritySide = new ArrayList<>(nodes); - majoritySide.remove(oldMasterNode); - - // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: - final Map>> masters = Collections.synchronizedMap(new HashMap<>()); - for (final String node : majoritySide) { - masters.put(node, new ArrayList<>()); - internalCluster().getInstance(ClusterService.class, node).addListener(event -> { - DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); - DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); - if (!Objects.equals(previousMaster, currentMaster)) { - logger.info("node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), - event.previousState()); - String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; - String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null; - masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); - } - }); - } - - final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); - internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - oldMasterNodeSteppedDown.countDown(); - } - }); - - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("freezing node [{}]", oldMasterNode); - masterNodeDisruption.startDisrupting(); - - // Wait for the majority side to get stable - assertDifferentMaster(majoritySide.get(0), oldMasterNode); - assertDifferentMaster(majoritySide.get(1), oldMasterNode); - - // the test is periodically tripping on the following assertion. To find out which threads are blocking the nodes from making - // progress we print a stack dump - boolean failed = true; - try { - assertDiscoveryCompleted(majoritySide); - failed = false; - } finally { - if (failed) { - logger.error("discovery failed to complete, probably caused by a blocked thread: {}", - new HotThreads().busiestThreads(Integer.MAX_VALUE).ignoreIdleThreads(false).detect()); - } - } - - // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed, - // but will be queued and once the old master node un-freezes it gets executed. - // The old master node will send this update + the cluster state where he is flagged as master to the other - // nodes that follow the new master. These nodes should ignore this update. 
- internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", new - ClusterStateUpdateTask(Priority.IMMEDIATE) { - @Override - public ClusterState execute(ClusterState currentState) { - return ClusterState.builder(currentState).build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn(() -> new ParameterizedMessage("failure [{}]", source), e); - } - }); - - // Save the new elected master node - final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); - logger.info("new detected master node [{}]", newMasterNode); - - // Stop disruption - logger.info("Unfreeze node [{}]", oldMasterNode); - masterNodeDisruption.stopDisrupting(); - - oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); - // Make sure that the end state is consistent on all nodes: - assertDiscoveryCompleted(nodes); - assertMaster(newMasterNode, nodes); - - assertThat(masters.size(), equalTo(2)); - for (Map.Entry>> entry : masters.entrySet()) { - String nodeName = entry.getKey(); - List> recordedMasterTransition = entry.getValue(); - assertThat("[" + nodeName + "] Each node should only record two master node transitions", - recordedMasterTransition, hasSize(2)); - assertThat("[" + nodeName + "] First transition's previous master should be [" + oldMasterNode + "]", - recordedMasterTransition.get(0).v1(), equalTo(oldMasterNode)); - assertThat("[" + nodeName + "] First transition's current master should be [null]", - recordedMasterTransition.get(0).v2(), nullValue()); - assertThat("[" + nodeName + "] Second transition's previous master should be [null]", - recordedMasterTransition.get(1).v1(), nullValue()); - assertThat("[" + nodeName + "] Second transition's current master should be [" + newMasterNode + "]", - recordedMasterTransition.get(1).v2(), equalTo(newMasterNode)); - } - } - /** * Test that cluster recovers from a long GC on master that causes other nodes to elect a new one */ diff --git a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java index 78a2f2446c5dc..5ffe242dfb208 100644 --- a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java @@ -211,7 +211,7 @@ public void setup() { localNode = newDiscoveryNode("local-node"); ConnectionManager innerConnectionManager - = new ConnectionManager(settings, capturingTransport, deterministicTaskQueue.getThreadPool()); + = new ConnectionManager(settings, capturingTransport); StubbableConnectionManager connectionManager = new StubbableConnectionManager(innerConnectionManager, settings, capturingTransport, deterministicTaskQueue.getThreadPool()); connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> { diff --git a/server/src/test/java/org/elasticsearch/discovery/SeedHostsResolverTests.java b/server/src/test/java/org/elasticsearch/discovery/SeedHostsResolverTests.java index 3254d2e9201a7..0506f5c48e803 100644 --- a/server/src/test/java/org/elasticsearch/discovery/SeedHostsResolverTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/SeedHostsResolverTests.java @@ -19,24 +19,57 @@ package org.elasticsearch.discovery; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkService; 
import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.nio.MockNioTransport; import org.junit.After; import org.junit.Before; +import org.mockito.Matchers; +import java.io.Closeable; +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; import java.util.List; +import java.util.Set; +import java.util.Stack; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.core.IsNull.nullValue; +import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; public class SeedHostsResolverTests extends ESTestCase { @@ -44,6 +77,10 @@ public class SeedHostsResolverTests extends ESTestCase { private List transportAddresses; private SeedHostsResolver seedHostsResolver; private ThreadPool threadPool; + private ExecutorService executorService; + // close in reverse order as opened + private Stack closeables; + @Before public void startResolver() { @@ -55,12 +92,29 @@ public void startResolver() { seedHostsResolver = new SeedHostsResolver("test_node", Settings.EMPTY, transportService, hostsResolver -> transportAddresses); seedHostsResolver.start(); + + final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory("[" + getClass().getName() + "]"); + executorService = + EsExecutors.newScaling( + getClass().getName() + "/" + getTestName(), 0, 2, 60, TimeUnit.SECONDS, threadFactory, threadPool.getThreadContext()); + closeables = new Stack<>(); } @After - public void stopResolver() { + public void stopResolver() throws IOException { seedHostsResolver.stop(); - threadPool.shutdown(); + try { + logger.info("shutting down..."); + // JDK stack is broken, it does not iterate in the expected order (http://bugs.java.com/bugdatabase/view_bug.do?bug_id=4475301) + final List reverse = new ArrayList<>(); + while (!closeables.isEmpty()) { + reverse.add(closeables.pop()); + } + IOUtils.close(reverse); + } finally { + terminate(executorService); + terminate(threadPool); + } } public void testResolvesAddressesInBackgroundAndIgnoresConcurrentCalls() throws Exception { @@ -92,4 +146,239 @@ public void testResolvesAddressesInBackgroundAndIgnoresConcurrentCalls() throws 
assertTrue(endLatch.await(30, TimeUnit.SECONDS)); assertThat(resolvedAddressesRef.get(), equalTo(transportAddresses)); } + + public void testPortLimit() { + final NetworkService networkService = new NetworkService(Collections.emptyList()); + final Transport transport = new MockNioTransport( + Settings.EMPTY, + Version.CURRENT, + threadPool, + networkService, + PageCacheRecycler.NON_RECYCLING_INSTANCE, + new NamedWriteableRegistry(Collections.emptyList()), + new NoneCircuitBreakerService()) { + + @Override + public BoundTransportAddress boundAddress() { + return new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9500)}, + new TransportAddress(InetAddress.getLoopbackAddress(), 9500) + ); + } + }; + closeables.push(transport); + final TransportService transportService = + new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, + Collections.emptySet()); + closeables.push(transportService); + final int limitPortCounts = randomIntBetween(1, 10); + final List transportAddresses = SeedHostsResolver.resolveHostsLists( + executorService, + logger, + Collections.singletonList("127.0.0.1"), + limitPortCounts, + transportService, + TimeValue.timeValueSeconds(30)); + assertThat(transportAddresses, hasSize(limitPortCounts)); + final Set ports = new HashSet<>(); + for (final TransportAddress address : transportAddresses) { + assertTrue(address.address().getAddress().isLoopbackAddress()); + ports.add(address.getPort()); + } + assertThat(ports, equalTo(IntStream.range(9300, 9300 + limitPortCounts).mapToObj(m -> m).collect(Collectors.toSet()))); + } + + public void testRemovingLocalAddresses() { + final NetworkService networkService = new NetworkService(Collections.emptyList()); + final InetAddress loopbackAddress = InetAddress.getLoopbackAddress(); + final Transport transport = new MockNioTransport( + Settings.EMPTY, + Version.CURRENT, + threadPool, + networkService, + PageCacheRecycler.NON_RECYCLING_INSTANCE, + new NamedWriteableRegistry(Collections.emptyList()), + new NoneCircuitBreakerService()) { + + @Override + public BoundTransportAddress boundAddress() { + return new BoundTransportAddress( + new TransportAddress[]{ + new TransportAddress(loopbackAddress, 9300), + new TransportAddress(loopbackAddress, 9301) + }, + new TransportAddress(loopbackAddress, 9302) + ); + } + }; + closeables.push(transport); + final TransportService transportService = + new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, + Collections.emptySet()); + closeables.push(transportService); + final List transportAddresses = SeedHostsResolver.resolveHostsLists( + executorService, + logger, + Collections.singletonList(NetworkAddress.format(loopbackAddress)), + 10, + transportService, + TimeValue.timeValueSeconds(30)); + assertThat(transportAddresses, hasSize(7)); + final Set ports = new HashSet<>(); + for (final TransportAddress address : transportAddresses) { + assertTrue(address.address().getAddress().isLoopbackAddress()); + ports.add(address.getPort()); + } + assertThat(ports, equalTo(IntStream.range(9303, 9310).mapToObj(m -> m).collect(Collectors.toSet()))); + } + + public void testUnknownHost() { + final Logger logger = mock(Logger.class); + final NetworkService networkService = new NetworkService(Collections.emptyList()); + final String hostname = randomAlphaOfLength(8); + final UnknownHostException unknownHostException = new 
UnknownHostException(hostname); + final Transport transport = new MockNioTransport( + Settings.EMPTY, + Version.CURRENT, + threadPool, + networkService, + PageCacheRecycler.NON_RECYCLING_INSTANCE, + new NamedWriteableRegistry(Collections.emptyList()), + new NoneCircuitBreakerService()) { + + @Override + public BoundTransportAddress boundAddress() { + return new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)}, + new TransportAddress(InetAddress.getLoopbackAddress(), 9300) + ); + } + + @Override + public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + throw unknownHostException; + } + + }; + closeables.push(transport); + + final TransportService transportService = + new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, + Collections.emptySet()); + closeables.push(transportService); + + final List transportAddresses = SeedHostsResolver.resolveHostsLists( + executorService, + logger, + Arrays.asList(hostname), + 1, + transportService, + TimeValue.timeValueSeconds(30) + ); + + assertThat(transportAddresses, empty()); + verify(logger).warn("failed to resolve host [" + hostname + "]", unknownHostException); + } + + public void testResolveTimeout() { + final Logger logger = mock(Logger.class); + final NetworkService networkService = new NetworkService(Collections.emptyList()); + final CountDownLatch latch = new CountDownLatch(1); + final Transport transport = new MockNioTransport( + Settings.EMPTY, + Version.CURRENT, + threadPool, + networkService, + PageCacheRecycler.NON_RECYCLING_INSTANCE, + new NamedWriteableRegistry(Collections.emptyList()), + new NoneCircuitBreakerService()) { + + @Override + public BoundTransportAddress boundAddress() { + return new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9500)}, + new TransportAddress(InetAddress.getLoopbackAddress(), 9500) + ); + } + + @Override + public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + if ("hostname1".equals(address)) { + return new TransportAddress[]{new TransportAddress(TransportAddress.META_ADDRESS, 9300)}; + } else if ("hostname2".equals(address)) { + try { + latch.await(); + return new TransportAddress[]{new TransportAddress(TransportAddress.META_ADDRESS, 9300)}; + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } else { + throw new UnknownHostException(address); + } + } + + }; + closeables.push(transport); + + final TransportService transportService = + new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, + Collections.emptySet()); + closeables.push(transportService); + final TimeValue resolveTimeout = TimeValue.timeValueSeconds(randomIntBetween(3, 5)); + try { + final List transportAddresses = SeedHostsResolver.resolveHostsLists( + executorService, + logger, + Arrays.asList("hostname1", "hostname2"), + 1, + transportService, + resolveTimeout); + + assertThat(transportAddresses, hasSize(1)); + verify(logger).trace( + "resolved host [{}] to {}", "hostname1", + new TransportAddress[]{new TransportAddress(TransportAddress.META_ADDRESS, 9300)}); + verify(logger).warn("timed out after [{}] resolving host [{}]", resolveTimeout, "hostname2"); + verifyNoMoreInteractions(logger); + } finally { + latch.countDown(); + } + } + + public void 
testInvalidHosts() { + final Logger logger = mock(Logger.class); + final Transport transport = new MockNioTransport( + Settings.EMPTY, + Version.CURRENT, + threadPool, + new NetworkService(Collections.emptyList()), + PageCacheRecycler.NON_RECYCLING_INSTANCE, + new NamedWriteableRegistry(Collections.emptyList()), + new NoneCircuitBreakerService()) { + @Override + public BoundTransportAddress boundAddress() { + return new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)}, + new TransportAddress(InetAddress.getLoopbackAddress(), 9300) + ); + } + }; + closeables.push(transport); + + final TransportService transportService = + new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, + Collections.emptySet()); + closeables.push(transportService); + final List transportAddresses = SeedHostsResolver.resolveHostsLists( + executorService, + logger, + Arrays.asList("127.0.0.1:9300:9300", "127.0.0.1:9301"), + 1, + transportService, + TimeValue.timeValueSeconds(30)); + assertThat(transportAddresses, hasSize(1)); // only one of the two is valid and will be used + assertThat(transportAddresses.get(0).getAddress(), equalTo("127.0.0.1")); + assertThat(transportAddresses.get(0).getPort(), equalTo(9301)); + verify(logger).warn(eq("failed to resolve host [127.0.0.1:9300:9300]"), Matchers.any(ExecutionException.class)); + } } diff --git a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index 51fef980e3777..c4655bcf7ce9a 100644 --- a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -18,28 +18,44 @@ */ package org.elasticsearch.discovery; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.FollowersChecker; import org.elasticsearch.cluster.coordination.LeaderChecker; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.LongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkLinkDisruptionType; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkUnresponsive; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; +import org.elasticsearch.test.disruption.SingleNodeDisruption; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.test.transport.MockTransportService.TestPlugin; +import org.elasticsearch.test.transport.MockTransportService; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import 
java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; +import java.util.Objects; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static java.util.Collections.singleton; @@ -55,7 +71,7 @@ public class StableMasterDisruptionIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singletonList(TestPlugin.class); + return Collections.singletonList(MockTransportService.TestPlugin.class); } /** @@ -152,4 +168,101 @@ private void testFollowerCheckerAfterMasterReelection(NetworkLinkDisruptionType networkDisruption.stopDisrupting(); ensureStableCluster(3); } + + + /** + * Tests that emulates a frozen elected master node that unfreezes and pushes its cluster state to other nodes that already are + * following another elected master node. These nodes should reject this cluster state and prevent them from following the stale master. + */ + public void testStaleMasterNotHijackingMajority() throws Exception { + final List nodes = internalCluster().startNodes(3, Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .build()); + ensureStableCluster(3); + + // Save the current master node as old master node, because that node will get frozen + final String oldMasterNode = internalCluster().getMasterName(); + + // Simulating a painful gc by suspending all threads for a long time on the current elected master node. + SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); + + // Save the majority side + final List majoritySide = new ArrayList<>(nodes); + majoritySide.remove(oldMasterNode); + + // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: + final Map>> masters = Collections.synchronizedMap(new HashMap<>()); + for (final String node : majoritySide) { + masters.put(node, new ArrayList<>()); + internalCluster().getInstance(ClusterService.class, node).addListener(event -> { + DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); + DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); + if (!Objects.equals(previousMaster, currentMaster)) { + logger.info("--> node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), + event.previousState()); + String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; + String currentMasterNodeName = currentMaster != null ? 
currentMaster.getName() : null; + masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); + } + }); + } + + final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); + internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { + if (event.state().nodes().getMasterNodeId() == null) { + oldMasterNodeSteppedDown.countDown(); + } + }); + + internalCluster().setDisruptionScheme(masterNodeDisruption); + logger.info("--> freezing node [{}]", oldMasterNode); + masterNodeDisruption.startDisrupting(); + + // Wait for majority side to elect a new master + assertBusy(() -> { + for (final Map.Entry>> entry : masters.entrySet()) { + final List> transitions = entry.getValue(); + assertTrue(entry.getKey() + ": " + transitions, + transitions.stream().anyMatch(transition -> transition.v2() != null)); + } + }); + + // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed, but will be queued and + // once the old master node un-freezes it gets executed. The old master node will send this update + the cluster state where it is + // flagged as master to the other nodes that follow the new master. These nodes should ignore this update. + internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", new + ClusterStateUpdateTask(Priority.IMMEDIATE) { + @Override + public ClusterState execute(ClusterState currentState) { + return ClusterState.builder(currentState).build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.warn(() -> new ParameterizedMessage("failure [{}]", source), e); + } + }); + + // Save the new elected master node + final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); + logger.info("--> new detected master node [{}]", newMasterNode); + + // Stop disruption + logger.info("--> unfreezing node [{}]", oldMasterNode); + masterNodeDisruption.stopDisrupting(); + + oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); + logger.info("--> [{}] stepped down as master", oldMasterNode); + ensureStableCluster(3); + + assertThat(masters.size(), equalTo(2)); + for (Map.Entry>> entry : masters.entrySet()) { + String nodeName = entry.getKey(); + List> transitions = entry.getValue(); + assertTrue("[" + nodeName + "] should not apply state from old master [" + oldMasterNode + "] but it did: " + transitions, + transitions.stream().noneMatch(t -> oldMasterNode.equals(t.v2()))); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index 13d314c57505b..c3cae8f10ffce 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -19,33 +19,27 @@ package org.elasticsearch.discovery.single; -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.coordination.JoinHelper; import org.elasticsearch.cluster.service.ClusterService; +import 
org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.SeedHostsProvider; -import org.elasticsearch.discovery.zen.UnicastZenPing; -import org.elasticsearch.discovery.zen.ZenPing; +import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.MockHttpTransport; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.NodeConfigurationSource; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TransportService; -import java.io.Closeable; import java.io.IOException; import java.nio.file.Path; import java.util.Arrays; -import java.util.Collections; -import java.util.Stack; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CountDownLatch; import java.util.function.Function; import static org.hamcrest.Matchers.equalTo; @@ -69,53 +63,6 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - public void testDoesNotRespondToZenPings() throws Exception { - final Settings settings = - Settings.builder().put("cluster.name", internalCluster().getClusterName()).build(); - final Version version = Version.CURRENT; - final Stack closeables = new Stack<>(); - final TestThreadPool threadPool = new TestThreadPool(getClass().getName()); - try { - final MockTransportService pingTransport = - MockTransportService.createNewService(settings, version, threadPool, null); - pingTransport.start(); - closeables.push(pingTransport); - final TransportService nodeTransport = - internalCluster().getInstance(TransportService.class); - // try to ping the single node directly - final SeedHostsProvider provider = - hostsResolver -> Collections.singletonList(nodeTransport.getLocalNode().getAddress()); - final CountDownLatch latch = new CountDownLatch(1); - final DiscoveryNodes nodes = DiscoveryNodes.builder() - .add(nodeTransport.getLocalNode()) - .add(pingTransport.getLocalNode()) - .localNodeId(pingTransport.getLocalNode().getId()) - .build(); - final ClusterName clusterName = new ClusterName(internalCluster().getClusterName()); - final ClusterState state = ClusterState.builder(clusterName).nodes(nodes).build(); - final UnicastZenPing unicastZenPing = - new UnicastZenPing(settings, threadPool, pingTransport, provider, () -> state) { - @Override - protected void finishPingingRound(PingingRound pingingRound) { - latch.countDown(); - super.finishPingingRound(pingingRound); - } - }; - unicastZenPing.start(); - closeables.push(unicastZenPing); - final CompletableFuture responses = new CompletableFuture<>(); - unicastZenPing.ping(responses::complete, TimeValue.timeValueSeconds(3)); - latch.await(); - responses.get(); - assertThat(responses.get().size(), equalTo(0)); - } finally { - while (!closeables.isEmpty()) { - IOUtils.closeWhileHandlingException(closeables.pop()); - } - terminate(threadPool); - } - } - public void testSingleNodesDoNotDiscoverEachOther() throws IOException, InterruptedException { final TransportService service = internalCluster().getInstance(TransportService.class); final int port = service.boundAddress().publishAddress().getPort(); @@ -167,6 +114,78 @@ public Path nodeConfigPath(int nodeOrdinal) { } } + public void 
testCannotJoinNodeWithSingleNodeDiscovery() throws Exception { + MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.start(); + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "test", + JoinHelper.class.getCanonicalName(), + Level.INFO, + "failed to join") { + + @Override + public boolean innerMatch(final LogEvent event) { + return event.getThrown() != null + && event.getThrown().getClass() == RemoteTransportException.class + && event.getThrown().getCause() != null + && event.getThrown().getCause().getClass() == IllegalStateException.class + && event.getThrown().getCause().getMessage().contains( + "cannot join node with [discovery.type] set to [single-node]"); + } + }); + final TransportService service = internalCluster().getInstance(TransportService.class); + final int port = service.boundAddress().publishAddress().getPort(); + final NodeConfigurationSource configurationSource = new NodeConfigurationSource() { + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings + .builder() + .put("discovery.type", "zen") + .put("transport.type", getTestTransportType()) + .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") + /* + * We align the port ranges of the two as then with zen discovery these two + * nodes would find each other. + */ + .put("transport.port", port + "-" + (port + 5 - 1)) + .build(); + } + + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + }; + try (InternalTestCluster other = + new InternalTestCluster( + randomLong(), + createTempDir(), + false, + false, + 1, + 1, + internalCluster().getClusterName(), + configurationSource, + 0, + "other", + Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), + Function.identity())) { + + Logger clusterLogger = LogManager.getLogger(JoinHelper.class); + Loggers.addAppender(clusterLogger, mockAppender); + try { + other.beforeTest(random(), 0); + final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); + assertThat(first.nodes().getSize(), equalTo(1)); + assertBusy(() -> mockAppender.assertAllExpectationsMatched()); + } finally { + Loggers.removeAppender(clusterLogger, mockAppender); + mockAppender.stop(); + } + } + } + public void testStatePersistence() throws Exception { createIndex("test"); internalCluster().fullRestart(); diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java deleted file mode 100644 index c3dfad2d43792..0000000000000 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.discovery.single; - -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterApplier; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; - -import java.io.Closeable; -import java.util.Stack; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Supplier; - -import static org.elasticsearch.test.ClusterServiceUtils.createMasterService; -import static org.hamcrest.Matchers.equalTo; - -public class SingleNodeDiscoveryTests extends ESTestCase { - - public void testInitialJoin() throws Exception { - final Settings settings = Settings.EMPTY; - final Version version = Version.CURRENT; - final ThreadPool threadPool = new TestThreadPool(getClass().getName()); - final Stack stack = new Stack<>(); - try { - final MockTransportService transportService = - MockTransportService.createNewService(settings, version, threadPool, null); - stack.push(transportService); - transportService.start(); - final DiscoveryNode node = transportService.getLocalNode(); - final MasterService masterService = createMasterService(threadPool, node); - AtomicReference clusterState = new AtomicReference<>(); - final SingleNodeDiscovery discovery = - new SingleNodeDiscovery(Settings.EMPTY, transportService, - masterService, new ClusterApplier() { - @Override - public void setInitialState(ClusterState initialState) { - clusterState.set(initialState); - } - - @Override - public void onNewClusterState(String source, Supplier clusterStateSupplier, - ClusterApplyListener listener) { - clusterState.set(clusterStateSupplier.get()); - listener.onSuccess(source); - } - }, null); - discovery.start(); - discovery.startInitialJoin(); - final DiscoveryNodes nodes = clusterState.get().nodes(); - assertThat(nodes.getSize(), equalTo(1)); - assertThat(nodes.getMasterNode().getId(), equalTo(node.getId())); - } finally { - while (!stack.isEmpty()) { - IOUtils.closeWhileHandlingException(stack.pop()); - } - terminate(threadPool); - } - } - -} diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java deleted file mode 100644 index 87e65d17fd9aa..0000000000000 --- a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java +++ /dev/null @@ -1,940 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.discovery.zen; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.Constants; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNode.Role; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.network.NetworkAddress; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.discovery.SettingsBasedSeedHostsProvider; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportConnectionListener; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.TransportSettings; -import org.elasticsearch.transport.nio.MockNioTransport; -import org.junit.After; -import org.junit.Before; -import org.mockito.Matchers; - -import java.io.Closeable; -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.Stack; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.BiFunction; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; -import static 
org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasSize; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; - -public class UnicastZenPingTests extends ESTestCase { - - private ThreadPool threadPool; - private ExecutorService executorService; - // close in reverse order as opened - private Stack closeables; - - @Before - public void setUp() throws Exception { - super.setUp(); - threadPool = new TestThreadPool(getClass().getName()); - final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory("[" + getClass().getName() + "]"); - executorService = - EsExecutors.newScaling( - getClass().getName() + "/" + getTestName(), 0, 2, 60, TimeUnit.SECONDS, threadFactory, threadPool.getThreadContext()); - closeables = new Stack<>(); - } - - @After - public void tearDown() throws Exception { - try { - logger.info("shutting down..."); - // JDK stack is broken, it does not iterate in the expected order (http://bugs.java.com/bugdatabase/view_bug.do?bug_id=4475301) - final List reverse = new ArrayList<>(); - while (!closeables.isEmpty()) { - reverse.add(closeables.pop()); - } - IOUtils.close(reverse); - } finally { - terminate(executorService); - terminate(threadPool); - super.tearDown(); - } - } - - public void testSimplePings() throws IOException, InterruptedException, ExecutionException { - // use ephemeral ports - final Settings settings = Settings.builder().put("cluster.name", "test").put(TransportSettings.PORT.getKey(), 0).build(); - final Settings settingsMismatch = - Settings.builder().put(settings).put("cluster.name", "mismatch").put(TransportSettings.PORT.getKey(), 0).build(); - - NetworkService networkService = new NetworkService(Collections.emptyList()); - - final BiFunction supplier = (s, v) -> new MockNioTransport( - s, - v, - threadPool, - networkService, - PageCacheRecycler.NON_RECYCLING_INSTANCE, - new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()); - - NetworkHandle handleA = startServices(settings, threadPool, "UZP_A", Version.CURRENT, supplier); - closeables.push(handleA.transportService); - NetworkHandle handleB = startServices(settings, threadPool, "UZP_B", Version.CURRENT, supplier); - closeables.push(handleB.transportService); - NetworkHandle handleC = startServices(settingsMismatch, threadPool, "UZP_C", Version.CURRENT, supplier); - closeables.push(handleC.transportService); - final Version versionD; - if (randomBoolean()) { - versionD = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); - } else { - versionD = Version.CURRENT; - } - logger.info("UZP_D version set to [{}]", versionD); - NetworkHandle handleD = startServices(settingsMismatch, threadPool, "UZP_D", versionD, supplier); - closeables.push(handleD.transportService); - - final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build(); - final ClusterState stateMismatch = ClusterState.builder(new ClusterName("mismatch")).version(randomNonNegativeLong()).build(); - - final Settings hostsSettings = Settings.builder() - .putList(DISCOVERY_SEED_HOSTS_SETTING.getKey(), - NetworkAddress.format(new InetSocketAddress(handleA.address.address().getAddress(), 
handleA.address.address().getPort())), - NetworkAddress.format(new InetSocketAddress(handleB.address.address().getAddress(), handleB.address.address().getPort())), - NetworkAddress.format(new InetSocketAddress(handleC.address.address().getAddress(), handleC.address.address().getPort())), - NetworkAddress.format(new InetSocketAddress(handleD.address.address().getAddress(), handleD.address.address().getPort()))) - .put("cluster.name", "test") - .build(); - - Settings hostsSettingsMismatch = Settings.builder().put(hostsSettings).put(settingsMismatch).build(); - ClusterState stateA = ClusterState.builder(state) - .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) - .nodes(DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A")) - .build(); - TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, () -> stateA); - zenPingA.start(); - closeables.push(zenPingA); - - ClusterState stateB = ClusterState.builder(state) - .nodes(DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")) - .build(); - TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, () -> stateB); - zenPingB.start(); - closeables.push(zenPingB); - - ClusterState stateC = ClusterState.builder(stateMismatch) - .nodes(DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C")) - .build(); - TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleC, () -> stateC) { - @Override - protected Version getVersion() { - return versionD; - } - }; - zenPingC.start(); - closeables.push(zenPingC); - - ClusterState stateD = ClusterState.builder(stateMismatch) - .nodes(DiscoveryNodes.builder().add(handleD.node).localNodeId("UZP_D")) - .build(); - TestUnicastZenPing zenPingD = new TestUnicastZenPing(hostsSettingsMismatch, threadPool, handleD, () -> stateD); - zenPingD.start(); - closeables.push(zenPingD); - - logger.info("ping from UZP_A"); - Collection pingResponses = zenPingA.pingAndWait().toList(); - assertThat(pingResponses.size(), equalTo(1)); - ZenPing.PingResponse ping = pingResponses.iterator().next(); - assertThat(ping.node().getId(), equalTo("UZP_B")); - assertThat(ping.getClusterStateVersion(), equalTo(state.version())); - assertPings(handleA, handleB); - assertNoPings(handleA, handleC); // mismatch, shouldn't ping - assertNoPings(handleA, handleD); // mismatch, shouldn't ping - - // ping again, this time from B, - logger.info("ping from UZP_B"); - pingResponses = zenPingB.pingAndWait().toList(); - assertThat(pingResponses.size(), equalTo(1)); - ping = pingResponses.iterator().next(); - assertThat(ping.node().getId(), equalTo("UZP_A")); - assertThat(ping.getClusterStateVersion(), equalTo(ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION)); - assertPings(handleB, handleA); - assertNoPings(handleB, handleC); // mismatch, shouldn't ping - assertNoPings(handleB, handleD); // mismatch, shouldn't ping - - logger.info("ping from UZP_C"); - pingResponses = zenPingC.pingAndWait().toList(); - assertThat(pingResponses.size(), equalTo(1)); - assertNoPings(handleC, handleA); - assertNoPings(handleC, handleB); - assertPings(handleC, handleD); - - logger.info("ping from UZP_D"); - pingResponses = zenPingD.pingAndWait().toList(); - assertThat(pingResponses.size(), equalTo(1)); - assertNoPings(handleD, handleA); - assertNoPings(handleD, handleB); - assertPings(handleD, handleC); - - zenPingC.close(); - handleD.counters.clear(); - logger.info("ping from UZP_D after closing UZP_C"); - 
pingResponses = zenPingD.pingAndWait().toList(); - // check that node does not respond to pings anymore after the ping service has been closed - assertThat(pingResponses.size(), equalTo(0)); - assertNoPings(handleD, handleA); - assertNoPings(handleD, handleB); - assertPings(handleD, handleC); - } - - public void testUnknownHostNotCached() throws ExecutionException, InterruptedException { - // use ephemeral ports - final Settings settings = Settings.builder().put("cluster.name", "test").put(TransportSettings.PORT.getKey(), 0).build(); - - final NetworkService networkService = new NetworkService(Collections.emptyList()); - - final Map addresses = new HashMap<>(); - final BiFunction supplier = (s, v) -> new MockNioTransport( - s, - v, - threadPool, - networkService, - PageCacheRecycler.NON_RECYCLING_INSTANCE, - new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()) { - @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { - final TransportAddress[] transportAddresses = addresses.get(address); - if (transportAddresses == null) { - throw new UnknownHostException(address); - } else { - return transportAddresses; - } - } - }; - - final NetworkHandle handleA = startServices(settings, threadPool, "UZP_A", Version.CURRENT, supplier); - closeables.push(handleA.transportService); - final NetworkHandle handleB = startServices(settings, threadPool, "UZP_B", Version.CURRENT, supplier); - closeables.push(handleB.transportService); - final NetworkHandle handleC = startServices(settings, threadPool, "UZP_C", Version.CURRENT, supplier); - closeables.push(handleC.transportService); - - addresses.put( - "UZP_A", - new TransportAddress[]{ - new TransportAddress( - new InetSocketAddress(handleA.address.address().getAddress(), handleA.address.address().getPort()))}); - addresses.put( - "UZP_C", - new TransportAddress[]{ - new TransportAddress( - new InetSocketAddress(handleC.address.address().getAddress(), handleC.address.address().getPort()))}); - - final Settings hostsSettings = Settings.builder() - .putList(DISCOVERY_SEED_HOSTS_SETTING.getKey(), "UZP_A", "UZP_B", "UZP_C") - .put("cluster.name", "test") - .build(); - - final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build(); - - ClusterState stateA = ClusterState.builder(state) - .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) - .nodes(DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A")) - .build(); - final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, () -> stateA); - zenPingA.start(); - closeables.push(zenPingA); - - ClusterState stateB = ClusterState.builder(state) - .nodes(DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")) - .build(); - TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, () -> stateB); - zenPingB.start(); - closeables.push(zenPingB); - - ClusterState stateC = ClusterState.builder(state) - .nodes(DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C")) - .build(); - TestUnicastZenPing zenPingC = new TestUnicastZenPing(hostsSettings, threadPool, handleC, () -> stateC); - zenPingC.start(); - closeables.push(zenPingC); - - // the presence of an unresolvable host should not prevent resolvable hosts from being pinged - { - final Collection pingResponses = zenPingA.pingAndWait().toList(); - assertThat(pingResponses.size(), equalTo(1)); - 
ZenPing.PingResponse ping = pingResponses.iterator().next(); - assertThat(ping.node().getId(), equalTo("UZP_C")); - assertThat(ping.getClusterStateVersion(), equalTo(state.version())); - assertNoPings(handleA, handleB); - assertPings(handleA, handleC); - assertNull(handleA.counters.get(handleB.address)); - } - - final HashMap moreThan = new HashMap<>(); - // we should see at least one ping to UZP_B, and one more ping than we have already seen to UZP_C - moreThan.put(handleB.address, 0); - moreThan.put(handleC.address, handleA.counters.get(handleC.address).intValue()); - - // now allow UZP_B to be resolvable - addresses.put( - "UZP_B", - new TransportAddress[]{ - new TransportAddress( - new InetSocketAddress(handleB.address.address().getAddress(), handleB.address.address().getPort()))}); - - // now we should see pings to UZP_B; this establishes that host resolutions are not cached - { - handleA.counters.clear(); - final Collection secondPingResponses = zenPingA.pingAndWait().toList(); - assertThat(secondPingResponses.size(), equalTo(2)); - final Set ids = new HashSet<>(secondPingResponses.stream().map(p -> p.node().getId()).collect(Collectors.toList())); - assertThat(ids, equalTo(new HashSet<>(Arrays.asList("UZP_B", "UZP_C")))); - assertPings(handleA, handleB); - assertPings(handleA, handleC); - } - } - - public void testPortLimit() throws InterruptedException { - final NetworkService networkService = new NetworkService(Collections.emptyList()); - final Transport transport = new MockNioTransport( - Settings.EMPTY, - Version.CURRENT, - threadPool, - networkService, - PageCacheRecycler.NON_RECYCLING_INSTANCE, - new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()) { - - @Override - public BoundTransportAddress boundAddress() { - return new BoundTransportAddress( - new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9500)}, - new TransportAddress(InetAddress.getLoopbackAddress(), 9500) - ); - } - }; - closeables.push(transport); - final TransportService transportService = - new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet()); - closeables.push(transportService); - final int limitPortCounts = randomIntBetween(1, 10); - final List transportAddresses = UnicastZenPing.resolveHostsLists( - executorService, - logger, - Collections.singletonList("127.0.0.1"), - limitPortCounts, - transportService, - TimeValue.timeValueSeconds(30)); - assertThat(transportAddresses, hasSize(limitPortCounts)); - final Set ports = new HashSet<>(); - for (final TransportAddress address : transportAddresses) { - assertTrue(address.address().getAddress().isLoopbackAddress()); - ports.add(address.getPort()); - } - assertThat(ports, equalTo(IntStream.range(9300, 9300 + limitPortCounts).mapToObj(m -> m).collect(Collectors.toSet()))); - } - - public void testRemovingLocalAddresses() throws InterruptedException { - final NetworkService networkService = new NetworkService(Collections.emptyList()); - final InetAddress loopbackAddress = InetAddress.getLoopbackAddress(); - final Transport transport = new MockNioTransport( - Settings.EMPTY, - Version.CURRENT, - threadPool, - networkService, - PageCacheRecycler.NON_RECYCLING_INSTANCE, - new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()) { - - @Override - public BoundTransportAddress boundAddress() { - return new BoundTransportAddress( - new TransportAddress[]{ - new 
TransportAddress(loopbackAddress, 9300), - new TransportAddress(loopbackAddress, 9301) - }, - new TransportAddress(loopbackAddress, 9302) - ); - } - }; - closeables.push(transport); - final TransportService transportService = - new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet()); - closeables.push(transportService); - final List transportAddresses = UnicastZenPing.resolveHostsLists( - executorService, - logger, - Collections.singletonList(NetworkAddress.format(loopbackAddress)), - 10, - transportService, - TimeValue.timeValueSeconds(30)); - assertThat(transportAddresses, hasSize(7)); - final Set ports = new HashSet<>(); - for (final TransportAddress address : transportAddresses) { - assertTrue(address.address().getAddress().isLoopbackAddress()); - ports.add(address.getPort()); - } - assertThat(ports, equalTo(IntStream.range(9303, 9310).mapToObj(m -> m).collect(Collectors.toSet()))); - } - - public void testUnknownHost() throws InterruptedException { - final Logger logger = mock(Logger.class); - final NetworkService networkService = new NetworkService(Collections.emptyList()); - final String hostname = randomAlphaOfLength(8); - final UnknownHostException unknownHostException = new UnknownHostException(hostname); - final Transport transport = new MockNioTransport( - Settings.EMPTY, - Version.CURRENT, - threadPool, - networkService, - PageCacheRecycler.NON_RECYCLING_INSTANCE, - new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()) { - - @Override - public BoundTransportAddress boundAddress() { - return new BoundTransportAddress( - new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)}, - new TransportAddress(InetAddress.getLoopbackAddress(), 9300) - ); - } - - @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { - throw unknownHostException; - } - - }; - closeables.push(transport); - - final TransportService transportService = - new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet()); - closeables.push(transportService); - - final List transportAddresses = UnicastZenPing.resolveHostsLists( - executorService, - logger, - Arrays.asList(hostname), - 1, - transportService, - TimeValue.timeValueSeconds(30) - ); - - assertThat(transportAddresses, empty()); - verify(logger).warn("failed to resolve host [" + hostname + "]", unknownHostException); - } - - public void testResolveTimeout() throws InterruptedException { - final Logger logger = mock(Logger.class); - final NetworkService networkService = new NetworkService(Collections.emptyList()); - final CountDownLatch latch = new CountDownLatch(1); - final Transport transport = new MockNioTransport( - Settings.EMPTY, - Version.CURRENT, - threadPool, - networkService, - PageCacheRecycler.NON_RECYCLING_INSTANCE, - new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()) { - - @Override - public BoundTransportAddress boundAddress() { - return new BoundTransportAddress( - new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9500)}, - new TransportAddress(InetAddress.getLoopbackAddress(), 9500) - ); - } - - @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { - if ("hostname1".equals(address)) { - return new 
TransportAddress[]{new TransportAddress(TransportAddress.META_ADDRESS, 9300)}; - } else if ("hostname2".equals(address)) { - try { - latch.await(); - return new TransportAddress[]{new TransportAddress(TransportAddress.META_ADDRESS, 9300)}; - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } else { - throw new UnknownHostException(address); - } - } - - }; - closeables.push(transport); - - final TransportService transportService = - new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet()); - closeables.push(transportService); - final TimeValue resolveTimeout = TimeValue.timeValueSeconds(randomIntBetween(3, 5)); - try { - final List transportAddresses = UnicastZenPing.resolveHostsLists( - executorService, - logger, - Arrays.asList("hostname1", "hostname2"), - 1, - transportService, - resolveTimeout); - - assertThat(transportAddresses, hasSize(1)); - verify(logger).trace( - "resolved host [{}] to {}", "hostname1", - new TransportAddress[]{new TransportAddress(TransportAddress.META_ADDRESS, 9300)}); - verify(logger).warn("timed out after [{}] resolving host [{}]", resolveTimeout, "hostname2"); - verifyNoMoreInteractions(logger); - } finally { - latch.countDown(); - } - } - - public void testResolveReuseExistingNodeConnections() throws ExecutionException, InterruptedException { - final Settings settings = Settings.builder().put("cluster.name", "test").put(TransportSettings.PORT.getKey(), 0).build(); - - NetworkService networkService = new NetworkService(Collections.emptyList()); - - final BiFunction supplier = (s, v) -> new MockNioTransport( - s, - Version.CURRENT, - threadPool, - networkService, - PageCacheRecycler.NON_RECYCLING_INSTANCE, - new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()); - - NetworkHandle handleA = startServices(settings, threadPool, "UZP_A", Version.CURRENT, supplier, EnumSet.allOf(Role.class)); - closeables.push(handleA.transportService); - NetworkHandle handleB = startServices(settings, threadPool, "UZP_B", Version.CURRENT, supplier, EnumSet.allOf(Role.class)); - closeables.push(handleB.transportService); - - final boolean useHosts = randomBoolean(); - final Settings.Builder hostsSettingsBuilder = Settings.builder().put("cluster.name", "test"); - if (useHosts) { - hostsSettingsBuilder.putList(DISCOVERY_SEED_HOSTS_SETTING.getKey(), - NetworkAddress.format(new InetSocketAddress(handleB.address.address().getAddress(), handleB.address.address().getPort())) - ); - } else { - hostsSettingsBuilder.put(DISCOVERY_SEED_HOSTS_SETTING.getKey(), (String) null); - } - final Settings hostsSettings = hostsSettingsBuilder.build(); - - final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build(); - - // connection to reuse - handleA.transportService.connectToNode(handleB.node); - - // install a listener to check that no new connections are made - handleA.transportService.addConnectionListener(new TransportConnectionListener() { - @Override - public void onConnectionOpened(Transport.Connection connection) { - fail("should not open any connections. 
got [" + connection.getNode() + "]"); - } - }); - - final ClusterState stateA = ClusterState.builder(state) - .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) - .nodes(DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId("UZP_A")) - .build(); - final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, () -> stateA); - zenPingA.start(); - closeables.push(zenPingA); - - final ClusterState stateB = ClusterState.builder(state) - .nodes(DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")) - .build(); - TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, () -> stateB); - zenPingB.start(); - closeables.push(zenPingB); - - Collection pingResponses = zenPingA.pingAndWait().toList(); - assertThat(pingResponses.size(), equalTo(1)); - ZenPing.PingResponse ping = pingResponses.iterator().next(); - assertThat(ping.node().getId(), equalTo("UZP_B")); - assertThat(ping.getClusterStateVersion(), equalTo(state.version())); - - } - - public void testPingingTemporalPings() throws ExecutionException, InterruptedException { - final Settings settings = Settings.builder().put("cluster.name", "test").put(TransportSettings.PORT.getKey(), 0).build(); - - NetworkService networkService = new NetworkService(Collections.emptyList()); - - final BiFunction supplier = (s, v) -> new MockNioTransport( - s, - v, - threadPool, - networkService, - PageCacheRecycler.NON_RECYCLING_INSTANCE, - new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()); - - NetworkHandle handleA = startServices(settings, threadPool, "UZP_A", Version.CURRENT, supplier, EnumSet.allOf(Role.class)); - closeables.push(handleA.transportService); - NetworkHandle handleB = startServices(settings, threadPool, "UZP_B", Version.CURRENT, supplier, EnumSet.allOf(Role.class)); - closeables.push(handleB.transportService); - - final Settings hostsSettings = Settings.builder() - .put("cluster.name", "test") - .put(DISCOVERY_SEED_HOSTS_SETTING.getKey(), (String) null) // use nodes for simplicity - .build(); - - final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()).build(); - final ClusterState stateA = ClusterState.builder(state) - .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) - .nodes(DiscoveryNodes.builder().add(handleA.node).add(handleB.node).localNodeId("UZP_A")).build(); - - final TestUnicastZenPing zenPingA = new TestUnicastZenPing(hostsSettings, threadPool, handleA, () -> stateA); - zenPingA.start(); - closeables.push(zenPingA); - - // Node B doesn't know about A! 
- final ClusterState stateB = ClusterState.builder(state).nodes( - DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B")).build(); - TestUnicastZenPing zenPingB = new TestUnicastZenPing(hostsSettings, threadPool, handleB, () -> stateB); - zenPingB.start(); - closeables.push(zenPingB); - - { - logger.info("pinging from UZP_A so UZP_B will learn about it"); - Collection pingResponses = zenPingA.pingAndWait().toList(); - assertThat(pingResponses.size(), equalTo(1)); - ZenPing.PingResponse ping = pingResponses.iterator().next(); - assertThat(ping.node().getId(), equalTo("UZP_B")); - assertThat(ping.getClusterStateVersion(), equalTo(state.version())); - } - { - logger.info("pinging from UZP_B"); - Collection pingResponses = zenPingB.pingAndWait().toList(); - assertThat(pingResponses.size(), equalTo(1)); - ZenPing.PingResponse ping = pingResponses.iterator().next(); - assertThat(ping.node().getId(), equalTo("UZP_A")); - assertThat(ping.getClusterStateVersion(), equalTo(-1L)); // A has a block - } - } - - public void testInvalidHosts() throws InterruptedException { - final Logger logger = mock(Logger.class); - final Transport transport = new MockNioTransport( - Settings.EMPTY, - Version.CURRENT, - threadPool, - new NetworkService(Collections.emptyList()), - PageCacheRecycler.NON_RECYCLING_INSTANCE, - new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()) { - @Override - public BoundTransportAddress boundAddress() { - return new BoundTransportAddress( - new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)}, - new TransportAddress(InetAddress.getLoopbackAddress(), 9300) - ); - } - }; - closeables.push(transport); - - final TransportService transportService = - new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet()); - closeables.push(transportService); - final List transportAddresses = UnicastZenPing.resolveHostsLists( - executorService, - logger, - Arrays.asList("127.0.0.1:9300:9300", "127.0.0.1:9301"), - 1, - transportService, - TimeValue.timeValueSeconds(30)); - assertThat(transportAddresses, hasSize(1)); // only one of the two is valid and will be used - assertThat(transportAddresses.get(0).getAddress(), equalTo("127.0.0.1")); - assertThat(transportAddresses.get(0).getPort(), equalTo(9301)); - verify(logger).warn(eq("failed to resolve host [127.0.0.1:9300:9300]"), Matchers.any(ExecutionException.class)); - } - - private void assertNoPings(final NetworkHandle fromNode, final NetworkHandle toNode) { - final AtomicInteger counter = fromNode.counters.getOrDefault(toNode.address, new AtomicInteger()); - final String onNodeName = fromNode.node.getName(); - assertNotNull("handle for [" + onNodeName + "] has no 'expected' counter", counter); - final String forNodeName = toNode.node.getName(); - assertThat("node [" + onNodeName + "] ping count to [" + forNodeName + "] is unexpected", - counter.get(), equalTo(0)); - } - - private void assertPings(final NetworkHandle fromNode, final NetworkHandle toNode) { - final AtomicInteger counter = fromNode.counters.getOrDefault(toNode.address, new AtomicInteger()); - final String onNodeName = fromNode.node.getName(); - assertNotNull("handle for [" + onNodeName + "] has no 'expected' counter", counter); - final String forNodeName = toNode.node.getName(); - if (Constants.WINDOWS) { - // Some of the ping attempts seem to sporadically fail on Windows (see 
https://github.com/elastic/elasticsearch/issues/28685) - // Anyhow, the point of the test is not to assert the exact number of pings, but to check if pinging has taken place or not - assertThat("node [" + onNodeName + "] ping count to [" + forNodeName + "] is unexpected", - counter.get(), greaterThan(0)); - } else { - assertThat("node [" + onNodeName + "] ping count to [" + forNodeName + "] is unexpected", - counter.get(), equalTo(3)); - } - - } - - private NetworkHandle startServices( - final Settings settings, - final ThreadPool threadPool, - final String nodeId, - final Version version, - final BiFunction supplier) { - return startServices(settings, threadPool, nodeId, version, supplier, emptySet()); - - } - - private NetworkHandle startServices( - final Settings settings, - final ThreadPool threadPool, - final String nodeId, - final Version version, - final BiFunction supplier, - final Set nodeRoles) { - final Settings nodeSettings = Settings.builder().put(settings) - .put("node.name", nodeId) - .put(TransportSettings.TRACE_LOG_INCLUDE_SETTING.getKey(), "internal:discovery/zen/unicast") - .build(); - final Transport transport = supplier.apply(nodeSettings, version); - final MockTransportService transportService = - new MockTransportService(nodeSettings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> - new DiscoveryNode(nodeId, nodeId, boundAddress.publishAddress(), emptyMap(), nodeRoles, version), null, - Collections.emptySet()); - transportService.start(); - transportService.acceptIncomingRequests(); - final ConcurrentMap counters = ConcurrentCollections.newConcurrentMap(); - transportService.addTracer(new MockTransportService.Tracer() { - @Override - public void requestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) { - counters.computeIfAbsent(node.getAddress(), k -> new AtomicInteger()); - counters.get(node.getAddress()).incrementAndGet(); - } - }); - return new NetworkHandle(transport.boundAddress().publishAddress(), transportService, transportService.getLocalNode(), counters); - } - - private static class NetworkHandle { - - public final TransportAddress address; - public final TransportService transportService; - public final DiscoveryNode node; - public final ConcurrentMap counters; - - NetworkHandle( - final TransportAddress address, - final TransportService transportService, - final DiscoveryNode discoveryNode, - final ConcurrentMap counters) { - this.address = address; - this.transportService = transportService; - this.node = discoveryNode; - this.counters = counters; - } - } - - private static class TestUnicastZenPing extends UnicastZenPing { - - private static final Logger logger = LogManager.getLogger(TestUnicastZenPing.class); - - TestUnicastZenPing(Settings settings, ThreadPool threadPool, NetworkHandle networkHandle, - PingContextProvider contextProvider) { - super(Settings.builder().put("node.name", networkHandle.node.getName()).put(settings).build(), - threadPool, networkHandle.transportService, - new SettingsBasedSeedHostsProvider(settings, networkHandle.transportService), contextProvider); - } - - volatile CountDownLatch allTasksCompleted; - volatile AtomicInteger pendingTasks; - volatile CountDownLatch pingingRoundClosed; - - PingCollection pingAndWait() throws ExecutionException, InterruptedException { - allTasksCompleted = new CountDownLatch(1); - pingingRoundClosed = new CountDownLatch(1); - pendingTasks = new AtomicInteger(); - // mark the three sending rounds as ongoing - 
markTaskAsStarted("send pings"); - markTaskAsStarted("send pings"); - markTaskAsStarted("send pings"); - final AtomicReference response = new AtomicReference<>(); - ping(response::set, TimeValue.timeValueMillis(1), TimeValue.timeValueSeconds(1)); - pingingRoundClosed.await(); - final PingCollection result = response.get(); - assertNotNull("pinging didn't complete", result); - return result; - } - - @Override - protected void finishPingingRound(PingingRound pingingRound) { - // wait for all activity to finish before closing - try { - allTasksCompleted.await(); - } catch (InterruptedException e) { - // ok, finish anyway - } - super.finishPingingRound(pingingRound); - pingingRoundClosed.countDown(); - } - - @Override - protected void sendPings(TimeValue timeout, PingingRound pingingRound) { - super.sendPings(timeout, pingingRound); - markTaskAsCompleted("send pings"); - } - - @Override - protected void submitToExecutor(AbstractRunnable abstractRunnable) { - markTaskAsStarted("executor runnable"); - super.submitToExecutor(new AbstractRunnable() { - @Override - public void onRejection(Exception e) { - try { - super.onRejection(e); - } finally { - markTaskAsCompleted("executor runnable (rejected)"); - } - } - - @Override - public void onAfter() { - markTaskAsCompleted("executor runnable"); - } - - @Override - protected void doRun() throws Exception { - abstractRunnable.run(); - } - - @Override - public void onFailure(Exception e) { - // we shouldn't really end up here. - throw new AssertionError("unexpected error", e); - } - }); - } - - private void markTaskAsStarted(String task) { - logger.trace("task [{}] started. count [{}]", task, pendingTasks.incrementAndGet()); - } - - private void markTaskAsCompleted(String task) { - final int left = pendingTasks.decrementAndGet(); - logger.trace("task [{}] completed. 
count [{}]", task, left); - if (left == 0) { - allTasksCompleted.countDown(); - } - } - - @Override - protected TransportResponseHandler getPingResponseHandler(PingingRound pingingRound, DiscoveryNode node) { - markTaskAsStarted("ping [" + node + "]"); - TransportResponseHandler original = super.getPingResponseHandler(pingingRound, node); - return new TransportResponseHandler() { - @Override - public UnicastPingResponse read(StreamInput in) throws IOException { - return original.read(in); - } - - @Override - public void handleResponse(UnicastPingResponse response) { - original.handleResponse(response); - markTaskAsCompleted("ping [" + node + "]"); - } - - @Override - public void handleException(TransportException exp) { - original.handleException(exp); - markTaskAsCompleted("ping [" + node + "] (error)"); - } - - @Override - public String executor() { - return original.executor(); - } - }; - } - } - -} diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index 084ba62c4792d..49163f9aa1fb1 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.coordination.JoinTaskExecutor; import org.elasticsearch.cluster.coordination.NodeRemovalClusterStateTaskExecutor; +import org.elasticsearch.cluster.coordination.ValidateJoinRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -421,13 +422,13 @@ public void testValidateOnUnsupportedIndexVersionCreated() throws Exception { .routingTable(RoutingTable.builder().add(indexRoutingTable).build()); if (incompatible) { IllegalStateException ex = expectThrows(IllegalStateException.class, () -> - request.messageReceived(new MembershipAction.ValidateJoinRequest(stateBuilder.build()), null, null)); + request.messageReceived(new ValidateJoinRequest(stateBuilder.build()), null, null)); assertEquals("index [test] version not supported: " + VersionUtils.getPreviousVersion(Version.CURRENT.minimumIndexCompatibilityVersion()) + " minimum compatible index version is: " + Version.CURRENT.minimumIndexCompatibilityVersion(), ex.getMessage()); } else { AtomicBoolean sendResponse = new AtomicBoolean(false); - request.messageReceived(new MembershipAction.ValidateJoinRequest(stateBuilder.build()), new TransportChannel() { + request.messageReceived(new ValidateJoinRequest(stateBuilder.build()), new TransportChannel() { @Override public String getProfileName() { diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index a667514fa7ef2..89a10af1a6fc2 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -135,11 +135,11 @@ public void testShardLock() throws Exception { final NodeEnvironment env = newNodeEnvironment(); Index index = new Index("foo", "fooUUID"); - ShardLock fooLock = env.shardLock(new ShardId(index, 0)); + ShardLock fooLock = env.shardLock(new ShardId(index, 0), "1"); assertEquals(new ShardId(index, 0), fooLock.getShardId()); try { - env.shardLock(new ShardId(index, 0)); + env.shardLock(new 
ShardId(index, 0), "2"); fail("shard is locked"); } catch (ShardLockObtainFailedException ex) { // expected @@ -149,7 +149,7 @@ public void testShardLock() throws Exception { Files.createDirectories(path.resolve("1")); } try { - env.lockAllForIndex(index, idxSettings, randomIntBetween(0, 10)); + env.lockAllForIndex(index, idxSettings, "3", randomIntBetween(0, 10)); fail("shard 0 is locked"); } catch (ShardLockObtainFailedException ex) { // expected @@ -157,11 +157,11 @@ public void testShardLock() throws Exception { fooLock.close(); // can lock again? - env.shardLock(new ShardId(index, 0)).close(); + env.shardLock(new ShardId(index, 0), "4").close(); - List locks = env.lockAllForIndex(index, idxSettings, randomIntBetween(0, 10)); + List locks = env.lockAllForIndex(index, idxSettings, "5", randomIntBetween(0, 10)); try { - env.shardLock(new ShardId(index, 0)); + env.shardLock(new ShardId(index, 0), "6"); fail("shard is locked"); } catch (ShardLockObtainFailedException ex) { // expected @@ -239,7 +239,7 @@ public void testResolveIndexFolders() throws Exception { public void testDeleteSafe() throws Exception { final NodeEnvironment env = newNodeEnvironment(); final Index index = new Index("foo", "fooUUID"); - ShardLock fooLock = env.shardLock(new ShardId(index, 0)); + ShardLock fooLock = env.shardLock(new ShardId(index, 0), "1"); assertEquals(new ShardId(index, 0), fooLock.getShardId()); for (Path path : env.indexPaths(index)) { @@ -295,7 +295,7 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { start.await(); - try (ShardLock autoCloses = env.shardLock(new ShardId(index, 0))) { + try (ShardLock autoCloses = env.shardLock(new ShardId(index, 0), "2")) { blockLatch.countDown(); Thread.sleep(randomIntBetween(1, 10)); } @@ -353,7 +353,7 @@ public void run() { for (int i = 0; i < iters; i++) { int shard = randomIntBetween(0, counts.length - 1); try { - try (ShardLock autoCloses = env.shardLock(new ShardId("foo", "fooUUID", shard), + try (ShardLock autoCloses = env.shardLock(new ShardId("foo", "fooUUID", shard), "1", scaledRandomIntBetween(0, 10))) { counts[shard].value++; countsAtomic[shard].incrementAndGet(); diff --git a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java new file mode 100644 index 0000000000000..c07d710f60508 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandIT.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.env; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.NoShardAvailableActionException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESIntegTestCase; +import org.hamcrest.Matcher; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; +import static org.mockito.Matchers.contains; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class NodeRepurposeCommandIT extends ESIntegTestCase { + + public void testRepurpose() throws Exception { + final String indexName = "test-repurpose"; + + logger.info("--> starting two nodes"); + internalCluster().startMasterOnlyNode(); + internalCluster().startDataOnlyNode(); + + logger.info("--> creating index"); + prepareCreate(indexName, Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + ).get(); + final String indexUUID = resolveIndex(indexName).getUUID(); + + logger.info("--> indexing a simple document"); + client().prepareIndex(indexName, "type1", "1").setSource("field1", "value1").get(); + + ensureGreen(); + + assertTrue(client().prepareGet(indexName, "type1", "1").get().isExists()); + + final Settings noMasterNoDataSettings = Settings.builder() + .put(Node.NODE_DATA_SETTING.getKey(), false) + .put(Node.NODE_MASTER_SETTING.getKey(), false) + .build(); + + internalCluster().stopRandomDataNode(); + + // verify test setup + logger.info("--> restarting node with node.data=false and node.master=false"); + IllegalStateException ex = expectThrows(IllegalStateException.class, + "Node started with node.data=false and node.master=false while having existing index metadata must fail", + () -> internalCluster().startCoordinatingOnlyNode(Settings.EMPTY) + ); + + logger.info("--> Repurposing node 1"); + executeRepurposeCommandForOrdinal(noMasterNoDataSettings, indexUUID, 1, 1); + + ElasticsearchException lockedException = expectThrows(ElasticsearchException.class, + () -> executeRepurposeCommandForOrdinal(noMasterNoDataSettings, indexUUID, 0, 1) + ); + + assertThat(lockedException.getMessage(), containsString(NodeRepurposeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG)); + + logger.info("--> Starting node after repurpose"); + internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + + assertTrue(indexExists(indexName)); + expectThrows(NoShardAvailableActionException.class, () -> client().prepareGet(indexName, "type1", "1").get()); + + logger.info("--> Restarting and repurposing other node"); + + internalCluster().stopRandomNode(s -> true); + internalCluster().stopRandomNode(s -> true); + + executeRepurposeCommandForOrdinal(noMasterNoDataSettings, indexUUID, 0, 0); + + // by restarting as master and data node, we can check that the index definition was really deleted and also that the tool + // does not mess things up so much that the nodes cannot boot as master or data node any longer. + internalCluster().startMasterOnlyNode(); + internalCluster().startDataOnlyNode(); + + ensureGreen(); + + // index is gone. 
+ assertFalse(indexExists(indexName)); + } + + private void executeRepurposeCommandForOrdinal(Settings settings, String indexUUID, int ordinal, + int expectedShardCount) throws Exception { + boolean verbose = randomBoolean(); + Settings settingsWithPath = Settings.builder().put(internalCluster().getDefaultSettings()).put(settings).build(); + int expectedIndexCount = TestEnvironment.newEnvironment(settingsWithPath).dataFiles().length; + Matcher matcher = allOf( + containsString(NodeRepurposeCommand.noMasterMessage(1, expectedShardCount, expectedIndexCount)), + not(contains(NodeRepurposeCommand.PRE_V7_MESSAGE)), + NodeRepurposeCommandTests.conditionalNot(containsString(indexUUID), verbose == false)); + NodeRepurposeCommandTests.verifySuccess(settingsWithPath, matcher, + verbose, ordinal); + } +} diff --git a/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java new file mode 100644 index 0000000000000..436439d64db1f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/NodeRepurposeCommandTests.java @@ -0,0 +1,326 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.env; + +import joptsimple.OptionSet; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.Manifest; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.CheckedRunnable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.index.Index; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matcher; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collections; +import java.util.stream.Stream; + +import static org.elasticsearch.env.NodeRepurposeCommand.NO_CLEANUP; +import static org.elasticsearch.env.NodeRepurposeCommand.NO_DATA_TO_CLEAN_UP_FOUND; +import static org.elasticsearch.env.NodeRepurposeCommand.NO_SHARD_DATA_TO_CLEAN_UP_FOUND; +import static org.elasticsearch.env.NodeRepurposeCommand.PRE_V7_MESSAGE; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; + +public class NodeRepurposeCommandTests extends ESTestCase { + + private static final Index INDEX = new Index("testIndex", "testUUID"); + private Settings dataMasterSettings; + private Environment environment; + private Path[] nodePaths; + private Settings dataNoMasterSettings; + private Settings noDataNoMasterSettings; + private Settings noDataMasterSettings; + + @Before + public void createNodePaths() throws IOException { + dataMasterSettings = buildEnvSettings(Settings.EMPTY); + environment = TestEnvironment.newEnvironment(dataMasterSettings); + try (NodeEnvironment nodeEnvironment = new NodeEnvironment(dataMasterSettings, environment)) { + nodePaths = nodeEnvironment.nodeDataPaths(); + } + dataNoMasterSettings = Settings.builder() + .put(dataMasterSettings) + .put(Node.NODE_MASTER_SETTING.getKey(), false) + .build(); + noDataNoMasterSettings = Settings.builder() + .put(dataMasterSettings) + .put(Node.NODE_DATA_SETTING.getKey(), false) + .put(Node.NODE_MASTER_SETTING.getKey(), false) + .build(); + noDataMasterSettings = Settings.builder() + .put(dataMasterSettings) + .put(Node.NODE_DATA_SETTING.getKey(), false) + .put(Node.NODE_MASTER_SETTING.getKey(), true) + .build(); + } + + public void testEarlyExitNoCleanup() throws Exception { + createIndexDataFiles(dataMasterSettings, randomInt(10)); + + verifyNoQuestions(dataMasterSettings, containsString(NO_CLEANUP)); + verifyNoQuestions(dataNoMasterSettings, containsString(NO_CLEANUP)); + } + + public void testNothingToCleanup() throws Exception { + verifyNoQuestions(noDataNoMasterSettings, allOf(containsString(NO_DATA_TO_CLEAN_UP_FOUND), not(containsString(PRE_V7_MESSAGE)))); + verifyNoQuestions(noDataMasterSettings, + allOf(containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND), not(containsString(PRE_V7_MESSAGE)))); + + createManifest(null); + + verifyNoQuestions(noDataNoMasterSettings, allOf(containsString(NO_DATA_TO_CLEAN_UP_FOUND), not(containsString(PRE_V7_MESSAGE)))); + verifyNoQuestions(noDataMasterSettings, + allOf(containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND), 
not(containsString(PRE_V7_MESSAGE)))); + + createIndexDataFiles(dataMasterSettings, 0); + + verifyNoQuestions(noDataMasterSettings, + allOf(containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND), not(containsString(PRE_V7_MESSAGE)))); + + } + + public void testLocked() throws IOException { + try (NodeEnvironment env = new NodeEnvironment(dataMasterSettings, TestEnvironment.newEnvironment(dataMasterSettings))) { + assertThat(expectThrows(ElasticsearchException.class, + () -> verifyNoQuestions(noDataNoMasterSettings, null)).getMessage(), + containsString(NodeRepurposeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG)); + } + } + + public void testCleanupAll() throws Exception { + Manifest oldManifest = createManifest(INDEX); + checkCleanupAll(not(containsString(PRE_V7_MESSAGE))); + + Manifest newManifest = loadManifest(); + assertThat(newManifest.getIndexGenerations().entrySet(), hasSize(0)); + assertManifestIdenticalExceptIndices(oldManifest, newManifest); + } + + public void testCleanupAllPreV7() throws Exception { + checkCleanupAll(containsString(PRE_V7_MESSAGE)); + } + + private void checkCleanupAll(Matcher additionalOutputMatcher) throws Exception { + int shardCount = randomInt(10); + boolean verbose = randomBoolean(); + createIndexDataFiles(dataMasterSettings, shardCount); + + String messageText = NodeRepurposeCommand.noMasterMessage( + 1, + environment.dataFiles().length*shardCount, + environment.dataFiles().length); + + Matcher outputMatcher = allOf( + containsString(messageText), + additionalOutputMatcher, + conditionalNot(containsString("testUUID"), verbose == false), + conditionalNot(containsString("testIndex"), verbose == false) + ); + + verifyUnchangedOnAbort(noDataNoMasterSettings, outputMatcher, verbose); + + // verify test setup + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataNoMasterSettings, environment).close()); + + verifySuccess(noDataNoMasterSettings, outputMatcher, verbose); + + //verify cleaned. + new NodeEnvironment(noDataNoMasterSettings, environment).close(); + } + + public void testCleanupShardData() throws Exception { + int shardCount = randomIntBetween(1, 10); + boolean verbose = randomBoolean(); + Manifest manifest = randomBoolean() ? createManifest(INDEX) : null; + + createIndexDataFiles(dataMasterSettings, shardCount); + + Matcher matcher = allOf( + containsString(NodeRepurposeCommand.shardMessage(environment.dataFiles().length * shardCount, 1)), + conditionalNot(containsString("testUUID"), verbose == false), + conditionalNot(containsString("testIndex"), verbose == false) + ); + + verifyUnchangedOnAbort(noDataMasterSettings, + matcher, verbose); + + // verify test setup + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataMasterSettings, environment).close()); + + verifySuccess(noDataMasterSettings, matcher, verbose); + + //verify clean. 
+ new NodeEnvironment(noDataMasterSettings, environment).close(); + + if (manifest != null) { + Manifest newManifest = loadManifest(); + assertThat(newManifest.getIndexGenerations().entrySet(), hasSize(1)); + assertManifestIdenticalExceptIndices(manifest, newManifest); + } + } + + private static void verifySuccess(Settings settings, Matcher outputMatcher, boolean verbose) throws Exception { + verifySuccess(settings, outputMatcher, verbose, 0); + } + + static void verifySuccess(Settings settings, Matcher outputMatcher, boolean verbose, int ordinal) throws Exception { + withTerminal(verbose, outputMatcher, terminal -> { + terminal.addTextInput(randomFrom("y", "Y")); + executeRepurposeCommand(terminal, settings, ordinal); + assertThat(terminal.getOutput(), containsString("Node successfully repurposed")); + }); + } + + private void verifyUnchangedOnAbort(Settings settings, Matcher outputMatcher, boolean verbose) throws Exception { + withTerminal(verbose, outputMatcher, terminal -> { + terminal.addTextInput(randomFrom("yy", "Yy", "n", "yes", "true", "N", "no")); + verifyUnchangedDataFiles(() -> { + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> executeRepurposeCommand(terminal, settings, 0)); + assertThat(exception.getMessage(), containsString(NodeRepurposeCommand.ABORTED_BY_USER_MSG)); + }); + }); + } + + private void verifyNoQuestions(Settings settings, Matcher outputMatcher) throws Exception { + withTerminal(false, outputMatcher, terminal -> { + executeRepurposeCommand(terminal, settings, 0); + }); + } + + private static void withTerminal(boolean verbose, Matcher outputMatcher, + CheckedConsumer consumer) throws Exception { + MockTerminal terminal = new MockTerminal(); + if (verbose) { + terminal.setVerbosity(Terminal.Verbosity.VERBOSE); + } + + consumer.accept(terminal); + + assertThat(terminal.getOutput(), outputMatcher); + + expectThrows(IllegalStateException.class, "Must consume input", () -> terminal.readText("")); + } + + private static void executeRepurposeCommand(MockTerminal terminal, Settings settings, int ordinal) throws Exception { + NodeRepurposeCommand nodeRepurposeCommand = new NodeRepurposeCommand(); + OptionSet options = nodeRepurposeCommand.getParser() + .parse(ordinal != 0 ? new String[]{"--ordinal", Integer.toString(ordinal)} : new String[0]); + Environment env = TestEnvironment.newEnvironment(settings); + nodeRepurposeCommand.testExecute(terminal, options, env); + } + + private Manifest createManifest(Index index) throws org.elasticsearch.gateway.WriteStateException { + Manifest manifest = new Manifest(randomIntBetween(1,100), randomIntBetween(1,100), randomIntBetween(1,100), + index != null ? 
Collections.singletonMap(index, randomLongBetween(1,100)) : Collections.emptyMap()); + Manifest.FORMAT.writeAndCleanup(manifest, nodePaths); + return manifest; + } + + private Manifest loadManifest() throws IOException { + return Manifest.FORMAT.loadLatestState(logger, new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), nodePaths); + } + + private void assertManifestIdenticalExceptIndices(Manifest oldManifest, Manifest newManifest) { + assertEquals(oldManifest.getGlobalGeneration(), newManifest.getGlobalGeneration()); + assertEquals(oldManifest.getClusterStateVersion(), newManifest.getClusterStateVersion()); + assertEquals(oldManifest.getCurrentTerm(), newManifest.getCurrentTerm()); + } + + private void createIndexDataFiles(Settings settings, int shardCount) throws IOException { + int shardDataDirNumber = randomInt(10); + try (NodeEnvironment env = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings))) { + IndexMetaData.FORMAT.write(IndexMetaData.builder(INDEX.getName()) + .settings(Settings.builder().put("index.version.created", Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(1) + .build(), env.indexPaths(INDEX)); + for (Path path : env.indexPaths(INDEX)) { + for (int i = 0; i < shardCount; ++i) { + Files.createDirectories(path.resolve(Integer.toString(shardDataDirNumber))); + shardDataDirNumber += randomIntBetween(1,10); + } + } + } + } + + private void verifyUnchangedDataFiles(CheckedRunnable runnable) throws Exception { + long before = digestPaths(); + runnable.run(); + long after = digestPaths(); + assertEquals("Must not touch files", before, after); + } + + private long digestPaths() { + // use a commutative digest to avoid dependency on file system order. + return Arrays.stream(environment.dataFiles()).mapToLong(this::digestPath).sum(); + } + + private long digestPath(Path path) { + try (Stream paths = Files.walk(path)) { + return paths.mapToLong(this::digestSinglePath).sum(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private long digestSinglePath(Path path) { + if (Files.isDirectory(path)) + return path.toString().hashCode(); + else + return path.toString().hashCode() + digest(readAllBytes(path)); + + } + + private byte[] readAllBytes(Path path) { + try { + return Files.readAllBytes(path); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private long digest(byte[] bytes) { + long result = 0; + for (byte b : bytes) { + result *= 31; + result += b; + } + return result; + } + + static Matcher conditionalNot(Matcher matcher, boolean condition) { + return condition ? 
not(matcher) : matcher; + } +} diff --git a/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java index cae33db90a6bc..999d80586fea4 100644 --- a/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java @@ -33,10 +33,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.Set; @@ -47,7 +45,6 @@ import static org.elasticsearch.cluster.metadata.MetaData.CLUSTER_READ_ONLY_BLOCK; import static org.elasticsearch.gateway.ClusterStateUpdaters.addStateNotRecoveredBlock; -import static org.elasticsearch.gateway.ClusterStateUpdaters.closeBadIndices; import static org.elasticsearch.gateway.ClusterStateUpdaters.hideStateIfNotRecovered; import static org.elasticsearch.gateway.ClusterStateUpdaters.mixCurrentStateAndRecoveredState; import static org.elasticsearch.gateway.ClusterStateUpdaters.recoverClusterBlocks; @@ -59,8 +56,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; public class ClusterStateUpdatersTests extends ESTestCase { @@ -201,32 +196,6 @@ public void testAddStateNotRecoveredBlock() { assertTrue(newState.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)); } - public void testCloseBadIndices() throws IOException { - final IndicesService indicesService = mock(IndicesService.class); - final IndexMetaData good = createIndexMetaData("good", Settings.EMPTY); - final IndexMetaData bad = createIndexMetaData("bad", Settings.EMPTY); - final IndexMetaData ugly = IndexMetaData.builder(createIndexMetaData("ugly", Settings.EMPTY)) - .state(IndexMetaData.State.CLOSE) - .build(); - - final ClusterState initialState = ClusterState - .builder(ClusterState.EMPTY_STATE) - .metaData(MetaData.builder() - .put(good, false) - .put(bad, false) - .put(ugly, false) - .build()) - .build(); - - doThrow(new RuntimeException("test")).when(indicesService).verifyIndexMetadata(bad, bad); - doThrow(new AssertionError("ugly index is already closed")).when(indicesService).verifyIndexMetadata(ugly, ugly); - - final ClusterState newState = closeBadIndices(initialState, indicesService); - assertThat(newState.metaData().index(good.getIndex()).getState(), equalTo(IndexMetaData.State.OPEN)); - assertThat(newState.metaData().index(bad.getIndex()).getState(), equalTo(IndexMetaData.State.CLOSE)); - assertThat(newState.metaData().index(ugly.getIndex()).getState(), equalTo(IndexMetaData.State.CLOSE)); - } - public void testUpdateRoutingTable() { final int numOfShards = randomIntBetween(1, 10); diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index ebdae985a39c7..5a0ef2df5f410 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -34,12 +34,15 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import 
org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.indices.IndexClosedException; @@ -51,12 +54,14 @@ import java.io.IOException; import java.util.List; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) @@ -369,9 +374,20 @@ public Settings onNodeStopped(String nodeName) throws Exception { } }); - // ensureGreen(closedIndex) waits for the index to show up in the metadata - // this is crucial otherwise the state call below might not contain the index yet - ensureGreen(metaData.getIndex().getName()); + // check that the cluster does not keep reallocating shards + assertBusy(() -> { + final RoutingTable routingTable = client().admin().cluster().prepareState().get().getState().routingTable(); + final IndexRoutingTable indexRoutingTable = routingTable.index("test"); + assertNotNull(indexRoutingTable); + for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) { + assertTrue(shardRoutingTable.primaryShard().unassigned()); + assertEquals(UnassignedInfo.AllocationStatus.DECIDERS_NO, + shardRoutingTable.primaryShard().unassignedInfo().getLastAllocationStatus()); + assertThat(shardRoutingTable.primaryShard().unassignedInfo().getNumFailedAllocations(), greaterThan(0)); + } + }, 60, TimeUnit.SECONDS); + client().admin().indices().prepareClose("test").get(); + state = client().admin().cluster().prepareState().get().getState(); assertEquals(IndexMetaData.State.CLOSE, state.getMetaData().index(metaData.getIndex()).getState()); assertEquals("classic", state.getMetaData().index(metaData.getIndex()).getSettings().get("archived.index.similarity.BM25.type")); @@ -432,11 +448,19 @@ public Settings onNodeStopped(String nodeName) throws Exception { } }); - // ensureGreen(closedIndex) waits for the index to show up in the metadata - // this is crucial otherwise the state call below might not contain the index yet - ensureGreen(metaData.getIndex().getName()); - state = client().admin().cluster().prepareState().get().getState(); - assertEquals(IndexMetaData.State.CLOSE, state.getMetaData().index(metaData.getIndex()).getState()); + // check that the cluster does not keep reallocating shards + assertBusy(() -> { + final RoutingTable routingTable = client().admin().cluster().prepareState().get().getState().routingTable(); + final IndexRoutingTable indexRoutingTable = routingTable.index("test"); + assertNotNull(indexRoutingTable); + for (IndexShardRoutingTable 
shardRoutingTable : indexRoutingTable) { + assertTrue(shardRoutingTable.primaryShard().unassigned()); + assertEquals(UnassignedInfo.AllocationStatus.DECIDERS_NO, + shardRoutingTable.primaryShard().unassignedInfo().getLastAllocationStatus()); + assertThat(shardRoutingTable.primaryShard().unassignedInfo().getNumFailedAllocations(), greaterThan(0)); + } + }, 60, TimeUnit.SECONDS); + client().admin().indices().prepareClose("test").get(); // try to open it with the broken setting - fail again! ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> client().admin().indices().prepareOpen("test").get()); @@ -466,7 +490,7 @@ public void testArchiveBrokenClusterSettings() throws Exception { final MetaData metaData = state.getMetaData(); final MetaData brokenMeta = MetaData.builder(metaData).persistentSettings(Settings.builder() .put(metaData.persistentSettings()).put("this.is.unknown", true) - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), "broken").build()).build(); + .put(MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), "broken").build()).build(); internalCluster().fullRestart(new RestartCallback(){ @Override public Settings onNodeStopped(String nodeName) throws Exception { @@ -480,7 +504,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { state = client().admin().cluster().prepareState().get().getState(); assertEquals("true", state.metaData().persistentSettings().get("archived.this.is.unknown")); assertEquals("broken", state.metaData().persistentSettings().get("archived." - + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey())); + + MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey())); // delete these settings client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().putNull("archived.*")).get(); @@ -488,7 +512,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { state = client().admin().cluster().prepareState().get().getState(); assertNull(state.metaData().persistentSettings().get("archived.this.is.unknown")); assertNull(state.metaData().persistentSettings().get("archived." 
- + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey())); + + MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey())); assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 4b0e431c66352..3ea0663d7d4c0 100644 --- a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -29,8 +29,10 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.coordination.ElectionSchedulerFactory; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -577,4 +579,16 @@ public Settings onNodeStopped(String nodeName) throws Exception { // start another node so cluster consistency checks won't time out due to the lack of state internalCluster().startNode(); } + + public void testMessyElectionsStillMakeClusterGoGreen() throws Exception { + internalCluster().startNodes(3, + Settings.builder().put(ElectionSchedulerFactory.ELECTION_INITIAL_TIMEOUT_SETTING.getKey(), + "2ms").build()); + createIndex("test", Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms").build()); + ensureGreen("test"); + internalCluster().fullRestart(); + ensureGreen("test"); + } } diff --git a/server/src/test/java/org/elasticsearch/get/GetActionIT.java b/server/src/test/java/org/elasticsearch/get/GetActionIT.java index 77303995f7494..8be9a991d17e9 100644 --- a/server/src/test/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -441,7 +441,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1")); assertThat(response.getResponses()[2].getFailure(), notNullValue()); assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1")); - assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[type1][1]: version conflict")); + assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[1]: version conflict")); assertThat(response.getResponses()[2].getFailure().getFailure(), instanceOf(VersionConflictEngineException.class)); //Version from Lucene index @@ -464,7 +464,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1")); assertThat(response.getResponses()[2].getFailure(), notNullValue()); assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1")); - assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[type1][1]: version conflict")); + assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[1]: version conflict")); assertThat(response.getResponses()[2].getFailure().getFailure(), 
instanceOf(VersionConflictEngineException.class)); @@ -489,7 +489,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[1].getFailure(), notNullValue()); assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2")); assertThat(response.getResponses()[1].getIndex(), equalTo("test")); - assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[type1][2]: version conflict")); + assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[2]: version conflict")); assertThat(response.getResponses()[2].getId(), equalTo("2")); assertThat(response.getResponses()[2].getIndex(), equalTo("test")); assertThat(response.getResponses()[2].getFailure(), nullValue()); @@ -515,7 +515,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[1].getFailure(), notNullValue()); assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2")); assertThat(response.getResponses()[1].getIndex(), equalTo("test")); - assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[type1][2]: version conflict")); + assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[2]: version conflict")); assertThat(response.getResponses()[2].getId(), equalTo("2")); assertThat(response.getResponses()[2].getIndex(), equalTo("test")); assertThat(response.getResponses()[2].getFailure(), nullValue()); diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index 64a2fa69bcbd5..b3e6557b187ae 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -561,4 +561,20 @@ public void testUpdateSoftDeletesFails() { Settings.builder(), Settings.builder(), "index")); assertThat(error.getMessage(), equalTo("final index setting [index.soft_deletes.enabled], not updateable")); } + + public void testSoftDeletesDefaultSetting() { + // enabled by default on 7.0+ or later + { + Version createdVersion = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.CURRENT); + Settings settings = Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), createdVersion).build(); + assertTrue(IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)); + } + // disabled by default on the previous versions + { + Version prevVersion = VersionUtils.randomVersionBetween( + random(), Version.V_6_5_0, VersionUtils.getPreviousVersion(Version.V_7_0_0)); + Settings settings = Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), prevVersion).build(); + assertFalse(IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 25b0c9e00cb7d..3703c6d1c80b3 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -81,6 +81,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ 
-155,6 +156,7 @@ import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -262,7 +264,7 @@ public void testVersionMapAfterAutoIDDocument() throws IOException { } } - public void testSegments() throws Exception { + public void testSegmentsWithoutSoftDeletes() throws Exception { Settings settings = Settings.builder() .put(defaultSettings.getSettings()) .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build(); @@ -592,6 +594,68 @@ public void testSegmentsStatsIncludingFileSizes() throws Exception { } } + public void testSegmentsWithSoftDeletes() throws Exception { + Settings.Builder settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); + final IndexMetaData indexMetaData = IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetaData); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore(); + InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null, + null, globalCheckpoint::get))) { + assertThat(engine.segments(false), empty()); + int numDocsFirstSegment = randomIntBetween(5, 50); + Set liveDocsFirstSegment = new HashSet<>(); + for (int i = 0; i < numDocsFirstSegment; i++) { + String id = Integer.toString(i); + ParsedDocument doc = testParsedDocument(id, null, testDocument(), B_1, null); + engine.index(indexForDoc(doc)); + liveDocsFirstSegment.add(id); + } + engine.refresh("test"); + List segments = engine.segments(randomBoolean()); + assertThat(segments, hasSize(1)); + assertThat(segments.get(0).getNumDocs(), equalTo(liveDocsFirstSegment.size())); + assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); + assertFalse(segments.get(0).committed); + int deletes = 0; + int updates = 0; + int appends = 0; + int iterations = scaledRandomIntBetween(1, 50); + for (int i = 0; i < iterations && liveDocsFirstSegment.isEmpty() == false; i++) { + String idToUpdate = randomFrom(liveDocsFirstSegment); + liveDocsFirstSegment.remove(idToUpdate); + ParsedDocument doc = testParsedDocument(idToUpdate, null, testDocument(), B_1, null); + if (randomBoolean()) { + engine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc), primaryTerm.get())); + deletes++; + } else { + engine.index(indexForDoc(doc)); + updates++; + } + if (randomBoolean()) { + engine.index(indexForDoc(testParsedDocument(UUIDs.randomBase64UUID(), null, testDocument(), B_1, null))); + appends++; + } + } + boolean committed = randomBoolean(); + if (committed) { + engine.flush(); + } + engine.refresh("test"); + segments = engine.segments(randomBoolean()); + assertThat(segments, hasSize(2)); + assertThat(segments.get(0).getNumDocs(), equalTo(liveDocsFirstSegment.size())); + assertThat(segments.get(0).getDeletedDocs(), equalTo(updates + deletes)); + assertThat(segments.get(0).committed, equalTo(committed)); + + assertThat(segments.get(1).getNumDocs(), equalTo(updates + appends)); + assertThat(segments.get(1).getDeletedDocs(), equalTo(deletes)); // delete tombstones + assertThat(segments.get(1).committed, equalTo(committed)); + } + } + public void 
testCommitStats() throws IOException { final AtomicLong maxSeqNo = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final AtomicLong localCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); @@ -677,7 +741,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { trimUnsafeCommits(engine.config()); engine = new InternalEngine(engine.config()); assertTrue(engine.isRecovering()); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); Engine.Searcher searcher = wrapper.wrap(engine.acquireSearcher("test")); assertThat(counter.get(), equalTo(2)); @@ -695,7 +759,7 @@ public void testFlushIsDisabledDuringTranslogRecovery() throws IOException { engine = new InternalEngine(engine.config()); expectThrows(IllegalStateException.class, () -> engine.flush(true, true)); assertTrue(engine.isRecovering()); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertFalse(engine.isRecovering()); doc = testParsedDocument("2", null, testDocumentWithTextField(), SOURCE, null); @@ -728,7 +792,7 @@ public void testTranslogMultipleOperationsSameDocument() throws IOException { } trimUnsafeCommits(engine.config()); try (Engine recoveringEngine = new InternalEngine(engine.config())) { - recoveringEngine.initializeMaxSeqNoOfUpdatesOrDeletes(); + recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { final TotalHitCountCollector collector = new TotalHitCountCollector(); @@ -765,7 +829,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s } }; assertThat(getTranslog(recoveringEngine).stats().getUncommittedOperations(), equalTo(docs)); - recoveringEngine.initializeMaxSeqNoOfUpdatesOrDeletes(); + recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertTrue(committed.get()); } finally { @@ -800,7 +864,7 @@ public void testTranslogRecoveryWithMultipleGenerations() throws IOException { initialEngine.close(); trimUnsafeCommits(initialEngine.config()); recoveringEngine = new InternalEngine(initialEngine.config()); - recoveringEngine.initializeMaxSeqNoOfUpdatesOrDeletes(); + recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), docs); @@ -836,7 +900,7 @@ public void testRecoveryFromTranslogUpToSeqNo() throws IOException { } trimUnsafeCommits(config); try (InternalEngine engine = new InternalEngine(config)) { - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertThat(engine.getLocalCheckpoint(), equalTo(maxSeqNo)); assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(maxSeqNo)); @@ -844,7 +908,7 @@ public void testRecoveryFromTranslogUpToSeqNo() throws IOException { trimUnsafeCommits(config); try (InternalEngine engine = new InternalEngine(config)) { long upToSeqNo = randomLongBetween(globalCheckpoint.get(), maxSeqNo); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + 
engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, upToSeqNo); assertThat(engine.getLocalCheckpoint(), equalTo(upToSeqNo)); assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(upToSeqNo)); @@ -1261,7 +1325,7 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException { } trimUnsafeCommits(config); engine = new InternalEngine(config); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); } @@ -1282,7 +1346,7 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { engine.close(); trimUnsafeCommits(config); engine = new InternalEngine(config); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID)); @@ -2381,7 +2445,7 @@ public void testSeqNoAndCheckpoints() throws IOException { trimUnsafeCommits(initialEngine.engineConfig); try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())) { - recoveringEngine.initializeMaxSeqNoOfUpdatesOrDeletes(); + recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(primarySeqNo, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); @@ -2492,6 +2556,7 @@ public void testConcurrentWritesAndCommits() throws Exception { prevLocalCheckpoint = localCheckpoint; prevMaxSeqNo = maxSeqNo; } + IOUtils.close(commits); } } @@ -2736,7 +2801,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); } assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("3", userData.get(Translog.TRANSLOG_GENERATION_KEY)); @@ -2755,7 +2820,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(2, engine.getTranslog().currentFileGeneration()); assertEquals(0L, engine.getTranslog().stats().getUncommittedOperations()); @@ -2770,7 +2835,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("no 
changes - nothing to commit", "1", @@ -2878,7 +2943,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s } } }) { - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); @@ -2891,7 +2956,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s try (InternalEngine engine = new InternalEngine(config(indexSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpointSupplier))) { - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertVisibleCount(engine, 1); final long committedGen = Long.valueOf( @@ -2962,7 +3027,7 @@ public void testTranslogReplay() throws IOException { trimUnsafeCommits(copy(engine.config(), inSyncGlobalCheckpointSupplier)); // we need to reuse the engine config unless the parser.mappingModified won't work engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier)); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertVisibleCount(engine, numDocs, false); @@ -3231,7 +3296,7 @@ public void testHandleDocumentFailure() throws Exception { final ParsedDocument doc3 = testParsedDocument("3", null, testDocumentWithTextField(), B_1, null); AtomicReference throwingIndexWriter = new AtomicReference<>(); - try (Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, + try (InternalEngine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, (directory, iwc) -> { throwingIndexWriter.set(new ThrowingIndexWriter(directory, iwc)); return throwingIndexWriter.get(); @@ -3296,16 +3361,13 @@ public BytesRef binaryValue() { engine.close(); } // now the engine is closed check we respond correctly - try { - if (randomBoolean()) { - engine.index(indexForDoc(doc1)); - } else { - engine.delete(new Engine.Delete("test", "", newUid(doc1), primaryTerm.get())); - } - fail("engine should be closed"); - } catch (Exception e) { - assertThat(e, instanceOf(AlreadyClosedException.class)); - } + expectThrows(AlreadyClosedException.class, () -> engine.index(indexForDoc(doc1))); + expectThrows(AlreadyClosedException.class, + () -> engine.delete(new Engine.Delete("test", "", newUid(doc1), primaryTerm.get()))); + expectThrows(AlreadyClosedException.class, () -> engine.noOp( + new Engine.NoOp(engine.getLocalCheckpointTracker().generateSeqNo(), + engine.config().getPrimaryTermSupplier().getAsLong(), + randomFrom(Engine.Operation.Origin.values()), randomNonNegativeLong(), "test"))); } } } @@ -3710,7 +3772,7 @@ public void testEngineMaxTimestampIsInitialized() throws IOException { InternalEngine engine = new InternalEngine(configSupplier.apply(store))) { assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(timestamp1, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), @@ -4078,7 +4140,7 @@ public 
void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro } trimUnsafeCommits(initialEngine.config()); try (Engine recoveringEngine = new InternalEngine(initialEngine.config())) { - recoveringEngine.initializeMaxSeqNoOfUpdatesOrDeletes(); + recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); recoveringEngine.fillSeqNoGaps(2); assertThat(recoveringEngine.getLocalCheckpoint(), greaterThanOrEqualTo((long) (docs - 1))); @@ -4191,7 +4253,7 @@ protected long doGenerateSeqNoForOperation(Operation operation) { throw new UnsupportedOperationException(); } }; - noOpEngine.initializeMaxSeqNoOfUpdatesOrDeletes(); + noOpEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); noOpEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get()); final String reason = "filling gaps"; @@ -4428,7 +4490,7 @@ public void testRestoreLocalHistoryFromTranslog() throws IOException { } trimUnsafeCommits(engineConfig); try (InternalEngine engine = new InternalEngine(engineConfig)) { - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, globalCheckpoint.get()); engine.restoreLocalHistoryFromTranslog(translogHandler); assertThat(getDocIds(engine, true), equalTo(prevDocs)); @@ -4476,7 +4538,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { trimUnsafeCommits(copy(replicaEngine.config(), globalCheckpoint::get)); recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); assertEquals(numDocsOnReplica, getTranslog(recoveringEngine).stats().getUncommittedOperations()); - recoveringEngine.initializeMaxSeqNoOfUpdatesOrDeletes(); + recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); assertEquals(checkpointOnReplica, recoveringEngine.getLocalCheckpoint()); @@ -4513,7 +4575,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { if (flushed) { assertThat(recoveringEngine.getTranslogStats().getUncommittedOperations(), equalTo(0)); } - recoveringEngine.initializeMaxSeqNoOfUpdatesOrDeletes(); + recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(maxSeqIDOnReplica, recoveringEngine.getSeqNoStats(-1).getMaxSeqNo()); assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpoint()); @@ -4708,7 +4770,7 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s super.commitIndexWriter(writer, translog, syncId); } }) { - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); int numDocs = scaledRandomIntBetween(10, 100); for (int docId = 0; docId < numDocs; docId++) { @@ -5306,7 +5368,8 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); final AtomicLong retentionLeasesVersion = new AtomicLong(); - final AtomicReference retentionLeasesHolder = new AtomicReference<>(RetentionLeases.EMPTY); + final AtomicReference retentionLeasesHolder = new AtomicReference<>( + new 
RetentionLeases(primaryTerm, retentionLeasesVersion.get(), Collections.emptyList())); final List operations = generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), 2, 10, 300, "2"); Randomness.shuffle(operations); @@ -5359,21 +5422,11 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { engine.flush(true, true); assertThat(Long.parseLong(engine.getLastCommittedSegmentInfos().userData.get(Engine.MIN_RETAINED_SEQNO)), equalTo(engine.getMinRetainedSeqNo())); - final RetentionLeases leases = retentionLeasesHolder.get(); - if (leases.leases().isEmpty()) { - assertThat( - engine.getLastCommittedSegmentInfos().getUserData().get(Engine.RETENTION_LEASES), - equalTo("primary_term:" + primaryTerm + ";version:" + retentionLeasesVersion.get() + ";")); - } else { - assertThat( - engine.getLastCommittedSegmentInfos().getUserData().get(Engine.RETENTION_LEASES), - equalTo(RetentionLeases.encodeRetentionLeases(leases))); - } } if (rarely()) { engine.forceMerge(randomBoolean()); } - try (Closeable ignored = engine.acquireRetentionLockForPeerRecovery()) { + try (Closeable ignored = engine.acquireRetentionLock()) { long minRetainSeqNos = engine.getMinRetainedSeqNo(); assertThat(minRetainSeqNos, lessThanOrEqualTo(globalCheckpoint.get() + 1)); Long[] expectedOps = existingSeqNos.stream().filter(seqno -> seqno >= minRetainSeqNos).toArray(Long[]::new); @@ -5493,7 +5546,7 @@ public void testTrackMaxSeqNoOfUpdatesOrDeletesOnPrimary() throws Exception { Set liveDocIds = new HashSet<>(); engine = new InternalEngine(engine.config()); assertThat(engine.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(-2L)); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); int numOps = between(1, 500); for (int i = 0; i < numOps; i++) { long currentMaxSeqNoOfUpdates = engine.getMaxSeqNoOfUpdatesOrDeletes(); @@ -5564,7 +5617,7 @@ public void testRebuildLocalCheckpointTracker() throws Exception { "seq_no=" + op.seqNo() + " max_seq_no=" + tracker.getMaxSeqNo() + "checkpoint=" + tracker.getCheckpoint(), tracker.contains(op.seqNo()), equalTo(seqNosInSafeCommit.contains(op.seqNo()))); } - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertThat(getDocIds(engine, true), equalTo(docs)); } @@ -5651,4 +5704,82 @@ public void testStoreHonorsLuceneVersion() throws IOException { } } } + + public void testMaxSeqNoInCommitUserData() throws Exception { + AtomicBoolean running = new AtomicBoolean(true); + Thread rollTranslog = new Thread(() -> { + while (running.get() && engine.getTranslog().currentFileGeneration() < 500) { + engine.rollTranslogGeneration(); // make adding operations to translog slower + } + }); + rollTranslog.start(); + + Thread indexing = new Thread(() -> { + long seqNo = 0; + while (running.get() && seqNo <= 1000) { + try { + String id = Long.toString(between(1, 50)); + if (randomBoolean()) { + ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null); + engine.index(replicaIndexForDoc(doc, 1L, seqNo, false)); + } else { + engine.delete(replicaDeleteForDoc(id, 1L, seqNo, 0L)); + } + seqNo++; + } catch (IOException e) { + throw new AssertionError(e); + } + } + }); + indexing.start(); + + int numCommits = between(5, 20); + for (int i = 0; i < numCommits; i++) { + engine.flush(false, true); + } + running.set(false); + indexing.join(); + rollTranslog.join(); + 
assertMaxSeqNoInCommitUserData(engine); + } + + @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-8809") + public void testRefreshAndFailEngineConcurrently() throws Exception { + AtomicBoolean stopped = new AtomicBoolean(); + Semaphore indexedDocs = new Semaphore(0); + Thread indexer = new Thread(() -> { + while (stopped.get() == false) { + String id = Integer.toString(randomIntBetween(1, 100)); + try { + engine.index(indexForDoc(createParsedDoc(id, null))); + indexedDocs.release(); + } catch (IOException e) { + throw new AssertionError(e); + } catch (AlreadyClosedException e) { + return; + } + } + }); + + Thread refresher = new Thread(() -> { + while (stopped.get() == false) { + try { + engine.refresh("test", randomFrom(Engine.SearcherScope.values())); + } catch (AlreadyClosedException e) { + return; + } + } + }); + indexer.start(); + refresher.start(); + indexedDocs.acquire(randomIntBetween(1, 100)); + try { + engine.failEngine("test", new IOException("simulated error")); + } finally { + stopped.set(true); + indexer.join(); + refresher.join(); + } + assertThat(engine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java index b345afe9b8f89..ce54259c2deea 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java @@ -96,7 +96,7 @@ public void testReadOnlyEngine() throws Exception { // Close and reopen the main engine InternalEngineTests.trimUnsafeCommits(config); try (InternalEngine recoveringEngine = new InternalEngine(config)) { - recoveringEngine.initializeMaxSeqNoOfUpdatesOrDeletes(); + recoveringEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); // the locked down engine should still point to the previous commit assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); @@ -189,4 +189,57 @@ public void testReadOnly() throws IOException { } } } + + /** + * Test that {@link ReadOnlyEngine#verifyEngineBeforeIndexClosing()} never fails + * whatever the value of the global checkpoint to check is. 
+ */ + public void testVerifyShardBeforeIndexClosingIsNoOp() throws IOException { + IOUtils.close(engine, store); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + store.createEmpty(Version.CURRENT.luceneVersion); + try (ReadOnlyEngine readOnlyEngine = new ReadOnlyEngine(config, null , null, true, Function.identity())) { + globalCheckpoint.set(randomNonNegativeLong()); + try { + readOnlyEngine.verifyEngineBeforeIndexClosing(); + } catch (final IllegalStateException e) { + fail("Read-only engine pre-closing verifications failed"); + } + } + } + } + + public void testRecoverFromTranslogAppliesNoOperations() throws IOException { + IOUtils.close(engine, store); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + int numDocs = scaledRandomIntBetween(10, 1000); + try (InternalEngine engine = createEngine(config)) { + for (int i = 0; i < numDocs; i++) { + if (rarely()) { + continue; // gap in sequence number + } + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); + engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA, + System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); + if (rarely()) { + engine.flush(); + } + globalCheckpoint.set(i); + } + engine.syncTranslog(); + engine.flushAndClose(); + } + try (ReadOnlyEngine readOnlyEngine = new ReadOnlyEngine(config, null , null, true, Function.identity())) { + final TranslogHandler translogHandler = new TranslogHandler(xContentRegistry(), config.getIndexSettings()); + readOnlyEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); + readOnlyEngine.recoverFromTranslog(translogHandler, randomNonNegativeLong()); + + assertThat(translogHandler.appliedOperations(), equalTo(0L)); + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java index 8257aa99d0486..3c71e4fede3d5 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java @@ -32,12 +32,11 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.OptionalLong; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -98,7 +97,9 @@ public void testSoftDeletesRetentionLock() { .min() .orElse(Long.MAX_VALUE); long retainedSeqNo = - Math.min(safeCommitCheckpoint, Math.min(minimumRetainingSequenceNumber, globalCheckpoint.get() - retainedOps)) + 1; + Math.min( + 1 + safeCommitCheckpoint, + Math.min(minimumRetainingSequenceNumber, 1 + globalCheckpoint.get() - retainedOps)); minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo); } assertThat(retentionQuery.getNumDims(), equalTo(1)); @@ -113,32 +114,91 
@@ public void testSoftDeletesRetentionLock() { .min() .orElse(Long.MAX_VALUE); long retainedSeqNo = - Math.min(safeCommitCheckpoint, Math.min(minimumRetainingSequenceNumber, globalCheckpoint.get() - retainedOps)) + 1; + Math.min(1 + safeCommitCheckpoint, Math.min(minimumRetainingSequenceNumber, 1 + globalCheckpoint.get() - retainedOps)); minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo); assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo)); } - public void testAlwaysFetchLatestRetentionLeases() { - final AtomicLong globalCheckpoint = new AtomicLong(NO_OPS_PERFORMED); + public void testWhenGlobalCheckpointDictatesThePolicy() { + final int retentionOperations = randomIntBetween(0, 1024); + final AtomicLong globalCheckpoint = new AtomicLong(randomLongBetween(0, Long.MAX_VALUE - 2)); final Collection leases = new ArrayList<>(); - final int numLeases = randomIntBetween(0, 10); - for (int i = 0; i < numLeases; i++) { - leases.add(new RetentionLease(Integer.toString(i), randomLongBetween(0, 1000), randomNonNegativeLong(), "test")); + final int numberOfLeases = randomIntBetween(0, 16); + for (int i = 0; i < numberOfLeases; i++) { + // set up leases where the minimum retained sequence number is more than the policy dictated by the global checkpoint + leases.add(new RetentionLease( + Integer.toString(i), + randomLongBetween(1 + globalCheckpoint.get() - retentionOperations + 1, Long.MAX_VALUE), + randomNonNegativeLong(), "test")); } + final long primaryTerm = randomNonNegativeLong(); + final long version = randomNonNegativeLong(); final Supplier leasesSupplier = () -> new RetentionLeases( - randomNonNegativeLong(), - randomNonNegativeLong(), + primaryTerm, + version, Collections.unmodifiableCollection(new ArrayList<>(leases))); final SoftDeletesPolicy policy = - new SoftDeletesPolicy(globalCheckpoint::get, randomIntBetween(1, 1000), randomIntBetween(0, 1000), leasesSupplier); - if (randomBoolean()) { - policy.acquireRetentionLock(); + new SoftDeletesPolicy(globalCheckpoint::get, 0, retentionOperations, leasesSupplier); + // set the local checkpoint of the safe commit to more than the policy dictated by the global checkpoint + final long localCheckpointOfSafeCommit = randomLongBetween(1 + globalCheckpoint.get() - retentionOperations + 1, Long.MAX_VALUE); + policy.setLocalCheckpointOfSafeCommit(localCheckpointOfSafeCommit); + assertThat(policy.getMinRetainedSeqNo(), equalTo(1 + globalCheckpoint.get() - retentionOperations)); + } + + public void testWhenLocalCheckpointOfSafeCommitDictatesThePolicy() { + final int retentionOperations = randomIntBetween(0, 1024); + final long localCheckpointOfSafeCommit = randomLongBetween(-1, Long.MAX_VALUE - retentionOperations - 1); + final AtomicLong globalCheckpoint = + new AtomicLong(randomLongBetween(Math.max(0, localCheckpointOfSafeCommit + retentionOperations), Long.MAX_VALUE - 1)); + final Collection leases = new ArrayList<>(); + final int numberOfLeases = randomIntBetween(0, 16); + for (int i = 0; i < numberOfLeases; i++) { + leases.add(new RetentionLease( + Integer.toString(i), + randomLongBetween(1 + localCheckpointOfSafeCommit + 1, Long.MAX_VALUE), // leases are for more than the local checkpoint + randomNonNegativeLong(), "test")); } - if (numLeases == 0) { - assertThat(policy.getRetentionPolicy().v2().leases(), empty()); - } else { - assertThat(policy.getRetentionPolicy().v2().leases(), contains(leases.toArray(new RetentionLease[0]))); + final long primaryTerm = randomNonNegativeLong(); + final long version = 
randomNonNegativeLong(); + final Supplier leasesSupplier = + () -> new RetentionLeases( + primaryTerm, + version, + Collections.unmodifiableCollection(new ArrayList<>(leases))); + + final SoftDeletesPolicy policy = + new SoftDeletesPolicy(globalCheckpoint::get, 0, retentionOperations, leasesSupplier); + policy.setLocalCheckpointOfSafeCommit(localCheckpointOfSafeCommit); + assertThat(policy.getMinRetainedSeqNo(), equalTo(1 + localCheckpointOfSafeCommit)); + } + + public void testWhenRetentionLeasesDictateThePolicy() { + final int retentionOperations = randomIntBetween(0, 1024); + final Collection leases = new ArrayList<>(); + final int numberOfLeases = randomIntBetween(1, 16); + for (int i = 0; i < numberOfLeases; i++) { + leases.add(new RetentionLease( + Integer.toString(i), + randomLongBetween(0, Long.MAX_VALUE - retentionOperations - 1), + randomNonNegativeLong(), "test")); } + final OptionalLong minimumRetainingSequenceNumber = leases.stream().mapToLong(RetentionLease::retainingSequenceNumber).min(); + assert minimumRetainingSequenceNumber.isPresent() : leases; + final long localCheckpointOfSafeCommit = randomLongBetween(minimumRetainingSequenceNumber.getAsLong(), Long.MAX_VALUE - 1); + final AtomicLong globalCheckpoint = + new AtomicLong(randomLongBetween(minimumRetainingSequenceNumber.getAsLong() + retentionOperations, Long.MAX_VALUE - 1)); + final long primaryTerm = randomNonNegativeLong(); + final long version = randomNonNegativeLong(); + final Supplier leasesSupplier = + () -> new RetentionLeases( + primaryTerm, + version, + Collections.unmodifiableCollection(new ArrayList<>(leases))); + final SoftDeletesPolicy policy = + new SoftDeletesPolicy(globalCheckpoint::get, 0, retentionOperations, leasesSupplier); + policy.setLocalCheckpointOfSafeCommit(localCheckpointOfSafeCommit); + assertThat(policy.getMinRetainedSeqNo(), equalTo(minimumRetainingSequenceNumber.getAsLong())); } + } diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesGeoPointsTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesGeoPointsTests.java index 72d890edc795d..6c199fefbe36c 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesGeoPointsTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesGeoPointsTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.fielddata; +import org.elasticsearch.index.fielddata.ScriptDocValues.GeoPoints; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.test.ESTestCase; @@ -28,31 +29,30 @@ public class ScriptDocValuesGeoPointsTests extends ESTestCase { - private static MultiGeoPointValues wrap(final GeoPoint... 
points) { + private static MultiGeoPointValues wrap(GeoPoint[][] points) { return new MultiGeoPointValues() { - int docID = -1; + GeoPoint[] current; int i; @Override public GeoPoint nextValue() { - if (docID != 0) { - fail(); - } - return points[i++]; + return current[i++]; } @Override public boolean advanceExact(int docId) { - docID = docId; - return points.length > 0; + if (docId < points.length) { + current = points[docId]; + } else { + current = new GeoPoint[0]; + } + i = 0; + return current.length > 0; } @Override public int docValueCount() { - if (docID != 0) { - return 0; - } - return points.length; + return current.length; } }; } @@ -71,7 +71,8 @@ public void testGeoGetLatLon() throws IOException { final double lon1 = randomLon(); final double lon2 = randomLon(); - final MultiGeoPointValues values = wrap(new GeoPoint(lat1, lon1), new GeoPoint(lat2, lon2)); + GeoPoint[][] points = {{new GeoPoint(lat1, lon1), new GeoPoint(lat2, lon2)}}; + final MultiGeoPointValues values = wrap(points); final ScriptDocValues.GeoPoints script = new ScriptDocValues.GeoPoints(values); script.setNextDocId(1); @@ -88,11 +89,13 @@ public void testGeoGetLatLon() throws IOException { public void testGeoDistance() throws IOException { final double lat = randomLat(); final double lon = randomLon(); - final MultiGeoPointValues values = wrap(new GeoPoint(lat, lon)); + GeoPoint[][] points = {{new GeoPoint(lat, lon)}}; + final MultiGeoPointValues values = wrap(points); final ScriptDocValues.GeoPoints script = new ScriptDocValues.GeoPoints(values); script.setNextDocId(0); - final ScriptDocValues.GeoPoints emptyScript = new ScriptDocValues.GeoPoints(wrap()); + GeoPoint[][] points2 = {new GeoPoint[0]}; + final ScriptDocValues.GeoPoints emptyScript = new ScriptDocValues.GeoPoints(wrap(points2)); emptyScript.setNextDocId(0); final double otherLat = randomLat(); @@ -110,4 +113,34 @@ public void testGeoDistance() throws IOException { script.planeDistanceWithDefault(otherLat, otherLon, 42) / 1000d, 0.01); assertEquals(42, emptyScript.planeDistanceWithDefault(otherLat, otherLon, 42), 0); } + + public void testMissingValues() throws IOException { + GeoPoint[][] points = new GeoPoint[between(3, 10)][]; + for (int d = 0; d < points.length; d++) { + points[d] = new GeoPoint[randomBoolean() ? 0 : between(1, 10)]; + for (int i = 0; i< points[d].length; i++) { + points[d][i] = new GeoPoint(randomLat(), randomLon()); + } + } + final ScriptDocValues.GeoPoints geoPoints = new GeoPoints(wrap(points)); + for (int d = 0; d < points.length; d++) { + geoPoints.setNextDocId(d); + if (points[d].length > 0) { + assertEquals(points[d][0], geoPoints.getValue()); + } else { + Exception e = expectThrows(IllegalStateException.class, () -> geoPoints.getValue()); + assertEquals("A document doesn't have a value for a field! " + + "Use doc[].size()==0 to check if a document is missing a field!", e.getMessage()); + e = expectThrows(IllegalStateException.class, () -> geoPoints.get(0)); + assertEquals("A document doesn't have a value for a field! 
" + + "Use doc[].size()==0 to check if a document is missing a field!", e.getMessage()); + } + assertEquals(points[d].length, geoPoints.size()); + for (int i = 0; i < points[d].length; i++) { + assertEquals(points[d][i], geoPoints.get(i)); + } + } + } + + } diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java index a5674e4da7d7d..c74725d3774b7 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java @@ -42,10 +42,14 @@ public void testLongs() throws IOException { longs.setNextDocId(d); if (values[d].length > 0) { assertEquals(values[d][0], longs.getValue()); + assertEquals(values[d][0], (long) longs.get(0)); } else { Exception e = expectThrows(IllegalStateException.class, () -> longs.getValue()); assertEquals("A document doesn't have a value for a field! " + "Use doc[].size()==0 to check if a document is missing a field!", e.getMessage()); + e = expectThrows(IllegalStateException.class, () -> longs.get(0)); + assertEquals("A document doesn't have a value for a field! " + + "Use doc[].size()==0 to check if a document is missing a field!", e.getMessage()); } assertEquals(values[d].length, longs.size()); for (int i = 0; i < values[d].length; i++) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java index 92178e93d212b..fab34efbb00e2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexOptions; @@ -26,6 +27,8 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MultiReader; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -37,6 +40,9 @@ import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.fielddata.AtomicNumericFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData; +import org.elasticsearch.index.fielddata.plain.SortedNumericDVIndexFieldData; import org.elasticsearch.index.mapper.DateFieldMapper.DateFieldType; import org.elasticsearch.index.mapper.MappedFieldType.Relation; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -214,4 +220,33 @@ public void testRangeQuery() throws IOException { () -> ft.rangeQuery(date1, date2, true, true, null, null, null, context)); assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } + + public void testDateNanoDocValues() throws IOException { + // Create an index with some docValues + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null)); + Document doc = 
new Document(); + NumericDocValuesField docValuesField = new NumericDocValuesField("my_date", 1444608000000L); + doc.add(docValuesField); + w.addDocument(doc); + docValuesField.setLongValue(1459641600000L); + w.addDocument(doc); + // Create the doc values reader + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).build(); + IndexSettings indexSettings = new IndexSettings(IndexMetaData.builder("foo").settings(settings).build(), settings); + SortedNumericDVIndexFieldData fieldData = new SortedNumericDVIndexFieldData(indexSettings.getIndex(), "my_date", + IndexNumericFieldData.NumericType.DATE_NANOSECONDS); + // Read index and check the doc values + DirectoryReader reader = DirectoryReader.open(w); + assertTrue(reader.leaves().size() > 0); + AtomicNumericFieldData a = fieldData.load(reader.leaves().get(0).reader().getContext()); + SortedNumericDocValues docValues = a.getLongValues(); + assertEquals(0, docValues.nextDoc()); + assertEquals(1, docValues.nextDoc()); + assertEquals(DocIdSetIterator.NO_MORE_DOCS, docValues.nextDoc()); + reader.close(); + w.close(); + dir.close(); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java index ea3125accd059..31864abc2e459 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java @@ -87,7 +87,7 @@ public ExternalMapper build(BuilderContext context) { BinaryFieldMapper binMapper = binBuilder.build(context); BooleanFieldMapper boolMapper = boolBuilder.build(context); GeoPointFieldMapper pointMapper = latLonPointBuilder.build(context); - BaseGeoShapeFieldMapper shapeMapper = (context.indexCreatedVersion().before(Version.V_7_0_0)) + BaseGeoShapeFieldMapper shapeMapper = (context.indexCreatedVersion().before(Version.V_6_6_0)) ? 
legacyShapeBuilder.build(context) : shapeBuilder.build(context); FieldMapper stringMapper = (FieldMapper)stringBuilder.build(context); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index f5597ecb1f443..2142fca565c9b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -523,5 +523,15 @@ public void testInvalidGeopointValuesIgnored() throws Exception { BytesReference.bytes(XContentFactory.jsonBuilder() .startObject().field("location", "NaN,12").endObject() ), XContentType.JSON)).rootDoc().getField("location"), nullValue()); + + assertThat(defaultMapper.parse(new SourceToParse("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject().startObject("location").nullField("lat").field("lon", 1).endObject().endObject() + ), XContentType.JSON)).rootDoc().getField("location"), nullValue()); + + assertThat(defaultMapper.parse(new SourceToParse("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject().startObject("location").nullField("lat").nullField("lon").endObject().endObject() + ), XContentType.JSON)).rootDoc().getField("location"), nullValue()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java index ba7f5d846840a..77953c0903fd2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java @@ -19,11 +19,15 @@ package org.elasticsearch.index.mapper; +import com.carrotsearch.randomizedtesting.annotations.Timeout; + import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; @@ -36,6 +40,7 @@ import java.util.HashSet; import java.util.List; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; public class NumberFieldMapperTests extends AbstractNumericFieldMapperTestCase { @@ -217,45 +222,65 @@ protected void doTestDecimalCoerce(String type) throws IOException { public void testIgnoreMalformed() throws Exception { for (String type : TYPES) { - doTestIgnoreMalformed(type); - } - } + for (Object malformedValue : new Object[] { "a", Boolean.FALSE }) { + String mapping = Strings.toString(jsonBuilder().startObject().startObject("type").startObject("properties") + .startObject("field").field("type", type).endObject().endObject().endObject().endObject()); - private void doTestIgnoreMalformed(String type) throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", type).endObject().endObject() - .endObject().endObject()); + DocumentMapper mapper = 
parser.parse("type", new CompressedXContent(mapping)); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + assertEquals(mapping, mapper.mappingSource().toString()); - assertEquals(mapping, mapper.mappingSource().toString()); + ThrowingRunnable runnable = () -> mapper.parse(new SourceToParse("test", "type", "1", + BytesReference.bytes(jsonBuilder().startObject().field("field", malformedValue).endObject()), XContentType.JSON)); + MapperParsingException e = expectThrows(MapperParsingException.class, runnable); + if (malformedValue instanceof String) { + assertThat(e.getCause().getMessage(), containsString("For input string: \"a\"")); + } else { + assertThat(e.getCause().getMessage(), containsString("Current token")); + assertThat(e.getCause().getMessage(), containsString("not numeric, can not use numeric value accessors")); + } - ThrowingRunnable runnable = () -> mapper.parse(new SourceToParse("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", "a") - .endObject()), - XContentType.JSON)); - MapperParsingException e = expectThrows(MapperParsingException.class, runnable); + mapping = Strings.toString(jsonBuilder().startObject().startObject("type").startObject("properties").startObject("field") + .field("type", type).field("ignore_malformed", true).endObject().endObject().endObject().endObject()); - assertThat(e.getCause().getMessage(), containsString("For input string: \"a\"")); + DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(mapping)); - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", type).field("ignore_malformed", true).endObject().endObject() - .endObject().endObject()); + ParsedDocument doc = mapper2.parse(new SourceToParse("test", "type", "1", + BytesReference.bytes(jsonBuilder().startObject().field("field", malformedValue).endObject()), XContentType.JSON)); - DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(mapping)); - - ParsedDocument doc = mapper2.parse(new SourceToParse("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", "a") - .endObject()), - XContentType.JSON)); + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(0, fields.length); + assertArrayEquals(new String[] { "field" }, doc.rootDoc().getValues("_ignored")); + } + } + } - IndexableField[] fields = doc.rootDoc().getFields("field"); - assertEquals(0, fields.length); - assertArrayEquals(new String[] { "field" }, doc.rootDoc().getValues("_ignored")); + /** + * Test that in case the malformed value is an xContent object we throw error regardless of `ignore_malformed` + */ + public void testIgnoreMalformedWithObject() throws Exception { + for (String type : TYPES) { + Object malformedValue = new ToXContentObject() { + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().field("foo", "bar").endObject(); + } + }; + for (Boolean ignoreMalformed : new Boolean[] { true, false }) { + String mapping = Strings.toString( + jsonBuilder().startObject().startObject("type").startObject("properties").startObject("field").field("type", type) + .field("ignore_malformed", ignoreMalformed).endObject().endObject().endObject().endObject()); + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + assertEquals(mapping, 
mapper.mappingSource().toString()); + + MapperParsingException e = expectThrows(MapperParsingException.class, + () -> mapper.parse(new SourceToParse("test", "type", "1", + BytesReference.bytes(jsonBuilder().startObject().field("field", malformedValue).endObject()), + XContentType.JSON))); + assertThat(e.getCause().getMessage(), containsString("Current token")); + assertThat(e.getCause().getMessage(), containsString("not numeric, can not use numeric value accessors")); + } + } } public void testRejectNorms() throws IOException { @@ -367,17 +392,20 @@ public void testEmptyName() throws IOException { } } + @Timeout(millis = 30000) public void testOutOfRangeValues() throws IOException { final List> inputs = Arrays.asList( OutOfRangeSpec.of(NumberType.BYTE, "128", "is out of range for a byte"), OutOfRangeSpec.of(NumberType.SHORT, "32768", "is out of range for a short"), OutOfRangeSpec.of(NumberType.INTEGER, "2147483648", "is out of range for an integer"), OutOfRangeSpec.of(NumberType.LONG, "9223372036854775808", "out of range for a long"), + OutOfRangeSpec.of(NumberType.LONG, "1e999999999", "out of range for a long"), OutOfRangeSpec.of(NumberType.BYTE, "-129", "is out of range for a byte"), OutOfRangeSpec.of(NumberType.SHORT, "-32769", "is out of range for a short"), OutOfRangeSpec.of(NumberType.INTEGER, "-2147483649", "is out of range for an integer"), OutOfRangeSpec.of(NumberType.LONG, "-9223372036854775809", "out of range for a long"), + OutOfRangeSpec.of(NumberType.LONG, "-1e999999999", "out of range for a long"), OutOfRangeSpec.of(NumberType.BYTE, 128, "is out of range for a byte"), OutOfRangeSpec.of(NumberType.SHORT, 32768, "out of range of Java short"), @@ -419,6 +447,10 @@ public void testOutOfRangeValues() throws IOException { e.getCause().getMessage(), containsString(item.message)); } } + + // the following two strings are in-range for a long after coercion + parseRequest(NumberType.LONG, createIndexRequest("9223372036854775807.9")); + parseRequest(NumberType.LONG, createIndexRequest("-9223372036854775808.9")); } private void parseRequest(NumberType type, BytesReference content) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java index ef98c67e56ed4..7dc97a66a5ab2 100644 --- a/server/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java @@ -151,4 +151,19 @@ public void testFromJson() throws IOException { assertEquals(json, 0.7, parsed.tieBreaker(), 0.0001); assertEquals(json, 2, parsed.innerQueries().size()); } + + public void testRewriteMultipleTimes() throws IOException { + DisMaxQueryBuilder dismax = new DisMaxQueryBuilder(); + dismax.add(new WrapperQueryBuilder(new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()).toString())); + QueryBuilder rewritten = dismax.rewrite(createShardContext()); + DisMaxQueryBuilder expected = new DisMaxQueryBuilder(); + expected.add(new MatchAllQueryBuilder()); + assertEquals(expected, rewritten); + + expected = new DisMaxQueryBuilder(); + expected.add(new MatchAllQueryBuilder()); + QueryBuilder rewrittenAgain = rewritten.rewrite(createShardContext()); + assertEquals(rewrittenAgain, expected); + assertEquals(Rewriteable.rewrite(dismax, createShardContext()), expected); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java 
b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java index 54d478a7f6aec..db32c251fd3f4 100644 --- a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java @@ -146,7 +146,7 @@ public static InnerHitBuilder randomNestedInnerHits() { } public static InnerHitBuilder randomInnerHits() { InnerHitBuilder innerHits = new InnerHitBuilder(); - innerHits.setName(randomAlphaOfLengthBetween(1, 16)); + innerHits.setName(randomAlphaOfLengthBetween(5, 16)); innerHits.setFrom(randomIntBetween(0, 32)); innerHits.setSize(randomIntBetween(0, 32)); innerHits.setExplain(randomBoolean()); diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java index a565db41516a9..15ec8af0af2c5 100644 --- a/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java @@ -94,6 +94,22 @@ public void testPhrase() throws IOException { } + public void testPhraseWithStopword() throws IOException { + + CannedTokenStream ts = new CannedTokenStream( + new Token("term1", 1, 1, 2), + new Token("term3", 2, 5, 6) + ); + + IntervalsSource source = BUILDER.analyzeText(new CachingTokenFilter(ts), 0, true); + IntervalsSource expected = Intervals.phrase( + Intervals.term("term1"), Intervals.extend(Intervals.term("term3"), 1, 0) + ); + + assertEquals(expected, source); + + } + public void testSimpleSynonyms() throws IOException { CannedTokenStream ts = new CannedTokenStream( @@ -112,16 +128,32 @@ public void testSimpleSynonyms() throws IOException { } - public void testGraphSynonyms() throws IOException { + public void testSimpleSynonymsWithGap() throws IOException { + // term1 [] term2/term3/term4 term5 + CannedTokenStream ts = new CannedTokenStream( + new Token("term1", 1, 2), + new Token("term2", 2, 3, 4), + new Token("term3", 0, 3, 4), + new Token("term4", 0, 3, 4), + new Token("term5", 5, 6) + ); + + IntervalsSource source = BUILDER.analyzeText(new CachingTokenFilter(ts), -1, true); + IntervalsSource expected = Intervals.ordered( + Intervals.term("term1"), + Intervals.extend(Intervals.or(Intervals.term("term2"), Intervals.term("term3"), Intervals.term("term4")), 1, 0), + Intervals.term("term5") + ); + assertEquals(expected, source); + } - // term1 term2/term3:2 term4 term5 + public void testGraphSynonyms() throws IOException { - Token graphToken = new Token("term2", 3, 4); - graphToken.setPositionLength(2); + // term1 term2:2/term3 term4 term5 CannedTokenStream ts = new CannedTokenStream( new Token("term1", 1, 2), - graphToken, + new Token("term2", 1, 3, 4, 2), new Token("term3", 0, 3, 4), new Token("term4", 5, 6), new Token("term5", 6, 7) @@ -138,4 +170,50 @@ public void testGraphSynonyms() throws IOException { } + public void testGraphSynonymsWithGaps() throws IOException { + + // term1 [] term2:4/term3 [] [] term4 term5 + + CannedTokenStream ts = new CannedTokenStream( + new Token("term1", 1, 2), + new Token("term2", 2, 3, 4, 4), + new Token("term3", 0, 3, 4), + new Token("term4", 3, 5, 6), + new Token("term5", 6, 7) + ); + + IntervalsSource source = BUILDER.analyzeText(new CachingTokenFilter(ts), -1, true); + IntervalsSource expected = Intervals.ordered( + Intervals.term("term1"), + Intervals.or( + Intervals.extend(Intervals.term("term2"), 1, 0), + Intervals.phrase( + 
Intervals.extend(Intervals.term("term3"), 1, 0), + Intervals.extend(Intervals.term("term4"), 2, 0))), + Intervals.term("term5") + ); + + assertEquals(expected, source); + + } + + public void testGraphTerminatesOnGap() throws IOException { + // term1 term2:2/term3 term4 [] term5 + CannedTokenStream ts = new CannedTokenStream( + new Token("term1", 1, 2), + new Token("term2", 1, 2, 3, 2), + new Token("term3", 0, 2, 3), + new Token("term4", 2, 3), + new Token("term5", 2, 6, 7) + ); + + IntervalsSource source = BUILDER.analyzeText(new CachingTokenFilter(ts), -1, true); + IntervalsSource expected = Intervals.ordered( + Intervals.term("term1"), + Intervals.or(Intervals.term("term2"), Intervals.phrase("term3", "term4")), + Intervals.extend(Intervals.term("term5"), 1, 0) + ); + assertEquals(expected, source); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 7181c1de1fb41..9ca77ae3e1bf9 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -1210,13 +1210,13 @@ public void testUnmappedFieldRewriteToMatchNoDocs() throws IOException { .field("unmapped_field") .lenient(true) .toQuery(createShardContext()); - assertEquals(new BooleanQuery.Builder().build(), query); + assertEquals(new MatchNoDocsQuery(), query); // Unmapped prefix field query = new QueryStringQueryBuilder("unmapped_field:hello") .lenient(true) .toQuery(createShardContext()); - assertEquals(new BooleanQuery.Builder().build(), query); + assertEquals(new MatchNoDocsQuery(), query); // Unmapped fields query = new QueryStringQueryBuilder("hello") @@ -1224,7 +1224,32 @@ public void testUnmappedFieldRewriteToMatchNoDocs() throws IOException { .field("unmapped_field") .field("another_field") .toQuery(createShardContext()); - assertEquals(new BooleanQuery.Builder().build(), query); + assertEquals(new MatchNoDocsQuery(), query); + + // Multi block + query = new QueryStringQueryBuilder("first unmapped:second") + .field(STRING_FIELD_NAME) + .field("unmapped") + .field("another_unmapped") + .defaultOperator(Operator.AND) + .toQuery(createShardContext()); + BooleanQuery expected = new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "first")), BooleanClause.Occur.MUST) + .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST) + .build(); + assertEquals(expected, query); + + query = new SimpleQueryStringBuilder("first unknown:second") + .field("unmapped") + .field("another_unmapped") + .defaultOperator(Operator.AND) + .toQuery(createShardContext()); + expected = new BooleanQuery.Builder() + .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST) + .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST) + .build(); + assertEquals(expected, query); + } public void testDefaultField() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index 3242f343379aa..2bb289ddc11fa 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -717,6 +717,17 @@ public void testUnmappedFieldNoTokenWithAndOperator() throws IOException { .add(new TermQuery(new Term(STRING_FIELD_NAME, 
"second")), BooleanClause.Occur.MUST) .build(); assertEquals(expected, query); + query = new SimpleQueryStringBuilder("first & second") + .field("unmapped") + .field("another_unmapped") + .defaultOperator(Operator.AND) + .toQuery(createShardContext()); + expected = new BooleanQuery.Builder() + .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST) + .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST) + .add(new MatchNoDocsQuery(), BooleanClause.Occur.MUST) + .build(); + assertEquals(expected, query); } public void testNegativeFieldBoost() { diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 725225773a682..74ded2d749099 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -75,7 +75,6 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -83,7 +82,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestCase { @@ -440,17 +438,7 @@ public void testResyncAfterPrimaryPromotion() throws Exception { } } - @TestLogging( - "_root:DEBUG," - + "org.elasticsearch.action.bulk:TRACE," - + "org.elasticsearch.action.get:TRACE," - + "org.elasticsearch.cluster.service:TRACE," - + "org.elasticsearch.discovery:TRACE," - + "org.elasticsearch.indices.cluster:TRACE," - + "org.elasticsearch.indices.recovery:TRACE," - + "org.elasticsearch.index.seqno:TRACE," - + "org.elasticsearch.index.shard:TRACE") - public void testWaitForPendingSeqNo() throws Exception { + public void testDoNotWaitForPendingSeqNo() throws Exception { IndexMetaData metaData = buildIndexMetaData(1); final int pendingDocs = randomIntBetween(1, 5); @@ -500,20 +488,14 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { IndexShard newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); CountDownLatch recoveryStart = new CountDownLatch(1); - AtomicBoolean opsSent = new AtomicBoolean(false); + AtomicBoolean recoveryDone = new AtomicBoolean(false); final Future recoveryFuture = shards.asyncRecoverReplica(newReplica, (indexShard, node) -> { recoveryStart.countDown(); return new RecoveryTarget(indexShard, node, recoveryListener, l -> {}) { @Override - public void indexTranslogOperations( - final List operations, - final int totalTranslogOps, - final long maxSeenAutoIdTimestamp, - final long msu, - final RetentionLeases retentionLeases, - final ActionListener listener) { - opsSent.set(true); - super.indexTranslogOperations(operations, totalTranslogOps, maxSeenAutoIdTimestamp, msu, retentionLeases, listener); + public void finalizeRecovery(long globalCheckpoint, ActionListener listener) { + recoveryDone.set(true); + super.finalizeRecovery(globalCheckpoint, listener); } }; }); @@ -524,7 +506,7 @@ public void indexTranslogOperations( final int indexedDuringRecovery = shards.indexDocs(randomInt(5)); docs += indexedDuringRecovery; - 
assertFalse("recovery should wait on pending docs", opsSent.get()); + assertBusy(() -> assertTrue("recovery should not wait for on pending docs", recoveryDone.get())); primaryEngineFactory.releaseLatchedIndexers(); pendingDocsDone.await(); @@ -533,10 +515,6 @@ public void indexTranslogOperations( recoveryFuture.get(); assertThat(newReplica.recoveryState().getIndex().fileDetails(), empty()); - assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), - // we don't know which of the inflight operations made it into the translog range we re-play - both(greaterThanOrEqualTo(docs-indexedDuringRecovery)).and(lessThanOrEqualTo(docs))); - shards.assertAllEqual(docs); } finally { primaryEngineFactory.close(); diff --git a/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java new file mode 100644 index 0000000000000..ce3986f0a2517 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java @@ -0,0 +1,151 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.replication; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.seqno.RetentionLease; +import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; +import org.elasticsearch.index.seqno.RetentionLeases; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class RetentionLeasesReplicationTests extends ESIndexLevelReplicationTestCase { + + public void testSimpleSyncRetentionLeases() throws Exception { + Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build(); + try (ReplicationGroup group = createGroup(between(0, 2), settings)) { + group.startAll(); + List leases = new ArrayList<>(); + int iterations = between(1, 100); + CountDownLatch latch = new CountDownLatch(iterations); + for (int i = 0; i < iterations; i++) { + if (leases.isEmpty() == false && rarely()) { + RetentionLease leaseToRemove = randomFrom(leases); + leases.remove(leaseToRemove); + group.removeRetentionLease(leaseToRemove.id(), ActionListener.wrap(latch::countDown)); + } else { + RetentionLease newLease = group.addRetentionLease(Integer.toString(i), randomNonNegativeLong(), "test-" + i, + ActionListener.wrap(latch::countDown)); + leases.add(newLease); + } + } + RetentionLeases leasesOnPrimary = group.getPrimary().getRetentionLeases(); + assertThat(leasesOnPrimary.version(), equalTo((long) iterations)); + assertThat(leasesOnPrimary.primaryTerm(), equalTo(group.getPrimary().getOperationPrimaryTerm())); + assertThat(leasesOnPrimary.leases(), containsInAnyOrder(leases.toArray(new RetentionLease[0]))); + latch.await(); + for (IndexShard replica : group.getReplicas()) { + assertThat(replica.getRetentionLeases(), equalTo(leasesOnPrimary)); + } + } + } + + public void testOutOfOrderRetentionLeasesRequests() throws Exception { + Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build(); + int numberOfReplicas = between(1, 2); + IndexMetaData indexMetaData = buildIndexMetaData(numberOfReplicas, settings, indexMapping); + try (ReplicationGroup group = new ReplicationGroup(indexMetaData) { + @Override + protected void syncRetentionLeases(ShardId shardId, RetentionLeases leases, ActionListener listener) { + listener.onResponse(new SyncRetentionLeasesResponse(new RetentionLeaseSyncAction.Request(shardId, leases))); + } + }) { + group.startAll(); + int numLeases = between(1, 10); + List requests = new ArrayList<>(); + for (int i = 0; i < numLeases; i++) { + PlainActionFuture future = new PlainActionFuture<>(); + group.addRetentionLease(Integer.toString(i), randomNonNegativeLong(), "test-" + i, future); + requests.add(((SyncRetentionLeasesResponse) future.actionGet()).syncRequest); + } + RetentionLeases leasesOnPrimary = group.getPrimary().getRetentionLeases(); + for (IndexShard replica : group.getReplicas()) { + 
Randomness.shuffle(requests); + requests.forEach(request -> group.executeRetentionLeasesSyncRequestOnReplica(request, replica)); + assertThat(replica.getRetentionLeases(), equalTo(leasesOnPrimary)); + } + } + } + + public void testSyncRetentionLeasesWithPrimaryPromotion() throws Exception { + Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build(); + int numberOfReplicas = between(2, 4); + IndexMetaData indexMetaData = buildIndexMetaData(numberOfReplicas, settings, indexMapping); + try (ReplicationGroup group = new ReplicationGroup(indexMetaData) { + @Override + protected void syncRetentionLeases(ShardId shardId, RetentionLeases leases, ActionListener listener) { + listener.onResponse(new SyncRetentionLeasesResponse(new RetentionLeaseSyncAction.Request(shardId, leases))); + } + }) { + group.startAll(); + int numLeases = between(1, 100); + IndexShard newPrimary = randomFrom(group.getReplicas()); + RetentionLeases latestRetentionLeasesOnNewPrimary = RetentionLeases.EMPTY; + for (int i = 0; i < numLeases; i++) { + PlainActionFuture addLeaseFuture = new PlainActionFuture<>(); + group.addRetentionLease(Integer.toString(i), randomNonNegativeLong(), "test-" + i, addLeaseFuture); + RetentionLeaseSyncAction.Request request = ((SyncRetentionLeasesResponse) addLeaseFuture.actionGet()).syncRequest; + for (IndexShard replica : randomSubsetOf(group.getReplicas())) { + group.executeRetentionLeasesSyncRequestOnReplica(request, replica); + if (newPrimary == replica) { + latestRetentionLeasesOnNewPrimary = request.getRetentionLeases(); + } + } + } + group.promoteReplicaToPrimary(newPrimary).get(); + // we need to make changes to retention leases to sync it to replicas + // since we don't sync retention leases when promoting a new primary. 
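+ // adding one more lease below bumps the version on the new primary by exactly one over the leases it had already received as a replica, and the resulting sync request is replayed on every replica to bring them back in line with the new primary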
+ PlainActionFuture newLeaseFuture = new PlainActionFuture<>(); + group.addRetentionLease("new-lease-after-promotion", randomNonNegativeLong(), "test", newLeaseFuture); + RetentionLeases leasesOnPrimary = group.getPrimary().getRetentionLeases(); + assertThat(leasesOnPrimary.primaryTerm(), equalTo(group.getPrimary().getOperationPrimaryTerm())); + assertThat(leasesOnPrimary.version(), equalTo(latestRetentionLeasesOnNewPrimary.version() + 1L)); + assertThat(leasesOnPrimary.leases(), hasSize(latestRetentionLeasesOnNewPrimary.leases().size() + 1)); + RetentionLeaseSyncAction.Request request = ((SyncRetentionLeasesResponse) newLeaseFuture.actionGet()).syncRequest; + for (IndexShard replica : group.getReplicas()) { + group.executeRetentionLeasesSyncRequestOnReplica(request, replica); + } + for (IndexShard replica : group.getReplicas()) { + assertThat(replica.getRetentionLeases(), equalTo(leasesOnPrimary)); + } + } + } + + static final class SyncRetentionLeasesResponse extends ReplicationResponse { + final RetentionLeaseSyncAction.Request syncRequest; + SyncRetentionLeasesResponse(RetentionLeaseSyncAction.Request syncRequest) { + this.syncRequest = syncRequest; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java index ee916dd4c47dd..1a85e29f02090 100644 --- a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java @@ -397,6 +397,8 @@ public void testParseGeoPoint() throws IOException { parser.nextToken(); GeoPoint point = GeoUtils.parseGeoPoint(parser); assertThat(point, equalTo(new GeoPoint(lat, lon))); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } json = jsonBuilder().startObject().field("lat", String.valueOf(lat)).field("lon", String.valueOf(lon)).endObject(); try (XContentParser parser = createParser(json)) { @@ -438,6 +440,21 @@ public void testParseGeoPointStringZValueError() throws IOException { } } + public void testParseGeoPointArrayZValueError() throws IOException { + double lat = randomDouble() * 180 - 90 + randomIntBetween(-1000, 1000) * 180; + double lon = randomDouble() * 360 - 180 + randomIntBetween(-1000, 1000) * 360; + double alt = randomDouble() * 1000; + XContentBuilder json = jsonBuilder().startArray().value(lat).value(lon).value(alt).endArray(); + try (XContentParser parser = createParser(json)) { + parser.nextToken(); + Exception e = expectThrows(ElasticsearchParseException.class, + () -> GeoUtils.parseGeoPoint(parser, new GeoPoint(), false)); + assertThat(e.getMessage(), containsString("but [ignore_z_value] parameter is [false]")); + assertThat(parser.currentToken(), is(Token.END_ARRAY)); + assertNull(parser.nextToken()); + } + } + public void testParseGeoPointGeohash() throws IOException { for (int i = 0; i < 100; i++) { int geoHashLength = randomIntBetween(1, GeoHashUtils.PRECISION); @@ -451,6 +468,8 @@ public void testParseGeoPointGeohash() throws IOException { GeoPoint point = GeoUtils.parseGeoPoint(parser); assertThat(point.lat(), allOf(lessThanOrEqualTo(90.0), greaterThanOrEqualTo(-90.0))); assertThat(point.lon(), allOf(lessThanOrEqualTo(180.0), greaterThanOrEqualTo(-180.0))); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } json = jsonBuilder().startObject().field("geohash", geohashBuilder.toString()).endObject(); try (XContentParser parser = 
createParser(json)) { @@ -470,6 +489,8 @@ public void testParseGeoPointGeohashWrongType() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), containsString("geohash must be a string")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -480,6 +501,8 @@ public void testParseGeoPointLatNoLon() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("field [lon] missing")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -490,6 +513,8 @@ public void testParseGeoPointLonNoLat() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("field [lat] missing")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -500,6 +525,8 @@ public void testParseGeoPointLonWrongType() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("longitude must be a number")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -510,6 +537,8 @@ public void testParseGeoPointLatWrongType() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("latitude must be a number")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -578,6 +607,9 @@ public void testParseGeoPointArrayWrongType() throws IOException { } Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("numeric value expected")); + assertThat(parser.currentToken(), is(Token.END_ARRAY)); + assertThat(parser.nextToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index bb526a3470873..0e7cbaa42d119 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -24,16 +24,22 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.IndexSettingsModule; +import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import 
java.util.concurrent.atomic.AtomicReference; @@ -41,10 +47,12 @@ import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.hasToString; public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTestCase { @@ -90,6 +98,60 @@ public void testAddOrRenewRetentionLease() { } } + public void testAddDuplicateRetentionLease() { + final AllocationId allocationId = AllocationId.newInitializing(); + long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + primaryTerm, + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> {}); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + replicationTracker.addRetentionLease(id, retainingSequenceNumber, source, ActionListener.wrap(() -> {})); + final long nextRetaininSequenceNumber = randomLongBetween(retainingSequenceNumber, Long.MAX_VALUE); + final RetentionLeaseAlreadyExistsException e = expectThrows( + RetentionLeaseAlreadyExistsException.class, + () -> replicationTracker.addRetentionLease(id, nextRetaininSequenceNumber, source, ActionListener.wrap(() -> {}))); + assertThat(e, hasToString(containsString("retention lease with ID [" + id + "] already exists"))); + } + + public void testRenewNotFoundRetentionLease() { + final AllocationId allocationId = AllocationId.newInitializing(); + long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + primaryTerm, + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> {}); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final String id = randomAlphaOfLength(8); + final RetentionLeaseNotFoundException e = expectThrows( + RetentionLeaseNotFoundException.class, + () -> replicationTracker.renewRetentionLease(id, randomNonNegativeLong(), randomAlphaOfLength(8))); + assertThat(e, hasToString(containsString("retention lease with ID [" + id + "] not found"))); + } + public void testAddRetentionLeaseCausesRetentionLeaseSync() { final AllocationId allocationId = AllocationId.newInitializing(); final Map retainingSequenceNumbers = new HashMap<>(); @@ -137,6 +199,130 @@ public void testAddRetentionLeaseCausesRetentionLeaseSync() { } } + public void testRemoveRetentionLease() { + final AllocationId allocationId = AllocationId.newInitializing(); + long primaryTerm = 
randomLongBetween(1, Long.MAX_VALUE); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + primaryTerm, + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> {}); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final int length = randomIntBetween(0, 8); + final long[] minimumRetainingSequenceNumbers = new long[length]; + for (int i = 0; i < length; i++) { + if (rarely() && primaryTerm < Long.MAX_VALUE) { + primaryTerm = randomLongBetween(primaryTerm + 1, Long.MAX_VALUE); + replicationTracker.setOperationPrimaryTerm(primaryTerm); + } + minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + replicationTracker.addRetentionLease( + Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {})); + } + + for (int i = 0; i < length; i++) { + if (rarely() && primaryTerm < Long.MAX_VALUE) { + primaryTerm = randomLongBetween(primaryTerm + 1, Long.MAX_VALUE); + replicationTracker.setOperationPrimaryTerm(primaryTerm); + } + /* + * Remove from the end since it will make the following assertion easier; we want to ensure that only the intended lease was + * removed. + */ + replicationTracker.removeRetentionLease(Integer.toString(length - i - 1), ActionListener.wrap(() -> {})); + assertRetentionLeases( + replicationTracker, + length - i - 1, + minimumRetainingSequenceNumbers, + primaryTerm, + 1 + length + i, + true, + false); + } + } + + public void testRemoveNotFound() { + final AllocationId allocationId = AllocationId.newInitializing(); + long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + primaryTerm, + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> {}); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final String id = randomAlphaOfLength(8); + final RetentionLeaseNotFoundException e = expectThrows( + RetentionLeaseNotFoundException.class, + () -> replicationTracker.removeRetentionLease(id, ActionListener.wrap(() -> {}))); + assertThat(e, hasToString(containsString("retention lease with ID [" + id + "] not found"))); + } + + public void testRemoveRetentionLeaseCausesRetentionLeaseSync() { + final AllocationId allocationId = AllocationId.newInitializing(); + final Map retainingSequenceNumbers = new HashMap<>(); + final AtomicBoolean invoked = new AtomicBoolean(); + final AtomicReference reference = new AtomicReference<>(); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + randomNonNegativeLong(), + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> { + // we do not want to hold a lock on the replication tracker in the callback! 
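+ // the tracker is published via an AtomicReference so that this callback can check that the calling thread is not holding the tracker's monitor when the sync is triggered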
+ assertFalse(Thread.holdsLock(reference.get())); + invoked.set(true); + assertThat( + leases.leases() + .stream() + .collect(Collectors.toMap(RetentionLease::id, RetentionLease::retainingSequenceNumber)), + equalTo(retainingSequenceNumbers)); + }); + reference.set(replicationTracker); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + + final int length = randomIntBetween(0, 8); + for (int i = 0; i < length; i++) { + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + retainingSequenceNumbers.put(id, retainingSequenceNumber); + replicationTracker.addRetentionLease(id, retainingSequenceNumber, "test", ActionListener.wrap(() -> {})); + // assert that the new retention lease callback was invoked + assertTrue(invoked.get()); + + // reset the invocation marker so that we can assert the callback was not invoked when removing the lease + invoked.set(false); + retainingSequenceNumbers.remove(id); + replicationTracker.removeRetentionLease(id, ActionListener.wrap(() -> {})); + assertTrue(invoked.get()); + } + } + public void testExpirationOnPrimary() { runExpirationTest(true); } @@ -152,7 +338,7 @@ private void runExpirationTest(final boolean primaryMode) { final Settings settings = Settings .builder() .put( - IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(), + IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), TimeValue.timeValueMillis(retentionLeaseMillis)) .build(); final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); @@ -280,6 +466,152 @@ public void testReplicaIgnoresOlderRetentionLeasesVersion() { } } + public void testLoadAndPersistRetentionLeases() throws IOException { + final AllocationId allocationId = AllocationId.newInitializing(); + long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + primaryTerm, + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> {}); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final int length = randomIntBetween(0, 8); + for (int i = 0; i < length; i++) { + if (rarely() && primaryTerm < Long.MAX_VALUE) { + primaryTerm = randomLongBetween(primaryTerm + 1, Long.MAX_VALUE); + replicationTracker.setOperationPrimaryTerm(primaryTerm); + } + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + replicationTracker.addRetentionLease( + Integer.toString(i), retainingSequenceNumber, "test-" + i, ActionListener.wrap(() -> {})); + } + + final Path path = createTempDir(); + replicationTracker.persistRetentionLeases(path); + assertThat(replicationTracker.loadRetentionLeases(path), equalTo(replicationTracker.getRetentionLeases())); + } + + public void testUnnecessaryPersistenceOfRetentionLeases() throws IOException { + final AllocationId allocationId = AllocationId.newInitializing(); + long primaryTerm = randomLongBetween(1, 
Long.MAX_VALUE); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + primaryTerm, + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> {}); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final int length = randomIntBetween(0, 8); + for (int i = 0; i < length; i++) { + if (rarely() && primaryTerm < Long.MAX_VALUE) { + primaryTerm = randomLongBetween(primaryTerm + 1, Long.MAX_VALUE); + replicationTracker.setOperationPrimaryTerm(primaryTerm); + } + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + replicationTracker.addRetentionLease( + Integer.toString(i), retainingSequenceNumber, "test-" + i, ActionListener.wrap(() -> {})); + } + + final Path path = createTempDir(); + replicationTracker.persistRetentionLeases(path); + + final Tuple retentionLeasesWithGeneration = + RetentionLeases.FORMAT.loadLatestStateWithGeneration(logger, NamedXContentRegistry.EMPTY, path); + + replicationTracker.persistRetentionLeases(path); + final Tuple retentionLeasesWithGenerationAfterUnnecessaryPersistence = + RetentionLeases.FORMAT.loadLatestStateWithGeneration(logger, NamedXContentRegistry.EMPTY, path); + + assertThat(retentionLeasesWithGenerationAfterUnnecessaryPersistence.v1(), equalTo(retentionLeasesWithGeneration.v1())); + assertThat(retentionLeasesWithGenerationAfterUnnecessaryPersistence.v2(), equalTo(retentionLeasesWithGeneration.v2())); + } + + /** + * Test that we correctly synchronize writing the retention lease state file in {@link ReplicationTracker#persistRetentionLeases(Path)}. + * This test can fail without the synchronization block in that method. 
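+ * Each thread adds one new lease and immediately persists the state; once every thread has finished, the leases loaded from disk must match the leases held in memory.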
+ * + * @throws IOException if an I/O exception occurs loading the retention lease state file + */ + public void testPersistRetentionLeasesUnderConcurrency() throws IOException { + final AllocationId allocationId = AllocationId.newInitializing(); + long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + primaryTerm, + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> {}); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final int length = randomIntBetween(0, 8); + for (int i = 0; i < length; i++) { + if (rarely() && primaryTerm < Long.MAX_VALUE) { + primaryTerm = randomLongBetween(primaryTerm + 1, Long.MAX_VALUE); + replicationTracker.setOperationPrimaryTerm(primaryTerm); + } + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + replicationTracker.addRetentionLease( + Integer.toString(i), retainingSequenceNumber, "test-" + i, ActionListener.wrap(() -> {})); + } + + final Path path = createTempDir(); + final int numberOfThreads = randomIntBetween(1, 2 * Runtime.getRuntime().availableProcessors()); + final CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); + final Thread[] threads = new Thread[numberOfThreads]; + for (int i = 0; i < numberOfThreads; i++) { + final String id = Integer.toString(length + i); + threads[i] = new Thread(() -> { + try { + barrier.await(); + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + replicationTracker.addRetentionLease(id, retainingSequenceNumber, "test-" + id, ActionListener.wrap(() -> {})); + replicationTracker.persistRetentionLeases(path); + barrier.await(); + } catch (final BrokenBarrierException | InterruptedException | WriteStateException e) { + throw new AssertionError(e); + } + }); + threads[i].start(); + } + + try { + // synchronize the threads invoking ReplicationTracker#persistRetentionLeases(Path path) + barrier.await(); + // wait for all the threads to finish + barrier.await(); + for (int i = 0; i < numberOfThreads; i++) { + threads[i].join(); + } + } catch (final BrokenBarrierException | InterruptedException e) { + throw new AssertionError(e); + } + assertThat(replicationTracker.loadRetentionLeases(path), equalTo(replicationTracker.getRetentionLeases())); + } + private void assertRetentionLeases( final ReplicationTracker replicationTracker, final int size, diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseActionsTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseActionsTests.java new file mode 100644 index 0000000000000..bff4493321289 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseActionsTests.java @@ -0,0 +1,533 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; + +import static org.elasticsearch.index.seqno.RetentionLeaseActions.RETAIN_ALL; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.hasToString; + +public class RetentionLeaseActionsTests extends ESSingleNodeTestCase { + + public void testAddAction() { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build(); + final IndexService indexService = createIndex("index", settings); + ensureGreen("index"); + + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomBoolean() ? RETAIN_ALL : randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + client() + .execute( + RetentionLeaseActions.Add.INSTANCE, + new RetentionLeaseActions.AddRequest(indexService.getShard(0).shardId(), id, retainingSequenceNumber, source)) + .actionGet(); + + final IndicesStatsResponse stats = client() + .execute( + IndicesStatsAction.INSTANCE, + new IndicesStatsRequest().indices("index")) + .actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(1)); + assertNotNull(stats.getShards()[0].getRetentionLeaseStats()); + assertThat(stats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases(), hasSize(1)); + final RetentionLease retentionLease = stats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases().iterator().next(); + assertThat(retentionLease.id(), equalTo(id)); + assertThat(retentionLease.retainingSequenceNumber(), equalTo(retainingSequenceNumber == RETAIN_ALL ? 
0L : retainingSequenceNumber)); + assertThat(retentionLease.source(), equalTo(source)); + } + + public void testAddAlreadyExists() { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build(); + final IndexService indexService = createIndex("index", settings); + ensureGreen("index"); + + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomBoolean() ? RETAIN_ALL : randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + client() + .execute( + RetentionLeaseActions.Add.INSTANCE, + new RetentionLeaseActions.AddRequest(indexService.getShard(0).shardId(), id, retainingSequenceNumber, source)) + .actionGet(); + + final long nextRetainingSequenceNumber = + retainingSequenceNumber == RETAIN_ALL && randomBoolean() ? RETAIN_ALL + : randomLongBetween(Math.max(retainingSequenceNumber, 0L), Long.MAX_VALUE); + + final RetentionLeaseAlreadyExistsException e = expectThrows( + RetentionLeaseAlreadyExistsException.class, + () -> client() + .execute( + RetentionLeaseActions.Add.INSTANCE, + new RetentionLeaseActions.AddRequest( + indexService.getShard(0).shardId(), + id, + nextRetainingSequenceNumber, + source)) + .actionGet()); + assertThat(e, hasToString(containsString("retention lease with ID [" + id + "] already exists"))); + } + + public void testRenewAction() throws InterruptedException { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build(); + final IndexService indexService = createIndex("index", settings); + ensureGreen("index"); + + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomBoolean() ? RETAIN_ALL : randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + + /* + * When we renew the lease, we want to ensure that the timestamp on the thread pool clock has advanced. To do this, we sample how + * often the thread pool clock advances based on the following setting. After we add the initial lease we sample the relative time. + * Immediately before the renewal of the lease, we sleep long enough to ensure that an estimated time interval has elapsed, and + * sample the thread pool to ensure the clock has in fact advanced. + */ + final TimeValue estimatedTimeInterval = ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.get(getInstanceFromNode(Node.class).settings()); + + client() + .execute( + RetentionLeaseActions.Add.INSTANCE, + new RetentionLeaseActions.AddRequest(indexService.getShard(0).shardId(), id, retainingSequenceNumber, source)) + .actionGet(); + + /* + * Sample these after adding the retention lease so that advancement here guarantees we have advanced past the timestamp on the + * lease. 
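+ * The renewal below is attempted only after the clock has advanced past the timestamp sampled here, which guarantees that the renewed lease carries a strictly greater timestamp than the initial lease.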
+ */ + final ThreadPool threadPool = getInstanceFromNode(ThreadPool.class); + final long timestampUpperBound = threadPool.absoluteTimeInMillis(); + final long start = System.nanoTime(); + + final IndicesStatsResponse initialStats = client() + .execute( + IndicesStatsAction.INSTANCE, + new IndicesStatsRequest().indices("index")) + .actionGet(); + + assertNotNull(initialStats.getShards()); + assertThat(initialStats.getShards(), arrayWithSize(1)); + assertNotNull(initialStats.getShards()[0].getRetentionLeaseStats()); + assertThat(initialStats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases(), hasSize(1)); + final RetentionLease initialRetentionLease = + initialStats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases().iterator().next(); + + final long nextRetainingSequenceNumber = + retainingSequenceNumber == RETAIN_ALL && randomBoolean() ? RETAIN_ALL + : randomLongBetween(Math.max(retainingSequenceNumber, 0L), Long.MAX_VALUE); + + /* + * Wait until the thread pool clock advances. Note that this will fail on a system when the system clock goes backwards during + * execution of the test. The specific circumstances under which this can fail is if the system clock goes backwards more than the + * suite timeout. It seems there is nothing simple that we can do to avoid this? + */ + do { + final long end = System.nanoTime(); + if (end - start < estimatedTimeInterval.nanos()) { + Thread.sleep(TimeUnit.NANOSECONDS.toMillis(estimatedTimeInterval.nanos() - (end - start))); + } + } while (threadPool.absoluteTimeInMillis() <= timestampUpperBound); + + client() + .execute( + RetentionLeaseActions.Renew.INSTANCE, + new RetentionLeaseActions.RenewRequest(indexService.getShard(0).shardId(), id, nextRetainingSequenceNumber, source)) + .actionGet(); + + final IndicesStatsResponse renewedStats = client() + .execute( + IndicesStatsAction.INSTANCE, + new IndicesStatsRequest().indices("index")) + .actionGet(); + + assertNotNull(renewedStats.getShards()); + assertThat(renewedStats.getShards(), arrayWithSize(1)); + assertNotNull(renewedStats.getShards()[0].getRetentionLeaseStats()); + assertThat(renewedStats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases(), hasSize(1)); + final RetentionLease renewedRetentionLease = + renewedStats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases().iterator().next(); + assertThat(renewedRetentionLease.id(), equalTo(id)); + assertThat( + renewedRetentionLease.retainingSequenceNumber(), + equalTo(nextRetainingSequenceNumber == RETAIN_ALL ? 0L : nextRetainingSequenceNumber)); + assertThat(renewedRetentionLease.timestamp(), greaterThan(initialRetentionLease.timestamp())); + assertThat(renewedRetentionLease.source(), equalTo(source)); + } + + public void testRenewNotFound() { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build(); + final IndexService indexService = createIndex("index", settings); + ensureGreen("index"); + + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomBoolean() ? 
RETAIN_ALL : randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + + final RetentionLeaseNotFoundException e = expectThrows( + RetentionLeaseNotFoundException.class, + () -> client() + .execute( + RetentionLeaseActions.Renew.INSTANCE, + new RetentionLeaseActions.RenewRequest( + indexService.getShard(0).shardId(), + id, + retainingSequenceNumber, + source)) + .actionGet()); + assertThat(e, hasToString(containsString("retention lease with ID [" + id + "] not found"))); + } + + public void testRemoveAction() { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build(); + final IndexService indexService = createIndex("index", settings); + ensureGreen("index"); + + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomBoolean() ? RETAIN_ALL : randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + client() + .execute( + RetentionLeaseActions.Add.INSTANCE, + new RetentionLeaseActions.AddRequest(indexService.getShard(0).shardId(), id, retainingSequenceNumber, source)) + .actionGet(); + + client() + .execute( + RetentionLeaseActions.Remove.INSTANCE, + new RetentionLeaseActions.RemoveRequest(indexService.getShard(0).shardId(), id)) + .actionGet(); + + final IndicesStatsResponse stats = client() + .execute( + IndicesStatsAction.INSTANCE, + new IndicesStatsRequest().indices("index")) + .actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(1)); + assertNotNull(stats.getShards()[0].getRetentionLeaseStats()); + assertThat(stats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases(), hasSize(0)); + } + + public void testRemoveNotFound() { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build(); + final IndexService indexService = createIndex("index", settings); + ensureGreen("index"); + + final String id = randomAlphaOfLength(8); + + final RetentionLeaseNotFoundException e = expectThrows( + RetentionLeaseNotFoundException.class, + () -> client() + .execute( + RetentionLeaseActions.Remove.INSTANCE, + new RetentionLeaseActions.RemoveRequest(indexService.getShard(0).shardId(), id)) + .actionGet()); + assertThat(e, hasToString(containsString("retention lease with ID [" + id + "] not found"))); + } + + public void testAddUnderBlock() throws InterruptedException { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build(); + final IndexService indexService = createIndex("index", settings); + ensureGreen("index"); + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomBoolean() ? 
RETAIN_ALL : randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + runActionUnderBlockTest( + indexService, + (shardId, actionLatch) -> + client().execute( + RetentionLeaseActions.Add.INSTANCE, + new RetentionLeaseActions.AddRequest(shardId, id, retainingSequenceNumber, source), + new ActionListener() { + + @Override + public void onResponse(final RetentionLeaseActions.Response response) { + actionLatch.countDown(); + } + + @Override + public void onFailure(final Exception e) { + fail(e.toString()); + } + + })); + + final IndicesStatsResponse stats = client() + .execute( + IndicesStatsAction.INSTANCE, + new IndicesStatsRequest().indices("index")) + .actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(1)); + assertNotNull(stats.getShards()[0].getRetentionLeaseStats()); + assertThat(stats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases(), hasSize(1)); + final RetentionLease retentionLease = stats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases().iterator().next(); + assertThat(retentionLease.id(), equalTo(id)); + assertThat(retentionLease.retainingSequenceNumber(), equalTo(retainingSequenceNumber == RETAIN_ALL ? 0L : retainingSequenceNumber)); + assertThat(retentionLease.source(), equalTo(source)); + } + + public void testRenewUnderBlock() throws InterruptedException { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build(); + final IndexService indexService = createIndex("index", settings); + ensureGreen("index"); + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomBoolean() ? RETAIN_ALL : randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + + /* + * When we renew the lease, we want to ensure that the timestamp on the thread pool clock has advanced. To do this, we sample how + * often the thread pool clock advances based on the following setting. After we add the initial lease we sample the relative time. + * Immediately before the renewal of the lease, we sleep long enough to ensure that an estimated time interval has elapsed, and + * sample the thread pool to ensure the clock has in fact advanced. + */ + final TimeValue estimatedTimeInterval = ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.get(getInstanceFromNode(Node.class).settings()); + + client() + .execute( + RetentionLeaseActions.Add.INSTANCE, + new RetentionLeaseActions.AddRequest(indexService.getShard(0).shardId(), id, retainingSequenceNumber, source)) + .actionGet(); + + /* + * Sample these after adding the retention lease so that advancement here guarantees we have advanced past the timestamp on the + * lease. 
+ */ + final ThreadPool threadPool = getInstanceFromNode(ThreadPool.class); + final long timestampUpperBound = threadPool.absoluteTimeInMillis(); + final long start = System.nanoTime(); + + final IndicesStatsResponse initialStats = client() + .execute( + IndicesStatsAction.INSTANCE, + new IndicesStatsRequest().indices("index")) + .actionGet(); + + assertNotNull(initialStats.getShards()); + assertThat(initialStats.getShards(), arrayWithSize(1)); + assertNotNull(initialStats.getShards()[0].getRetentionLeaseStats()); + assertThat(initialStats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases(), hasSize(1)); + final RetentionLease initialRetentionLease = + initialStats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases().iterator().next(); + + final long nextRetainingSequenceNumber = + retainingSequenceNumber == RETAIN_ALL && randomBoolean() ? RETAIN_ALL + : randomLongBetween(Math.max(retainingSequenceNumber, 0L), Long.MAX_VALUE); + + /* + * Wait until the thread pool clock advances. Note that this will fail on a system when the system clock goes backwards during + * execution of the test. The specific circumstances under which this can fail is if the system clock goes backwards more than the + * suite timeout. It seems there is nothing simple that we can do to avoid this? + */ + do { + final long end = System.nanoTime(); + if (end - start < estimatedTimeInterval.nanos()) { + Thread.sleep(TimeUnit.NANOSECONDS.toMillis(estimatedTimeInterval.nanos() - (end - start))); + } + } while (threadPool.absoluteTimeInMillis() <= timestampUpperBound); + + runActionUnderBlockTest( + indexService, + (shardId, actionLatch) -> + client().execute( + RetentionLeaseActions.Renew.INSTANCE, + new RetentionLeaseActions.RenewRequest(shardId, id, nextRetainingSequenceNumber, source), + new ActionListener() { + + @Override + public void onResponse(final RetentionLeaseActions.Response response) { + actionLatch.countDown(); + } + + @Override + public void onFailure(final Exception e) { + fail(e.toString()); + } + + })); + + final IndicesStatsResponse renewedStats = client() + .execute( + IndicesStatsAction.INSTANCE, + new IndicesStatsRequest().indices("index")) + .actionGet(); + + assertNotNull(renewedStats.getShards()); + assertThat(renewedStats.getShards(), arrayWithSize(1)); + assertNotNull(renewedStats.getShards()[0].getRetentionLeaseStats()); + assertThat(renewedStats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases(), hasSize(1)); + final RetentionLease renewedRetentionLease = + renewedStats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases().iterator().next(); + assertThat(renewedRetentionLease.id(), equalTo(id)); + assertThat( + renewedRetentionLease.retainingSequenceNumber(), + equalTo(nextRetainingSequenceNumber == RETAIN_ALL ? 0L : nextRetainingSequenceNumber)); + assertThat(renewedRetentionLease.timestamp(), greaterThan(initialRetentionLease.timestamp())); + assertThat(renewedRetentionLease.source(), equalTo(source)); + } + + public void testRemoveUnderBlock() throws InterruptedException { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build(); + final IndexService indexService = createIndex("index", settings); + ensureGreen("index"); + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomBoolean() ? 
RETAIN_ALL : randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + + client() .execute( RetentionLeaseActions.Add.INSTANCE, + new RetentionLeaseActions.AddRequest(indexService.getShard(0).shardId(), id, retainingSequenceNumber, source)) + .actionGet(); + + runActionUnderBlockTest( + indexService, + (shardId, actionLatch) -> + client().execute( + RetentionLeaseActions.Remove.INSTANCE, + new RetentionLeaseActions.RemoveRequest(shardId, id), + new ActionListener() { + + @Override + public void onResponse(final RetentionLeaseActions.Response response) { + actionLatch.countDown(); + } + + @Override + public void onFailure(final Exception e) { + fail(e.toString()); + } + + })); + + final IndicesStatsResponse stats = client() + .execute( + IndicesStatsAction.INSTANCE, + new IndicesStatsRequest().indices("index")) + .actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(1)); + assertNotNull(stats.getShards()[0].getRetentionLeaseStats()); + assertThat(stats.getShards()[0].getRetentionLeaseStats().retentionLeases().leases(), hasSize(0)); + } + + /* + * Tests that use this method are ensuring that the asynchronous usage of the permits API when permit acquisition is blocked is + * correctly handled. In these scenarios, we first acquire all permits. Then we submit a request to one of the retention lease actions + * (via the consumer callback). That invocation will go asynchronous and be queued, since all permits are blocked. Then we release the + * permit block and expect that the callbacks occur correctly. These assertions happen after returning from this method. + */ + private void runActionUnderBlockTest( + final IndexService indexService, + final BiConsumer consumer) throws InterruptedException { + + final CountDownLatch blockedLatch = new CountDownLatch(1); + final CountDownLatch unblockLatch = new CountDownLatch(1); + indexService.getShard(0).acquireAllPrimaryOperationsPermits( + new ActionListener() { + + @Override + public void onResponse(final Releasable releasable) { + try (Releasable ignore = releasable) { + blockedLatch.countDown(); + unblockLatch.await(); + } catch (final InterruptedException e) { + onFailure(e); + } + } + + @Override + public void onFailure(final Exception e) { + fail(e.toString()); + } + + }, + TimeValue.timeValueHours(1)); + + blockedLatch.await(); + + final CountDownLatch actionLatch = new CountDownLatch(1); + + consumer.accept(indexService.getShard(0).shardId(), actionLatch); + + unblockLatch.countDown(); + actionLatch.await(); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index e738c04d2a1bb..81ea56c609624 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -41,6 +42,7 @@ import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; import
org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportService; import org.mockito.ArgumentCaptor; @@ -91,7 +93,7 @@ public void tearDown() throws Exception { super.tearDown(); } - public void testRetentionLeaseBackgroundSyncActionOnPrimary() { + public void testRetentionLeaseBackgroundSyncActionOnPrimary() throws WriteStateException { final IndicesService indicesService = mock(IndicesService.class); final Index index = new Index("index", "uuid"); @@ -120,13 +122,13 @@ public void testRetentionLeaseBackgroundSyncActionOnPrimary() { final ReplicationOperation.PrimaryResult result = action.shardOperationOnPrimary(request, indexShard); - // the retention leases on the shard should be periodically flushed - verify(indexShard).afterWriteOperation(); + // the retention leases on the shard should be persisted + verify(indexShard).persistRetentionLeases(); // we should forward the request containing the current retention leases to the replica assertThat(result.replicaRequest(), sameInstance(request)); } - public void testRetentionLeaseBackgroundSyncActionOnReplica() { + public void testRetentionLeaseBackgroundSyncActionOnReplica() throws WriteStateException { final IndicesService indicesService = mock(IndicesService.class); final Index index = new Index("index", "uuid"); @@ -156,8 +158,8 @@ public void testRetentionLeaseBackgroundSyncActionOnReplica() { final TransportReplicationAction.ReplicaResult result = action.shardOperationOnReplica(request, indexShard); // the retention leases on the shard should be updated verify(indexShard).updateRetentionLeasesOnReplica(retentionLeases); - // the retention leases on the shard should be periodically flushed - verify(indexShard).afterWriteOperation(); + // the retention leases on the shard should be persisted + verify(indexShard).persistRetentionLeases(); // the result should indicate success final AtomicBoolean success = new AtomicBoolean(); result.respond(ActionListener.wrap(r -> success.set(true), e -> fail(e.toString()))); @@ -203,9 +205,13 @@ protected void doExecute(Task task, Request request, ActionListener captor = ArgumentCaptor.forClass(ParameterizedMessage.class); verify(retentionLeaseSyncActionLogger).warn(captor.capture(), same(e)); final ParameterizedMessage message = captor.getValue(); @@ -227,4 +233,31 @@ protected Logger getLogger() { assertTrue(invoked.get()); } + public void testBlocks() { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + + final RetentionLeaseBackgroundSyncAction action = new RetentionLeaseBackgroundSyncAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver()); + + assertNull(action.indexBlockLevel()); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index 13dadd051c273..76b9720ad1aa9 100644 --- 
a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -19,35 +19,45 @@ package org.elasticsearch.index.seqno; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; import java.io.Closeable; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.contains; @@ -70,7 +80,7 @@ public List> getSettings() { protected Collection> nodePlugins() { return Stream.concat( super.nodePlugins().stream(), - Stream.of(RetentionLeaseSyncIntervalSettingPlugin.class)) + Stream.of(RetentionLeaseSyncIntervalSettingPlugin.class, MockTransportService.TestPlugin.class)) .collect(Collectors.toList()); } @@ -80,6 +90,7 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { final Settings settings = Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", numberOfReplicas) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .build(); createIndex("index", settings); ensureGreen("index"); @@ -90,7 +101,7 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { .getShardOrNull(new ShardId(resolveIndex("index"), 0)); // we will add multiple retention leases and expect to see them synced to all replicas final int length = randomIntBetween(1, 8); - final Map currentRetentionLeases = new HashMap<>(); + final Map currentRetentionLeases = new LinkedHashMap<>(); for (int i = 0; i < length; i++) { final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); @@ -98,15 +109,13 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { 
final CountDownLatch latch = new CountDownLatch(1); final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); // simulate a peer recovery which locks the soft deletes policy on the primary - final Closeable retentionLock = randomBoolean() ? primary.acquireRetentionLockForPeerRecovery() : () -> {}; + final Closeable retentionLock = randomBoolean() ? primary.acquireRetentionLock() : () -> {}; currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); latch.await(); retentionLock.close(); - // check retention leases have been committed on the primary - final RetentionLeases primaryCommittedRetentionLeases = RetentionLeases.decodeRetentionLeases( - primary.acquireLastIndexCommit(false).getIndexCommit().getUserData().get(Engine.RETENTION_LEASES)); - assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(primaryCommittedRetentionLeases))); + // check retention leases have been written on the primary + assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(primary.loadRetentionLeases()))); // check current retention leases have been synced to all replicas for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) { @@ -118,10 +127,66 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { final Map retentionLeasesOnReplica = RetentionLeases.toMap(replica.getRetentionLeases()); assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases)); - // check retention leases have been committed on the replica - final RetentionLeases replicaCommittedRetentionLeases = RetentionLeases.decodeRetentionLeases( - replica.acquireLastIndexCommit(false).getIndexCommit().getUserData().get(Engine.RETENTION_LEASES)); - assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(replicaCommittedRetentionLeases))); + // check retention leases have been written on the replica + assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(replica.loadRetentionLeases()))); + } + } + } + + public void testRetentionLeaseSyncedOnRemove() throws Exception { + final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2); + internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", numberOfReplicas) + .build(); + createIndex("index", settings); + ensureGreen("index"); + final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId(); + final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName(); + final IndexShard primary = internalCluster() + .getInstance(IndicesService.class, primaryShardNodeName) + .getShardOrNull(new ShardId(resolveIndex("index"), 0)); + final int length = randomIntBetween(1, 8); + final Map currentRetentionLeases = new LinkedHashMap<>(); + for (int i = 0; i < length; i++) { + final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); + final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); + final String source = randomAlphaOfLength(8); + final CountDownLatch latch = new CountDownLatch(1); + final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + // simulate a peer recovery which locks the soft deletes policy on the primary + final Closeable retentionLock = 
randomBoolean() ? primary.acquireRetentionLock() : () -> {}; + currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); + latch.await(); + retentionLock.close(); + } + + for (int i = 0; i < length; i++) { + final String id = randomFrom(currentRetentionLeases.keySet()); + final CountDownLatch latch = new CountDownLatch(1); + primary.removeRetentionLease(id, ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString()))); + // simulate a peer recovery which locks the soft deletes policy on the primary + final Closeable retentionLock = randomBoolean() ? primary.acquireRetentionLock() : () -> {}; + currentRetentionLeases.remove(id); + latch.await(); + retentionLock.close(); + + // check retention leases have been written on the primary + assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(primary.loadRetentionLeases()))); + + // check current retention leases have been synced to all replicas + for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) { + final String replicaShardNodeId = replicaShard.currentNodeId(); + final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName(); + final IndexShard replica = internalCluster() + .getInstance(IndicesService.class, replicaShardNodeName) + .getShardOrNull(new ShardId(resolveIndex("index"), 0)); + final Map retentionLeasesOnReplica = RetentionLeases.toMap(replica.getRetentionLeases()); + assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases)); + + // check retention leases have been written on the replica + assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(replica.loadRetentionLeases()))); } } } @@ -153,7 +218,7 @@ public void testRetentionLeasesSyncOnExpiration() throws Exception { .prepareUpdateSettings("index") .setSettings( Settings.builder() - .putNull(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey()) + .putNull(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey()) .build()) .get(); assertTrue(longTtlResponse.isAcknowledged()); @@ -183,7 +248,7 @@ public void testRetentionLeasesSyncOnExpiration() throws Exception { .prepareUpdateSettings("index") .setSettings( Settings.builder() - .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(), retentionLeaseTimeToLive) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), retentionLeaseTimeToLive) .build()) .get(); assertTrue(shortTtlResponse.isAcknowledged()); @@ -213,7 +278,7 @@ public void testBackgroundRetentionLeaseSync() throws Exception { final Settings settings = Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", numberOfReplicas) - .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) .build(); createIndex("index", settings); ensureGreen("index"); @@ -224,7 +289,7 @@ public void testBackgroundRetentionLeaseSync() throws Exception { .getShardOrNull(new ShardId(resolveIndex("index"), 0)); // we will add multiple retention leases and expect to see them synced to all replicas final int length = randomIntBetween(1, 8); - final Map currentRetentionLeases = new HashMap<>(length); + final Map currentRetentionLeases = new LinkedHashMap<>(length); final List ids = new ArrayList<>(length); for (int i = 0; i < length; i++) { final String id = 
randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); @@ -260,20 +325,50 @@ public void testBackgroundRetentionLeaseSync() throws Exception { } } + public void testRetentionLeasesBackgroundSyncWithSoftDeletesDisabled() throws Exception { + final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2); + internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); + TimeValue syncIntervalSetting = TimeValue.timeValueMillis(between(1, 100)); + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", numberOfReplicas) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), syncIntervalSetting.getStringRep()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) + .build(); + createIndex("index", settings); + final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId(); + final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName(); + final MockTransportService primaryTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, primaryShardNodeName); + final AtomicBoolean backgroundSyncRequestSent = new AtomicBoolean(); + primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (action.startsWith(RetentionLeaseBackgroundSyncAction.ACTION_NAME)) { + backgroundSyncRequestSent.set(true); + } + connection.sendRequest(requestId, action, request, options); + }); + final long start = System.nanoTime(); + ensureGreen("index"); + final long syncEnd = System.nanoTime(); + // We sleep long enough for the retention leases background sync to be triggered + Thread.sleep(Math.max(0, randomIntBetween(2, 3) * syncIntervalSetting.millis() - TimeUnit.NANOSECONDS.toMillis(syncEnd - start))); + assertFalse("retention leases background sync must be a noop if soft deletes is disabled", backgroundSyncRequestSent.get()); + } + public void testRetentionLeasesSyncOnRecovery() throws Exception { final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2); internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); /* * We effectively disable the background sync to ensure that the retention leases are not synced in the background so that the only - * source of retention leases on the replicas would be from the commit point and recovery. + * source of retention leases on the replicas would be from recovery. 
*/ - final Settings settings = Settings.builder() + final Settings.Builder settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) - .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueHours(24)) - .build(); + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueHours(24)); // when we increase the number of replicas below we want to exclude the replicas from being allocated so that they do not recover - assertAcked(prepareCreate("index", 1).setSettings(settings)); + assertAcked(prepareCreate("index", 1, settings)); ensureYellow("index"); final AcknowledgedResponse response = client().admin() .indices() @@ -286,7 +381,8 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { .getInstance(IndicesService.class, primaryShardNodeName) .getShardOrNull(new ShardId(resolveIndex("index"), 0)); final int length = randomIntBetween(1, 8); - final Map currentRetentionLeases = new HashMap<>(); + final Map currentRetentionLeases = new LinkedHashMap<>(); + logger.info("adding [{}] retention leases", length); for (int i = 0; i < length; i++) { final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); @@ -295,13 +391,36 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); latch.await(); - /* - * Now renew the leases; since we do not flush immediately on renewal, this means that the latest retention leases will not be - * in the latest commit point and therefore not transferred during the file-copy phase of recovery.
- */ - currentRetentionLeases.put(id, primary.renewRetentionLease(id, retainingSequenceNumber, source)); } + logger.info("finished adding [{}] retention leases", length); + // cause some recoveries to fail to ensure that retention leases are handled properly when retrying a recovery + assertAcked(client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder().put(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), TimeValue.timeValueMillis(100)))); + final Semaphore recoveriesToDisrupt = new Semaphore(scaledRandomIntBetween(0, 4)); + final MockTransportService primaryTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, primaryShardNodeName); + primaryTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (action.equals(PeerRecoveryTargetService.Actions.FINALIZE) && recoveriesToDisrupt.tryAcquire()) { + if (randomBoolean()) { + // return a ConnectTransportException to the START_RECOVERY action + final TransportService replicaTransportService = + internalCluster().getInstance(TransportService.class, connection.getNode().getName()); + final DiscoveryNode primaryNode = primaryTransportService.getLocalNode(); + replicaTransportService.disconnectFromNode(primaryNode); + replicaTransportService.connectToNode(primaryNode); + } else { + // return an exception to the FINALIZE action + throw new ElasticsearchException("failing recovery for test purposes"); + } + } + connection.sendRequest(requestId, action, request, options); + }); + + logger.info("allow [{}] replicas to allocate", numberOfReplicas); // now allow the replicas to be allocated and wait for recovery to finalize allowNodes("index", 1 + numberOfReplicas); ensureGreen("index"); @@ -315,7 +434,241 @@ public void testRetentionLeasesSyncOnRecovery() throws Exception { .getShardOrNull(new ShardId(resolveIndex("index"), 0)); final Map retentionLeasesOnReplica = RetentionLeases.toMap(replica.getRetentionLeases()); assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases)); + + // check retention leases have been written on the replica; see RecoveryTarget#finalizeRecovery + assertThat(currentRetentionLeases, equalTo(RetentionLeases.toMap(replica.loadRetentionLeases()))); } } + public void testCanAddRetentionLeaseUnderBlock() throws InterruptedException { + final String idForInitialRetentionLease = randomAlphaOfLength(8); + runUnderBlockTest( + idForInitialRetentionLease, + randomLongBetween(0, Long.MAX_VALUE), + (primary, listener) -> { + final String nextId = randomValueOtherThan(idForInitialRetentionLease, () -> randomAlphaOfLength(8)); + final long nextRetainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); + final String nextSource = randomAlphaOfLength(8); + primary.addRetentionLease(nextId, nextRetainingSequenceNumber, nextSource, listener); + }, + primary -> {}); + } + + public void testCanRenewRetentionLeaseUnderBlock() throws InterruptedException { + final String idForInitialRetentionLease = randomAlphaOfLength(8); + final long initialRetainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); + final AtomicReference retentionLease = new AtomicReference<>(); + runUnderBlockTest( + idForInitialRetentionLease, + initialRetainingSequenceNumber, + (primary, listener) -> { + final long nextRetainingSequenceNumber = randomLongBetween(initialRetainingSequenceNumber, Long.MAX_VALUE); + final String nextSource = randomAlphaOfLength(8); + 
retentionLease.set(primary.renewRetentionLease(idForInitialRetentionLease, nextRetainingSequenceNumber, nextSource)); + listener.onResponse(new ReplicationResponse()); + }, + primary -> { + try { + /* + * If the background renew was able to execute, then the retention leases were persisted to disk. There is no other + * way for the current retention leases to end up written to disk so we assume that if they are written to disk, it + * implies that the background sync was able to execute under a block. + */ + assertBusy(() -> assertThat(primary.loadRetentionLeases().leases(), contains(retentionLease.get()))); + } catch (final Exception e) { + fail(e.toString()); + } + }); + + } + + public void testCanRemoveRetentionLeasesUnderBlock() throws InterruptedException { + final String idForInitialRetentionLease = randomAlphaOfLength(8); + runUnderBlockTest( + idForInitialRetentionLease, + randomLongBetween(0, Long.MAX_VALUE), + (primary, listener) -> primary.removeRetentionLease(idForInitialRetentionLease, listener), + indexShard -> {}); + } + + private void runUnderBlockTest( + final String idForInitialRetentionLease, + final long initialRetainingSequenceNumber, + final BiConsumer> primaryConsumer, + final Consumer afterSync) throws InterruptedException { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + assertAcked(prepareCreate("index").setSettings(settings)); + ensureGreen("index"); + + final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId(); + final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName(); + final IndexShard primary = internalCluster() + .getInstance(IndicesService.class, primaryShardNodeName) + .getShardOrNull(new ShardId(resolveIndex("index"), 0)); + + final String source = randomAlphaOfLength(8); + final CountDownLatch latch = new CountDownLatch(1); + final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + primary.addRetentionLease(idForInitialRetentionLease, initialRetainingSequenceNumber, source, listener); + latch.await(); + + final String block = randomFrom("read_only", "read_only_allow_delete", "read", "write", "metadata"); + + client() + .admin() + .indices() + .prepareUpdateSettings("index") + .setSettings(Settings.builder().put("index.blocks." + block, true).build()) + .get(); + + try { + final CountDownLatch actionLatch = new CountDownLatch(1); + final AtomicBoolean success = new AtomicBoolean(); + + primaryConsumer.accept( + primary, + new ActionListener() { + + @Override + public void onResponse(final ReplicationResponse replicationResponse) { + success.set(true); + actionLatch.countDown(); + } + + @Override + public void onFailure(final Exception e) { + fail(e.toString()); + } + + }); + actionLatch.await(); + assertTrue(success.get()); + afterSync.accept(primary); + } finally { + client() + .admin() + .indices() + .prepareUpdateSettings("index") + .setSettings(Settings.builder().putNull("index.blocks." 
+ block).build()) + .get(); + } + } + + public void testCanAddRetentionLeaseWithoutWaitingForShards() throws InterruptedException { + final String idForInitialRetentionLease = randomAlphaOfLength(8); + runWaitForShardsTest( + idForInitialRetentionLease, + randomLongBetween(0, Long.MAX_VALUE), + (primary, listener) -> { + final String nextId = randomValueOtherThan(idForInitialRetentionLease, () -> randomAlphaOfLength(8)); + final long nextRetainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); + final String nextSource = randomAlphaOfLength(8); + primary.addRetentionLease(nextId, nextRetainingSequenceNumber, nextSource, listener); + }, + primary -> {}); + } + + public void testCanRenewRetentionLeaseWithoutWaitingForShards() throws InterruptedException { + final String idForInitialRetentionLease = randomAlphaOfLength(8); + final long initialRetainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); + final AtomicReference retentionLease = new AtomicReference<>(); + runWaitForShardsTest( + idForInitialRetentionLease, + initialRetainingSequenceNumber, + (primary, listener) -> { + final long nextRetainingSequenceNumber = randomLongBetween(initialRetainingSequenceNumber, Long.MAX_VALUE); + final String nextSource = randomAlphaOfLength(8); + retentionLease.set(primary.renewRetentionLease(idForInitialRetentionLease, nextRetainingSequenceNumber, nextSource)); + listener.onResponse(new ReplicationResponse()); + }, + primary -> { + try { + /* + * If the background renew was able to execute, then the retention leases were persisted to disk. There is no other + * way for the current retention leases to end up written to disk so we assume that if they are written to disk, it + * implies that the background sync was able to execute despite wait for shards being set on the index. 
+ */ + assertBusy(() -> assertThat(primary.loadRetentionLeases().leases(), contains(retentionLease.get()))); + } catch (final Exception e) { + fail(e.toString()); + } + }); + + } + + public void testCanRemoveRetentionLeasesWithoutWaitingForShards() throws InterruptedException { + final String idForInitialRetentionLease = randomAlphaOfLength(8); + runWaitForShardsTest( + idForInitialRetentionLease, + randomLongBetween(0, Long.MAX_VALUE), + (primary, listener) -> primary.removeRetentionLease(idForInitialRetentionLease, listener), + primary -> {}); + } + + private void runWaitForShardsTest( + final String idForInitialRetentionLease, + final long initialRetainingSequenceNumber, + final BiConsumer> primaryConsumer, + final Consumer afterSync) throws InterruptedException { + final int numDataNodes = internalCluster().numDataNodes(); + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", numDataNodes) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + assertAcked(prepareCreate("index").setSettings(settings)); + ensureYellowAndNoInitializingShards("index"); + assertFalse(client().admin().cluster().prepareHealth("index").setWaitForActiveShards(numDataNodes).get().isTimedOut()); + + final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId(); + final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName(); + final IndexShard primary = internalCluster() + .getInstance(IndicesService.class, primaryShardNodeName) + .getShardOrNull(new ShardId(resolveIndex("index"), 0)); + + final String source = randomAlphaOfLength(8); + final CountDownLatch latch = new CountDownLatch(1); + final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + primary.addRetentionLease(idForInitialRetentionLease, initialRetainingSequenceNumber, source, listener); + latch.await(); + + final String waitForActiveValue = randomBoolean() ? 
"all" : Integer.toString(numDataNodes + 1); + + client() + .admin() + .indices() + .prepareUpdateSettings("index") + .setSettings(Settings.builder().put("index.write.wait_for_active_shards", waitForActiveValue).build()) + .get(); + + final CountDownLatch actionLatch = new CountDownLatch(1); + final AtomicBoolean success = new AtomicBoolean(); + + primaryConsumer.accept( + primary, + new ActionListener() { + + @Override + public void onResponse(final ReplicationResponse replicationResponse) { + success.set(true); + actionLatch.countDown(); + } + + @Override + public void onFailure(final Exception e) { + fail(e.toString()); + } + + }); + actionLatch.await(); + assertTrue(success.get()); + afterSync.accept(primary); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsTests.java index 8721450073531..adacf6539a80e 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseStatsTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; @@ -41,6 +42,7 @@ public void testRetentionLeaseStats() throws InterruptedException { final Settings settings = Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .build(); createIndex("index", settings); ensureGreen("index"); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java index 18817d784b131..9b9ad6a0962c1 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -29,6 +28,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -90,7 +90,7 @@ public void tearDown() throws Exception { super.tearDown(); } - public void testRetentionLeaseSyncActionOnPrimary() { + public void testRetentionLeaseSyncActionOnPrimary() throws WriteStateException { final IndicesService indicesService = mock(IndicesService.class); final Index index = new Index("index", "uuid"); @@ -118,18 +118,15 @@ public void testRetentionLeaseSyncActionOnPrimary() { final TransportWriteAction.WritePrimaryResult result = 
action.shardOperationOnPrimary(request, indexShard); - // the retention leases on the shard should be flushed - final ArgumentCaptor flushRequest = ArgumentCaptor.forClass(FlushRequest.class); - verify(indexShard).flush(flushRequest.capture()); - assertTrue(flushRequest.getValue().force()); - assertTrue(flushRequest.getValue().waitIfOngoing()); + // the retention leases on the shard should be persisted + verify(indexShard).persistRetentionLeases(); // we should forward the request containing the current retention leases to the replica assertThat(result.replicaRequest(), sameInstance(request)); // we should start with an empty replication response assertNull(result.finalResponseIfSuccessful.getShardInfo()); } - public void testRetentionLeaseSyncActionOnReplica() { + public void testRetentionLeaseSyncActionOnReplica() throws WriteStateException { final IndicesService indicesService = mock(IndicesService.class); final Index index = new Index("index", "uuid"); @@ -159,11 +156,8 @@ public void testRetentionLeaseSyncActionOnReplica() { action.shardOperationOnReplica(request, indexShard); // the retention leases on the shard should be updated verify(indexShard).updateRetentionLeasesOnReplica(retentionLeases); - // the retention leases on the shard should be flushed - final ArgumentCaptor flushRequest = ArgumentCaptor.forClass(FlushRequest.class); - verify(indexShard).flush(flushRequest.capture()); - assertTrue(flushRequest.getValue().force()); - assertTrue(flushRequest.getValue().waitIfOngoing()); + // the retention leases on the shard should be persisted + verify(indexShard).persistRetentionLeases(); // the result should indicate success final AtomicBoolean success = new AtomicBoolean(); result.respond(ActionListener.wrap(r -> success.set(true), e -> fail(e.toString()))); @@ -234,4 +228,31 @@ protected Logger getLogger() { assertTrue(invoked.get()); } + public void testBlocks() { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + + final RetentionLeaseSyncAction action = new RetentionLeaseSyncAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver()); + + assertNull(action.indexBlockLevel()); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java index bd2dee78b05ed..f38a806bd7b95 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java @@ -31,14 +31,6 @@ public class RetentionLeaseTests extends ESTestCase { - public void testInvalidId() { - final String id = "id" + randomFrom(":", ";", ","); - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> new RetentionLease(id, randomNonNegativeLong(), randomNonNegativeLong(), "source")); - assertThat(e, hasToString(containsString("retention lease ID can not contain any of [:;,] but was [" + id
+ "]"))); - } - public void testEmptyId() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -64,14 +56,6 @@ public void testTimestampOutOfRange() { assertThat(e, hasToString(containsString("retention lease timestamp [" + timestamp + "] out of range"))); } - public void testInvalidSource() { - final String source = "source" + randomFrom(":", ";", ","); - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> new RetentionLease("id", randomNonNegativeLong(), randomNonNegativeLong(), source)); - assertThat(e, hasToString(containsString("retention lease source can not contain any of [:;,] but was [" + source + "]"))); - } - public void testEmptySource() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -93,13 +77,4 @@ public void testRetentionLeaseSerialization() throws IOException { } } - public void testRetentionLeaseEncoding() { - final String id = randomAlphaOfLength(8); - final long retainingSequenceNumber = randomNonNegativeLong(); - final long timestamp = randomNonNegativeLong(); - final String source = randomAlphaOfLength(8); - final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, timestamp, source); - assertThat(RetentionLease.decodeRetentionLease(RetentionLease.encodeRetentionLease(retentionLease)), equalTo(retentionLease)); - } - } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseXContentTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseXContentTests.java new file mode 100644 index 0000000000000..159e85b572b98 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseXContentTests.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class RetentionLeaseXContentTests extends AbstractXContentTestCase { + + @Override + protected RetentionLease createTestInstance() { + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomNonNegativeLong(); + final long timestamp = randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + return new RetentionLease(id, retainingSequenceNumber, timestamp, source); + } + + @Override + protected RetentionLease doParseInstance(final XContentParser parser) throws IOException { + return RetentionLease.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java index 33cc83f602860..c63b2ebb6645b 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesTests.java @@ -19,13 +19,18 @@ package org.elasticsearch.index.seqno; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; -import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -49,37 +54,15 @@ public void testVersionOutOfRange() { assertThat(e, hasToString(containsString("version must be non-negative but was [" + version + "]"))); } - public void testRetentionLeasesEncoding() { - final long primaryTerm = randomNonNegativeLong(); - final long version = randomNonNegativeLong(); - final int length = randomIntBetween(0, 8); - final List retentionLeases = new ArrayList<>(length); - for (int i = 0; i < length; i++) { - final String id = randomAlphaOfLength(8); - final long retainingSequenceNumber = randomNonNegativeLong(); - final long timestamp = randomNonNegativeLong(); - final String source = randomAlphaOfLength(8); - final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, timestamp, source); - retentionLeases.add(retentionLease); - } - final RetentionLeases decodedRetentionLeases = - RetentionLeases.decodeRetentionLeases( - RetentionLeases.encodeRetentionLeases(new RetentionLeases(primaryTerm, version, retentionLeases))); - assertThat(decodedRetentionLeases.version(), equalTo(version)); - if (length == 0) { - assertThat(decodedRetentionLeases.leases(), empty()); - } else { - assertThat(decodedRetentionLeases.leases(), containsInAnyOrder(retentionLeases.toArray(new RetentionLease[0]))); - } - } - public void testSupersedesByPrimaryTerm() { final long lowerPrimaryTerm = randomLongBetween(1, Long.MAX_VALUE); final RetentionLeases left = new RetentionLeases(lowerPrimaryTerm, randomLongBetween(1, Long.MAX_VALUE), Collections.emptyList()); final long higherPrimaryTerm = randomLongBetween(lowerPrimaryTerm + 1, Long.MAX_VALUE); final RetentionLeases right = new RetentionLeases(higherPrimaryTerm, 
randomLongBetween(1, Long.MAX_VALUE), Collections.emptyList()); assertTrue(right.supersedes(left)); + assertTrue(right.supersedes(left.primaryTerm(), left.version())); assertFalse(left.supersedes(right)); + assertFalse(left.supersedes(right.primaryTerm(), right.version())); } public void testSupersedesByVersion() { @@ -89,7 +72,53 @@ public void testSupersedesByVersion() { final RetentionLeases left = new RetentionLeases(primaryTerm, lowerVersion, Collections.emptyList()); final RetentionLeases right = new RetentionLeases(primaryTerm, higherVersion, Collections.emptyList()); assertTrue(right.supersedes(left)); + assertTrue(right.supersedes(left.primaryTerm(), left.version())); assertFalse(left.supersedes(right)); + assertFalse(left.supersedes(right.primaryTerm(), right.version())); + } + + public void testRetentionLeasesRejectsDuplicates() { + final RetentionLeases retentionLeases = randomRetentionLeases(false); + final RetentionLease retentionLease = randomFrom(retentionLeases.leases()); + final IllegalStateException e = expectThrows( + IllegalStateException.class, + () -> new RetentionLeases( + retentionLeases.primaryTerm(), + retentionLeases.version(), + Stream.concat(retentionLeases.leases().stream(), Stream.of(retentionLease)).collect(Collectors.toList()))); + assertThat(e, hasToString(containsString("duplicate retention lease ID [" + retentionLease.id() + "]"))); + } + + public void testLeasesPreservesIterationOrder() { + final RetentionLeases retentionLeases = randomRetentionLeases(true); + if (retentionLeases.leases().isEmpty()) { + assertThat(retentionLeases.leases(), empty()); + } else { + assertThat(retentionLeases.leases(), contains(retentionLeases.leases().toArray(new RetentionLease[0]))); + } + } + + public void testRetentionLeasesMetaDataStateFormat() throws IOException { + final Path path = createTempDir(); + final RetentionLeases retentionLeases = randomRetentionLeases(true); + RetentionLeases.FORMAT.writeAndCleanup(retentionLeases, path); + assertThat(RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path), equalTo(retentionLeases)); + } + + private RetentionLeases randomRetentionLeases(boolean allowEmpty) { + final long primaryTerm = randomNonNegativeLong(); + final long version = randomNonNegativeLong(); + final int length = randomIntBetween(allowEmpty ? 0 : 1, 8); + final List leases = new ArrayList<>(length); + for (int i = 0; i < length; i++) { + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomNonNegativeLong(); + final long timestamp = randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, timestamp, source); + leases.add(retentionLease); + } + return new RetentionLeases(primaryTerm, version, leases); } } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesXContentTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesXContentTests.java new file mode 100644 index 0000000000000..5fc2ace16ee94 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeasesXContentTests.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class RetentionLeasesXContentTests extends AbstractXContentTestCase { + + @Override + protected RetentionLeases createTestInstance() { + final long primaryTerm = randomNonNegativeLong(); + final long version = randomNonNegativeLong(); + final int length = randomIntBetween(0, 8); + final List leases = new ArrayList<>(length); + for (int i = 0; i < length; i++) { + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomNonNegativeLong(); + final long timestamp = randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, timestamp, source); + leases.add(retentionLease); + } + return new RetentionLeases(primaryTerm, version, leases); + } + + @Override + protected RetentionLeases doParseInstance(final XContentParser parser) throws IOException { + return RetentionLeases.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java index 59c3553d25fd2..d71bade29a369 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java @@ -431,7 +431,7 @@ public void testListenersReadyToBeNotifiedUsesExecutor() { assertThat(count.get(), equalTo(numberOfListeners)); } - public void testConcurrency() throws BrokenBarrierException, InterruptedException { + public void testConcurrency() throws Exception { final ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, 8)); final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, scheduler, logger); final AtomicLong globalCheckpoint = new AtomicLong(NO_OPS_PERFORMED); @@ -470,11 +470,12 @@ public void testConcurrency() throws BrokenBarrierException, InterruptedExceptio // sometimes this will notify the listener immediately globalCheckpointListeners.add( globalCheckpoint.get(), - maybeMultipleInvocationProtectingListener((g, e) -> { - if (invocation.compareAndSet(false, true) == false) { - throw new IllegalStateException("listener invoked twice"); - } - }), + maybeMultipleInvocationProtectingListener( + (g, e) -> { + if (invocation.compareAndSet(false, true) == false) { + throw new IllegalStateException("listener invoked twice"); + } + }), randomBoolean() ? 
null : TimeValue.timeValueNanos(randomLongBetween(1, TimeUnit.MICROSECONDS.toNanos(1)))); } // synchronize ending with the updating thread and the main test thread @@ -491,11 +492,13 @@ public void testConcurrency() throws BrokenBarrierException, InterruptedExceptio globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint.incrementAndGet()); } assertThat(globalCheckpointListeners.pendingListeners(), equalTo(0)); - executor.shutdown(); - executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS); + // wait for all the listeners to be notified for (final AtomicBoolean invocation : invocations) { - assertTrue(invocation.get()); + assertBusy(() -> assertTrue(invocation.get())); } + // now shutdown + executor.shutdown(); + assertTrue(executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS)); updatingThread.join(); listenersThread.join(); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 674c252d780f3..fb8574594a874 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -120,6 +120,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -474,18 +475,22 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { final FlushStats initialStats = shard.flushStats(); client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); check = () -> { + assertFalse(shard.shouldPeriodicallyFlush()); final FlushStats currentStats = shard.flushStats(); String msg = String.format(Locale.ROOT, "flush stats: total=[%d vs %d], periodic=[%d vs %d]", initialStats.getTotal(), currentStats.getTotal(), initialStats.getPeriodic(), currentStats.getPeriodic()); - assertThat(msg, currentStats.getPeriodic(), equalTo(initialStats.getPeriodic() + 1)); - assertThat(msg, currentStats.getTotal(), equalTo(initialStats.getTotal() + 1)); + assertThat(msg, currentStats.getPeriodic(), + either(equalTo(initialStats.getPeriodic() + 1)).or(equalTo(initialStats.getPeriodic() + 2))); + assertThat(msg, currentStats.getTotal(), + either(equalTo(initialStats.getTotal() + 1)).or(equalTo(initialStats.getTotal() + 2))); }; } else { final long generation = getTranslog(shard).currentFileGeneration(); client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); - check = () -> assertEquals( - generation + 1, - getTranslog(shard).currentFileGeneration()); + check = () -> { + assertFalse(shard.shouldRollTranslogGeneration()); + assertEquals(generation + 1, getTranslog(shard).currentFileGeneration()); + }; } assertBusy(check); running.set(false); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java index cc64fc6f8b2de..4a33eadb2b281 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java @@ -19,16 +19,13 @@ package org.elasticsearch.index.shard; -import 
org.apache.lucene.index.SegmentInfos; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.seqno.RetentionLease; import org.elasticsearch.index.seqno.RetentionLeaseStats; @@ -74,7 +71,8 @@ protected void tearDownThreadPool() { } public void testAddOrRenewRetentionLease() throws IOException { - final IndexShard indexShard = newStartedShard(true); + final IndexShard indexShard = newStartedShard(true, + Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build()); final long primaryTerm = indexShard.getOperationPrimaryTerm(); try { final int length = randomIntBetween(0, 8); @@ -104,6 +102,37 @@ public void testAddOrRenewRetentionLease() throws IOException { } } + public void testRemoveRetentionLease() throws IOException { + final IndexShard indexShard = newStartedShard(true, + Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build()); + final long primaryTerm = indexShard.getOperationPrimaryTerm(); + try { + final int length = randomIntBetween(0, 8); + final long[] minimumRetainingSequenceNumbers = new long[length]; + for (int i = 0; i < length; i++) { + minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + indexShard.addRetentionLease( + Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {})); + assertRetentionLeases( + indexShard, i + 1, minimumRetainingSequenceNumbers, primaryTerm, 1 + i, true, false); + } + + for (int i = 0; i < length; i++) { + indexShard.removeRetentionLease(Integer.toString(length - i - 1), ActionListener.wrap(() -> {})); + assertRetentionLeases( + indexShard, + length - i - 1, + minimumRetainingSequenceNumbers, + primaryTerm, + 1 + length + i, + true, + false); + } + } finally { + closeShards(indexShard); + } + } + public void testExpirationOnPrimary() throws IOException { runExpirationTest(true); } @@ -116,8 +145,9 @@ private void runExpirationTest(final boolean primary) throws IOException { final long retentionLeaseMillis = randomLongBetween(1, TimeValue.timeValueHours(12).millis()); final Settings settings = Settings .builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .put( - IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(), + IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), TimeValue.timeValueMillis(retentionLeaseMillis)) .build(); // current time is mocked through the thread pool @@ -181,10 +211,10 @@ private void runExpirationTest(final boolean primary) throws IOException { } } - public void testCommit() throws IOException { + public void testPersistence() throws IOException { final Settings settings = Settings.builder() .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(), Long.MAX_VALUE, TimeUnit.NANOSECONDS) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), Long.MAX_VALUE, TimeUnit.NANOSECONDS) .build(); final IndexShard 
indexShard = newStartedShard( true, @@ -202,19 +232,17 @@ public void testCommit() throws IOException { currentTimeMillis.set(TimeUnit.NANOSECONDS.toMillis(Long.MAX_VALUE)); - // force a commit - indexShard.flush(new FlushRequest().force(true)); + // force the retention leases to persist + indexShard.persistRetentionLeases(); - // the committed retention leases should equal our current retention leases - final SegmentInfos segmentCommitInfos = indexShard.store().readLastCommittedSegmentsInfo(); - assertTrue(segmentCommitInfos.getUserData().containsKey(Engine.RETENTION_LEASES)); + // the written retention leases should equal our current retention leases final RetentionLeases retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get(); - final RetentionLeases committedRetentionLeases = IndexShard.getRetentionLeases(segmentCommitInfos); + final RetentionLeases writtenRetentionLeases = indexShard.loadRetentionLeases(); if (retentionLeases.leases().isEmpty()) { - assertThat(committedRetentionLeases.version(), equalTo(0L)); - assertThat(committedRetentionLeases.leases(), empty()); + assertThat(writtenRetentionLeases.version(), equalTo(0L)); + assertThat(writtenRetentionLeases.leases(), empty()); } else { - assertThat(committedRetentionLeases.version(), equalTo((long) length)); + assertThat(writtenRetentionLeases.version(), equalTo((long) length)); assertThat(retentionLeases.leases(), contains(retentionLeases.leases().toArray(new RetentionLease[0]))); } @@ -237,13 +265,28 @@ public void testCommit() throws IOException { } finally { closeShards(recoveredShard); } + + // we should not recover retention leases when force-allocating a stale primary + final IndexShard forceRecoveredShard = reinitShard( + indexShard, + ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), + RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE)); + try { + recoverShardFromStore(forceRecoveredShard); + final RetentionLeases recoveredRetentionLeases = forceRecoveredShard.getEngine().config().retentionLeasesSupplier().get(); + assertThat(recoveredRetentionLeases.leases(), empty()); + assertThat(recoveredRetentionLeases.version(), equalTo(0L)); + } finally { + closeShards(forceRecoveredShard); + } } finally { closeShards(indexShard); } } public void testRetentionLeaseStats() throws IOException { - final IndexShard indexShard = newStartedShard(true); + final IndexShard indexShard = newStartedShard(true, + Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build()); try { final int length = randomIntBetween(0, 8); final long[] minimumRetainingSequenceNumbers = new long[length]; @@ -264,6 +307,22 @@ public void testRetentionLeaseStats() throws IOException { } } + public void testRetentionLeasesActionsFailWithSoftDeletesDisabled() throws Exception { + IndexShard shard = newStartedShard(true, Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build()); + assertThat(expectThrows(AssertionError.class, () -> shard.addRetentionLease(randomAlphaOfLength(10), + randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE), "test", ActionListener.wrap(() -> {}))).getMessage(), + equalTo("retention leases requires soft deletes but [index] does not have soft deletes enabled")); + assertThat(expectThrows(AssertionError.class, () -> shard.renewRetentionLease( + randomAlphaOfLength(10), randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE), "test")).getMessage(), + equalTo("retention leases requires soft deletes but 
[index] does not have soft deletes enabled")); + assertThat(expectThrows(AssertionError.class, () -> shard.removeRetentionLease( + randomAlphaOfLength(10), ActionListener.wrap(() -> {}))).getMessage(), + equalTo("retention leases requires soft deletes but [index] does not have soft deletes enabled")); + assertThat(expectThrows(AssertionError.class, shard::syncRetentionLeases).getMessage(), + equalTo("retention leases requires soft deletes but [index] does not have soft deletes enabled")); + closeShards(shard); + } + private void assertRetentionLeases( final IndexShard indexShard, final int size, diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 316ed39574c0c..25e19470654c2 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -101,6 +101,7 @@ import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.VersionFieldMapper; +import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -262,7 +263,7 @@ public void testFailShard() throws Exception { assertEquals(shardStateMetaData, getShardStateMetadata(shard)); // but index can't be opened for a failed shard assertThat("store index should be corrupted", StoreUtils.canOpenIndex(logger, shardPath.resolveIndex(), shard.shardId(), - (shardId, lockTimeoutMS) -> new DummyShardLock(shardId)), + (shardId, lockTimeoutMS, details) -> new DummyShardLock(shardId)), equalTo(false)); } @@ -630,41 +631,81 @@ public void testOperationPermitsOnPrimaryShards() throws InterruptedException, E final ShardId shardId = new ShardId("test", "_na_", 0); final IndexShard indexShard; + final boolean isPrimaryMode; if (randomBoolean()) { // relocation target indexShard = newShard(newShardRouting(shardId, "local_node", "other node", - true, ShardRoutingState.INITIALIZING, AllocationId.newRelocation(AllocationId.newInitializing()))); + true, ShardRoutingState.INITIALIZING, AllocationId.newRelocation(AllocationId.newInitializing()))); + isPrimaryMode = false; } else if (randomBoolean()) { // simulate promotion indexShard = newStartedShard(false); ShardRouting replicaRouting = indexShard.routingEntry(); ShardRouting primaryRouting = newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), null, - true, ShardRoutingState.STARTED, replicaRouting.allocationId()); + true, ShardRoutingState.STARTED, replicaRouting.allocationId()); final long newPrimaryTerm = indexShard.getPendingPrimaryTerm() + between(1, 1000); CountDownLatch latch = new CountDownLatch(1); indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> { - assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm)); - latch.countDown(); - }, 0L, - Collections.singleton(indexShard.routingEntry().allocationId().getId()), - new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(), - Collections.emptySet()); + assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm)); + latch.countDown(); + }, 0L, + Collections.singleton(indexShard.routingEntry().allocationId().getId()), + new 
IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(), + Collections.emptySet()); latch.await(); + isPrimaryMode = true; } else { indexShard = newStartedShard(true); + isPrimaryMode = true; } - final long primaryTerm = indexShard.getPendingPrimaryTerm(); - assertEquals(0, indexShard.getActiveOperationsCount()); - Releasable operation1 = acquirePrimaryOperationPermitBlockingly(indexShard); - assertEquals(1, indexShard.getActiveOperationsCount()); - Releasable operation2 = acquirePrimaryOperationPermitBlockingly(indexShard); - assertEquals(2, indexShard.getActiveOperationsCount()); + final long pendingPrimaryTerm = indexShard.getPendingPrimaryTerm(); + if (isPrimaryMode) { + assertEquals(0, indexShard.getActiveOperationsCount()); + Releasable operation1 = acquirePrimaryOperationPermitBlockingly(indexShard); + assertEquals(1, indexShard.getActiveOperationsCount()); + Releasable operation2 = acquirePrimaryOperationPermitBlockingly(indexShard); + assertEquals(2, indexShard.getActiveOperationsCount()); - Releasables.close(operation1, operation2); - assertEquals(0, indexShard.getActiveOperationsCount()); + Releasables.close(operation1, operation2); + assertEquals(0, indexShard.getActiveOperationsCount()); + } else { + indexShard.acquirePrimaryOperationPermit( + new ActionListener() { + @Override + public void onResponse(final Releasable releasable) { + throw new AssertionError(); + } + + @Override + public void onFailure(final Exception e) { + assertThat(e, instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e, hasToString(containsString("shard is not in primary mode"))); + } + }, + ThreadPool.Names.SAME, + "test"); + + final CountDownLatch latch = new CountDownLatch(1); + indexShard.acquireAllPrimaryOperationsPermits( + new ActionListener() { + @Override + public void onResponse(final Releasable releasable) { + throw new AssertionError(); + } + + @Override + public void onFailure(final Exception e) { + assertThat(e, instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e, hasToString(containsString("shard is not in primary mode"))); + latch.countDown(); + } + }, + TimeValue.timeValueSeconds(30)); + latch.await(); + } if (Assertions.ENABLED && indexShard.routingEntry().isRelocationTarget() == false) { - assertThat(expectThrows(AssertionError.class, () -> indexShard.acquireReplicaOperationPermit(primaryTerm, + assertThat(expectThrows(AssertionError.class, () -> indexShard.acquireReplicaOperationPermit(pendingPrimaryTerm, indexShard.getGlobalCheckpoint(), indexShard.getMaxSeqNoOfUpdatesOrDeletes(), new ActionListener() { @Override public void onResponse(Releasable releasable) { @@ -1046,8 +1087,8 @@ public void testGlobalCheckpointSync() throws IOException { final IndexMetaData.Builder indexMetadata = IndexMetaData.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, 1); final AtomicBoolean synced = new AtomicBoolean(); - final IndexShard primaryShard = - newShard(shardRouting, indexMetadata.build(), null, new InternalEngineFactory(), () -> synced.set(true)); + final IndexShard primaryShard = newShard( + shardRouting, indexMetadata.build(), null, new InternalEngineFactory(), () -> synced.set(true), RetentionLeaseSyncer.EMPTY); // add a replica recoverShardFromStore(primaryShard); final IndexShard replicaShard = newShard(shardId, false); @@ -1143,13 +1184,8 @@ public void onFailure(Exception e) { assertThat(indexShard.getLocalCheckpoint(), equalTo(maxSeqNo)); assertThat(indexShard.seqNoStats().getMaxSeqNo(), equalTo(maxSeqNo)); 
assertThat(getShardDocUIDs(indexShard), equalTo(docsBeforeRollback)); - if (shouldRollback) { - assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(Collections.max( - Arrays.asList(currentMaxSeqNoOfUpdates, maxSeqNoOfUpdatesOrDeletes, globalCheckpoint, globalCheckpointOnReplica)) - )); - } else { - assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(Math.max(currentMaxSeqNoOfUpdates, maxSeqNoOfUpdatesOrDeletes))); - } + // we conservatively roll MSU forward to maxSeqNo during restoreLocalHistory, ideally it should become just currentMaxSeqNoOfUpdates + assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(maxSeqNo)); closeShard(indexShard, false); } @@ -1459,12 +1495,17 @@ public String[] listAll() throws IOException { return super.listAll(); } } + + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } }; try (Store store = createStore(shardId, new IndexSettings(metaData, Settings.EMPTY), directory)) { - IndexShard shard = newShard(shardRouting, shardPath, metaData, i -> store, - null, new InternalEngineFactory(), () -> { - }, EMPTY_EVENT_LISTENER); + IndexShard shard = newShard(shardRouting, shardPath, metaData, i -> store, null, new InternalEngineFactory(), + () -> { }, RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER); AtomicBoolean failureCallbackTriggered = new AtomicBoolean(false); shard.addShardFailureCallback((ig)->failureCallbackTriggered.set(true)); @@ -1667,10 +1708,9 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { // recovery can be now finalized recoveryThread.join(); assertTrue(shard.isRelocatedPrimary()); - try (Releasable ignored = acquirePrimaryOperationPermitBlockingly(shard)) { - // lock can again be acquired - assertTrue(shard.isRelocatedPrimary()); - } + final ExecutionException e = expectThrows(ExecutionException.class, () -> acquirePrimaryOperationPermitBlockingly(shard)); + assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); closeShards(shard); } @@ -1678,30 +1718,66 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { final IndexShard shard = newStartedShard(true); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); + final CountDownLatch startRecovery = new CountDownLatch(1); + final CountDownLatch relocationStarted = new CountDownLatch(1); Thread recoveryThread = new Thread(() -> { try { - shard.relocated(primaryContext -> {}); + startRecovery.await(); + shard.relocated(primaryContext -> relocationStarted.countDown()); } catch (InterruptedException e) { throw new RuntimeException(e); } }); recoveryThread.start(); - List> onLockAcquiredActions = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - PlainActionFuture onLockAcquired = new PlainActionFuture() { - @Override - public void onResponse(Releasable releasable) { - releasable.close(); - super.onResponse(releasable); - } - }; + + final int numberOfAcquisitions = randomIntBetween(1, 10); + final List assertions = new ArrayList<>(numberOfAcquisitions); + final int recoveryIndex = randomIntBetween(0, numberOfAcquisitions - 1); + + for (int i = 0; i < numberOfAcquisitions; i++) { + final PlainActionFuture onLockAcquired; + if (i < recoveryIndex) { + final AtomicBoolean invoked = new 
AtomicBoolean(); + onLockAcquired = new PlainActionFuture() { + + @Override + public void onResponse(Releasable releasable) { + invoked.set(true); + releasable.close(); + super.onResponse(releasable); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(); + } + + }; + assertions.add(() -> assertTrue(invoked.get())); + } else if (recoveryIndex == i) { + startRecovery.countDown(); + relocationStarted.await(); + onLockAcquired = new PlainActionFuture<>(); + assertions.add(() -> { + final ExecutionException e = expectThrows(ExecutionException.class, () -> onLockAcquired.get(30, TimeUnit.SECONDS)); + assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); + }); + } else { + onLockAcquired = new PlainActionFuture<>(); + assertions.add(() -> { + final ExecutionException e = expectThrows(ExecutionException.class, () -> onLockAcquired.get(30, TimeUnit.SECONDS)); + assertThat(e.getCause(), instanceOf(ShardNotInPrimaryModeException.class)); + assertThat(e.getCause(), hasToString(containsString("shard is not in primary mode"))); + }); + } + shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.WRITE, "i_" + i); - onLockAcquiredActions.add(onLockAcquired); } - for (PlainActionFuture onLockAcquired : onLockAcquiredActions) { - assertNotNull(onLockAcquired.get(30, TimeUnit.SECONDS)); + for (final Runnable assertion : assertions) { + assertion.run(); } recoveryThread.join(); @@ -2122,6 +2198,7 @@ public void testRecoverFromStoreRemoveStaleOperations() throws Exception { null, shard.getEngineFactory(), shard.getGlobalCheckpointSyncer(), + shard.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER); DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); @@ -2242,6 +2319,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { wrapper, new InternalEngineFactory(), () -> {}, + RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER); recoverShardFromStore(newShard); @@ -2396,6 +2474,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { wrapper, new InternalEngineFactory(), () -> {}, + RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER); recoverShardFromStore(newShard); @@ -2962,9 +3041,8 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("true", "checksum"))) .build(); - IndexShard corruptedShard = newShard(shardRouting, shardPath, indexMetaData, - null, null, indexShard.engineFactory, - indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + IndexShard corruptedShard = newShard(shardRouting, shardPath, indexMetaData, null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), indexShard.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER); final IndexShardRecoveryException indexShardRecoveryException = expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); @@ -3007,9 +3085,8 @@ public void testShardDoesNotStartIfCorruptedMarkerIsPresent() throws Exception { } // try to start shard on corrupted files - final IndexShard corruptedShard = newShard(shardRouting, shardPath, indexMetaData, - null, null, indexShard.engineFactory, - indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + final 
IndexShard corruptedShard = newShard(shardRouting, shardPath, indexMetaData, null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), indexShard.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER); final IndexShardRecoveryException exception1 = expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); @@ -3030,9 +3107,8 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO assertThat("store has to be marked as corrupted", corruptedMarkerCount.get(), equalTo(1)); // try to start another time shard on corrupted files - final IndexShard corruptedShard2 = newShard(shardRouting, shardPath, indexMetaData, - null, null, indexShard.engineFactory, - indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + final IndexShard corruptedShard2 = newShard(shardRouting, shardPath, indexMetaData, null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), indexShard.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER); final IndexShardRecoveryException exception2 = expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard2, true)); @@ -3070,9 +3146,8 @@ public void testReadSnapshotAndCheckIndexConcurrently() throws Exception { .put(indexShard.indexSettings.getSettings()) .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum"))) .build(); - final IndexShard newShard = newShard(shardRouting, indexShard.shardPath(), indexMetaData, - null, null, indexShard.engineFactory, - indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + final IndexShard newShard = newShard(shardRouting, indexShard.shardPath(), indexMetaData, null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), indexShard.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER); Store.MetadataSnapshot storeFileMetaDatas = newShard.snapshotStoreMetadata(); assertTrue("at least 2 files, commit and data: " + storeFileMetaDatas.toString(), storeFileMetaDatas.size() > 1); @@ -3137,9 +3212,13 @@ private Result indexOnReplicaWithGaps( int localCheckpoint = offset; int max = offset; boolean gap = false; + Set ids = new HashSet<>(); for (int i = offset + 1; i < operations; i++) { if (!rarely() || i == operations - 1) { // last operation can't be a gap as it's not a gap anymore - final String id = Integer.toString(i); + final String id = ids.isEmpty() || randomBoolean() ? Integer.toString(i) : randomFrom(ids); + if (ids.add(id) == false) { // this is an update + indexShard.advanceMaxSeqNoOfUpdatesOrDeletes(i); + } SourceToParse sourceToParse = new SourceToParse(indexShard.shardId().getIndexName(), "_doc", id, new BytesArray("{}"), XContentType.JSON); indexShard.applyIndexOperationOnReplica(i, 1, @@ -3195,6 +3274,12 @@ public void testIsSearchIdle() throws Exception { // now loop until we are fast enough... shouldn't take long primary.awaitShardSearchActive(aBoolean -> {}); } while (primary.isSearchIdle()); + + assertBusy(() -> assertTrue(primary.isSearchIdle())); + do { + // now loop until we are fast enough... 
shouldn't take long + primary.acquireSearcher("test").close(); + } while (primary.isSearchIdle()); closeShards(primary); } @@ -3482,15 +3567,14 @@ public void testFlushOnInactive() throws Exception { ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); AtomicBoolean markedInactive = new AtomicBoolean(); AtomicReference primaryRef = new AtomicReference<>(); - IndexShard primary = newShard(shardRouting, shardPath, metaData, null, null, - new InternalEngineFactory(), () -> { - }, new IndexEventListener() { - @Override - public void onShardInactive(IndexShard indexShard) { - markedInactive.set(true); - primaryRef.get().flush(new FlushRequest()); - } - }); + IndexShard primary = newShard(shardRouting, shardPath, metaData, null, null, new InternalEngineFactory(), () -> { }, + RetentionLeaseSyncer.EMPTY, new IndexEventListener() { + @Override + public void onShardInactive(IndexShard indexShard) { + markedInactive.set(true); + primaryRef.get().flush(new FlushRequest()); + } + }); primaryRef.set(primary); recoverShardFromStore(primary); for (int i = 0; i < 3; i++) { diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index d074ef3375833..c7d59fdb7c25e 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -42,21 +42,30 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.translog.TestTranslog; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsInstanceOf.instanceOf; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; public class PrimaryReplicaSyncerTests extends IndexShardTestCase { @@ -115,16 +124,10 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { assertThat(resyncRequest.getMaxSeenAutoIdTimestampOnPrimary(), equalTo(shard.getMaxSeenAutoIdTimestamp())); } if (syncNeeded && globalCheckPoint < numDocs - 1) { - if (shard.indexSettings.isSoftDeleteEnabled()) { - assertThat(resyncTask.getSkippedOperations(), equalTo(0)); - assertThat(resyncTask.getResyncedOperations(), equalTo(resyncTask.getTotalOperations())); - assertThat(resyncTask.getTotalOperations(), equalTo(Math.toIntExact(numDocs - 1 - globalCheckPoint))); - } else { - int skippedOps = Math.toIntExact(globalCheckPoint + 1); // everything up to global checkpoint included - assertThat(resyncTask.getSkippedOperations(), equalTo(skippedOps)); - assertThat(resyncTask.getResyncedOperations(), equalTo(numDocs - skippedOps)); - assertThat(resyncTask.getTotalOperations(), 
equalTo(globalCheckPoint == numDocs - 1 ? 0 : numDocs)); - } + int skippedOps = Math.toIntExact(globalCheckPoint + 1); // everything up to global checkpoint included + assertThat(resyncTask.getSkippedOperations(), equalTo(skippedOps)); + assertThat(resyncTask.getResyncedOperations(), equalTo(numDocs - skippedOps)); + assertThat(resyncTask.getTotalOperations(), equalTo(globalCheckPoint == numDocs - 1 ? 0 : numDocs)); } else { assertThat(resyncTask.getSkippedOperations(), equalTo(0)); assertThat(resyncTask.getResyncedOperations(), equalTo(0)); @@ -192,6 +195,31 @@ public void onResponse(PrimaryReplicaSyncer.ResyncTask result) { } } + public void testDoNotSendOperationsWithoutSequenceNumber() throws Exception { + IndexShard shard = spy(newStartedShard(true)); + when(shard.getGlobalCheckpoint()).thenReturn(SequenceNumbers.UNASSIGNED_SEQ_NO); + int numOps = between(0, 20); + List operations = new ArrayList<>(); + for (int i = 0; i < numOps; i++) { + operations.add(new Translog.Index( + "_doc", Integer.toString(i), randomBoolean() ? SequenceNumbers.UNASSIGNED_SEQ_NO : i, primaryTerm, new byte[]{1})); + } + doReturn(TestTranslog.newSnapshotFromOperations(operations)).when(shard).getHistoryOperations(anyString(), anyLong()); + TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); + List sentOperations = new ArrayList<>(); + PrimaryReplicaSyncer.SyncAction syncAction = (request, parentTask, allocationId, primaryTerm, listener) -> { + sentOperations.addAll(Arrays.asList(request.getOperations())); + listener.onResponse(new ResyncReplicationResponse()); + }; + PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer(taskManager, syncAction); + syncer.setChunkSize(new ByteSizeValue(randomIntBetween(1, 10))); + PlainActionFuture fut = new PlainActionFuture<>(); + syncer.resync(shard, fut); + fut.actionGet(); + assertThat(sentOperations, equalTo(operations.stream().filter(op -> op.seqNo() >= 0).collect(Collectors.toList()))); + closeShards(shard); + } + public void testStatusSerialization() throws IOException { PrimaryReplicaSyncer.ResyncTask.Status status = new PrimaryReplicaSyncer.ResyncTask.Status(randomAlphaOfLength(10), randomIntBetween(0, 1000), randomIntBetween(0, 1000), randomIntBetween(0, 1000)); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index c80b3b5074921..4ad95c43b7077 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -148,7 +148,7 @@ public void onFailedEngine(String reason, @Nullable Exception e) { () -> primaryTerm, EngineTestCase.tombstoneDocSupplier()); engine = new InternalEngine(config); - engine.initializeMaxSeqNoOfUpdatesOrDeletes(); + engine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog((e, s) -> 0, Long.MAX_VALUE); listeners.setCurrentRefreshLocationSupplier(engine::getTranslogLastWriteLocation); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 1c3c3b28773cf..2079b80cd386c 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -40,6 +40,7 @@ import 
org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngineFactory; +import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.TestTranslog; import org.elasticsearch.index.translog.TranslogCorruptedException; @@ -107,11 +108,8 @@ public void setup() throws IOException { .putMapping("_doc", "{ \"properties\": {} }"); indexMetaData = metaData.build(); - indexShard = newStartedShard(p -> - newShard(routing, shardPath, indexMetaData, null, null, - new InternalEngineFactory(), () -> { - }, EMPTY_EVENT_LISTENER), - true); + indexShard = newStartedShard(p -> newShard(routing, shardPath, indexMetaData, null, null, + new InternalEngineFactory(), () -> { }, RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER), true); translogPath = shardPath.resolveTranslog(); indexPath = shardPath.resolveIndex(); @@ -371,8 +369,8 @@ private IndexShard reopenIndexShard(boolean corrupted) throws IOException { return new Store(shardId, indexSettings, baseDirectoryWrapper, new DummyShardLock(shardId)); }; - return newShard(shardRouting, shardPath, metaData, storeProvider, null, - indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + return newShard(shardRouting, shardPath, metaData, storeProvider, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), indexShard.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER); } private int indexDocs(IndexShard indexShard, boolean flushLast) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/store/ByteSizeCachingDirectoryTests.java b/server/src/test/java/org/elasticsearch/index/store/ByteSizeCachingDirectoryTests.java index 49de00dd8bef6..509f5e2a4c41b 100644 --- a/server/src/test/java/org/elasticsearch/index/store/ByteSizeCachingDirectoryTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/ByteSizeCachingDirectoryTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.util.Set; @LuceneTestCase.SuppressFileSystems("ExtrasFS") public class ByteSizeCachingDirectoryTests extends ESTestCase { @@ -45,6 +46,12 @@ public long fileLength(String name) throws IOException { numFileLengthCalls++; return super.fileLength(name); } + + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } } public void testBasics() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index d6690fd27cc8b..e99c2a847f9fb 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -40,11 +40,15 @@ import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.index.Term; import org.apache.lucene.store.BaseDirectoryWrapper; +import org.apache.lucene.store.ByteBufferIndexInput; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.MMapDirectory; +import org.apache.lucene.store.NIOFSDirectory; import 
org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.TestUtil; @@ -925,17 +929,17 @@ public void testCanOpenIndex() throws IOException { IndexWriterConfig iwc = newIndexWriterConfig(); Path tempDir = createTempDir(); final BaseDirectoryWrapper dir = newFSDirectory(tempDir); - assertFalse(StoreUtils.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id))); + assertFalse(StoreUtils.canOpenIndex(logger, tempDir, shardId, (id, l, d) -> new DummyShardLock(id))); IndexWriter writer = new IndexWriter(dir, iwc); Document doc = new Document(); doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); writer.addDocument(doc); writer.commit(); writer.close(); - assertTrue(StoreUtils.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id))); + assertTrue(StoreUtils.canOpenIndex(logger, tempDir, shardId, (id, l, d) -> new DummyShardLock(id))); Store store = new Store(shardId, INDEX_SETTINGS, dir, new DummyShardLock(shardId)); store.markStoreCorrupted(new CorruptIndexException("foo", "bar")); - assertFalse(StoreUtils.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id))); + assertFalse(StoreUtils.canOpenIndex(logger, tempDir, shardId, (id, l, d) -> new DummyShardLock(id))); store.close(); } @@ -1080,4 +1084,58 @@ public void testHistoryUUIDCanBeForced() throws IOException { assertThat(segmentInfos.getUserData().get(Engine.HISTORY_UUID_KEY), not(equalTo(oldHistoryUUID))); } } + + public void testDeoptimizeMMap() throws IOException { + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", + Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) + .put(Store.FORCE_RAM_TERM_DICT.getKey(), true).build()); + final ShardId shardId = new ShardId("index", "_na_", 1); + String file = "test." + (randomBoolean() ? 
"tip" : "cfs"); + try (Store store = new Store(shardId, indexSettings, new MMapDirectory(createTempDir()), new DummyShardLock(shardId))) { + try (IndexOutput output = store.directory().createOutput(file, IOContext.DEFAULT)) { + output.writeInt(0); + } + try (IndexOutput output = store.directory().createOutput("someOtherFile.txt", IOContext.DEFAULT)) { + output.writeInt(0); + } + try (IndexInput input = store.directory().openInput(file, IOContext.DEFAULT)) { + assertFalse(input instanceof ByteBufferIndexInput); + assertFalse(input.clone() instanceof ByteBufferIndexInput); + assertFalse(input.slice("foo", 1, 1) instanceof ByteBufferIndexInput); + } + + try (IndexInput input = store.directory().openInput("someOtherFile.txt", IOContext.DEFAULT)) { + assertTrue(input instanceof ByteBufferIndexInput); + assertTrue(input.clone() instanceof ByteBufferIndexInput); + assertTrue(input.slice("foo", 1, 1) instanceof ByteBufferIndexInput); + } + } + + indexSettings = IndexSettingsModule.newIndexSettings("index", + Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) + .put(Store.FORCE_RAM_TERM_DICT.getKey(), false).build()); + + try (Store store = new Store(shardId, indexSettings, new MMapDirectory(createTempDir()), new DummyShardLock(shardId))) { + try (IndexOutput output = store.directory().createOutput(file, IOContext.DEFAULT)) { + output.writeInt(0); + } + try (IndexInput input = store.directory().openInput(file, IOContext.DEFAULT)) { + assertTrue(input instanceof ByteBufferIndexInput); + assertTrue(input.clone() instanceof ByteBufferIndexInput); + assertTrue(input.slice("foo", 1, 1) instanceof ByteBufferIndexInput); + } + } + } + + public void testGetPendingFiles() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + final String testfile = "testfile"; + try (Store store = new Store(shardId, INDEX_SETTINGS, new NIOFSDirectory(createTempDir()), new DummyShardLock(shardId))) { + store.directory().createOutput(testfile, IOContext.DEFAULT).close(); + try (IndexInput input = store.directory().openInput(testfile, IOContext.DEFAULT)) { + store.directory().deleteFile(testfile); + assertEquals(FilterDirectory.unwrap(store.directory()).getPendingDeletions(), store.directory().getPendingDeletions()); + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java index 003054fc71550..a3ebfff478e98 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java @@ -37,6 +37,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Comparator; +import java.util.Iterator; import java.util.List; import java.util.Random; import java.util.Set; @@ -142,4 +143,28 @@ public static List drainSnapshot(Translog.Snapshot snapshot, } return ops; } + + public static Translog.Snapshot newSnapshotFromOperations(List operations) { + final Iterator iterator = operations.iterator(); + return new Translog.Snapshot() { + @Override + public int totalOperations() { + return operations.size(); + } + + @Override + public Translog.Operation next() { + if (iterator.hasNext()) { + return iterator.next(); + } else { + return null; + } + } + + @Override + public void close() { + + } + }; + } } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java 
b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 7e13b38fd3d25..90909b1c201a4 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -386,10 +386,10 @@ public void testCacheWithFilteredAlias() { ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); client.prepareIndex("index", "type", "1").setRouting("1").setSource("created_at", DateTimeFormatter.ISO_LOCAL_DATE.format(now)).get(); - refresh(); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); + refresh(); assertCacheState(client, "index", 0, 0); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java new file mode 100644 index 0000000000000..15b45330530d3 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java @@ -0,0 +1,290 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices; + +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.search.Query; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.cache.RemovalNotification; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.Engine.Searcher; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesRequestCache.Key; +import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; +import org.elasticsearch.node.MockNode; +import org.elasticsearch.node.Node; +import org.elasticsearch.node.NodeValidationException; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.MockHttpTransport; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.transport.nio.MockNioTransportPlugin; + +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collections; + +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +public class IndicesServiceCloseTests extends ESTestCase { + + private Node startNode() throws NodeValidationException { + final Path tempDir = createTempDir(); + String nodeName = "node_s_0"; + Settings settings = Settings.builder() + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", random().nextLong())) + .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) + .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo")) + .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), createTempDir().getParent()) + .put(Node.NODE_NAME_SETTING.getKey(), nodeName) + .put(ScriptService.SCRIPT_MAX_COMPILATIONS_RATE.getKey(), "1000/1m") + .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created + .put("transport.type", getTestTransportType()) + .put(Node.NODE_DATA_SETTING.getKey(), true) + .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) + // default the watermarks low values to prevent tests from failing on nodes without enough disk space + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b") + .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "1b") + // turning on the real memory circuit breaker leads to spurious test failures. 
As we have no full control over heap usage, we + // turn it off for these tests. + .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) + .putList(DISCOVERY_SEED_HOSTS_SETTING.getKey()) // empty list disables a port scan for other nodes + .putList(INITIAL_MASTER_NODES_SETTING.getKey(), nodeName) + .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true) + .build(); + + Node node = new MockNode(settings, + Arrays.asList(MockNioTransportPlugin.class, MockHttpTransport.TestPlugin.class, InternalSettingsPlugin.class), true); + node.start(); + return node; + } + + public void testCloseEmptyIndicesService() throws Exception { + Node node = startNode(); + IndicesService indicesService = node.injector().getInstance(IndicesService.class); + assertEquals(1, indicesService.indicesRefCount.refCount()); + node.close(); + assertEquals(0, indicesService.indicesRefCount.refCount()); + } + + public void testCloseNonEmptyIndicesService() throws Exception { + Node node = startNode(); + IndicesService indicesService = node.injector().getInstance(IndicesService.class); + assertEquals(1, indicesService.indicesRefCount.refCount()); + + assertAcked(node.client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))); + + assertEquals(2, indicesService.indicesRefCount.refCount()); + + node.close(); + assertEquals(0, indicesService.indicesRefCount.refCount()); + } + + public void testCloseWithIncedRefStore() throws Exception { + Node node = startNode(); + IndicesService indicesService = node.injector().getInstance(IndicesService.class); + assertEquals(1, indicesService.indicesRefCount.refCount()); + + assertAcked(node.client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))); + + assertEquals(2, indicesService.indicesRefCount.refCount()); + + IndexService indexService = indicesService.iterator().next(); + IndexShard shard = indexService.getShard(0); + shard.store().incRef(); + + node.close(); + assertEquals(1, indicesService.indicesRefCount.refCount()); + + shard.store().decRef(); + assertEquals(0, indicesService.indicesRefCount.refCount()); + } + + public void testCloseWhileOngoingRequest() throws Exception { + Node node = startNode(); + IndicesService indicesService = node.injector().getInstance(IndicesService.class); + assertEquals(1, indicesService.indicesRefCount.refCount()); + + assertAcked(node.client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))); + node.client().prepareIndex("test", "_doc", "1").setSource(Collections.emptyMap()).get(); + ElasticsearchAssertions.assertAllSuccessful(node.client().admin().indices().prepareRefresh("test").get()); + + assertEquals(2, indicesService.indicesRefCount.refCount()); + + IndexService indexService = indicesService.iterator().next(); + IndexShard shard = indexService.getShard(0); + Searcher searcher = shard.acquireSearcher("test"); + assertEquals(1, searcher.reader().maxDoc()); + + node.close(); + assertEquals(1, indicesService.indicesRefCount.refCount()); + + searcher.close(); + assertEquals(0, indicesService.indicesRefCount.refCount()); + } + + public void testCloseAfterRequestHasUsedQueryCache() throws Exception { + Node node = startNode(); + IndicesService indicesService = node.injector().getInstance(IndicesService.class); + 
assertEquals(1, indicesService.indicesRefCount.refCount()); + + assertAcked(node.client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true))); + node.client().prepareIndex("test", "_doc", "1").setSource(Collections.singletonMap("foo", 3L)).get(); + ElasticsearchAssertions.assertAllSuccessful(node.client().admin().indices().prepareRefresh("test").get()); + + assertEquals(2, indicesService.indicesRefCount.refCount()); + + IndicesQueryCache cache = indicesService.getIndicesQueryCache(); + + IndexService indexService = indicesService.iterator().next(); + IndexShard shard = indexService.getShard(0); + Searcher searcher = shard.acquireSearcher("test"); + assertEquals(1, searcher.reader().maxDoc()); + + Query query = LongPoint.newRangeQuery("foo", 0, 5); + assertEquals(0L, cache.getStats(shard.shardId()).getCacheSize()); + searcher.searcher().count(query); + assertEquals(1L, cache.getStats(shard.shardId()).getCacheSize()); + + searcher.close(); + assertEquals(2, indicesService.indicesRefCount.refCount()); + assertEquals(1L, cache.getStats(shard.shardId()).getCacheSize()); + + node.close(); + assertEquals(0, indicesService.indicesRefCount.refCount()); + assertEquals(0L, cache.getStats(shard.shardId()).getCacheSize()); + } + + public void testCloseWhileOngoingRequestUsesQueryCache() throws Exception { + Node node = startNode(); + IndicesService indicesService = node.injector().getInstance(IndicesService.class); + assertEquals(1, indicesService.indicesRefCount.refCount()); + + assertAcked(node.client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true))); + node.client().prepareIndex("test", "_doc", "1").setSource(Collections.singletonMap("foo", 3L)).get(); + ElasticsearchAssertions.assertAllSuccessful(node.client().admin().indices().prepareRefresh("test").get()); + + assertEquals(2, indicesService.indicesRefCount.refCount()); + + IndicesQueryCache cache = indicesService.getIndicesQueryCache(); + + IndexService indexService = indicesService.iterator().next(); + IndexShard shard = indexService.getShard(0); + Searcher searcher = shard.acquireSearcher("test"); + assertEquals(1, searcher.reader().maxDoc()); + + node.close(); + assertEquals(1, indicesService.indicesRefCount.refCount()); + + Query query = LongPoint.newRangeQuery("foo", 0, 5); + assertEquals(0L, cache.getStats(shard.shardId()).getCacheSize()); + searcher.searcher().count(query); + assertEquals(1L, cache.getStats(shard.shardId()).getCacheSize()); + + searcher.close(); + assertEquals(0, indicesService.indicesRefCount.refCount()); + assertEquals(0L, cache.getStats(shard.shardId()).getCacheSize()); + } + + public void testCloseWhileOngoingRequestUsesRequestCache() throws Exception { + Node node = startNode(); + IndicesService indicesService = node.injector().getInstance(IndicesService.class); + assertEquals(1, indicesService.indicesRefCount.refCount()); + + assertAcked(node.client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true))); + node.client().prepareIndex("test", "_doc", "1").setSource(Collections.singletonMap("foo", 3L)).get(); + 
ElasticsearchAssertions.assertAllSuccessful(node.client().admin().indices().prepareRefresh("test").get()); + + assertEquals(2, indicesService.indicesRefCount.refCount()); + + IndicesRequestCache cache = indicesService.indicesRequestCache; + + IndexService indexService = indicesService.iterator().next(); + IndexShard shard = indexService.getShard(0); + Searcher searcher = shard.acquireSearcher("test"); + assertEquals(1, searcher.reader().maxDoc()); + + node.close(); + assertEquals(1, indicesService.indicesRefCount.refCount()); + + assertEquals(0L, cache.count()); + IndicesRequestCache.CacheEntity cacheEntity = new IndicesRequestCache.CacheEntity() { + @Override + public long ramBytesUsed() { + return 42; + } + + @Override + public void onCached(Key key, BytesReference value) {} + + @Override + public boolean isOpen() { + return true; + } + + @Override + public Object getCacheIdentity() { + return this; + } + + @Override + public void onHit() {} + + @Override + public void onMiss() {} + + @Override + public void onRemoval(RemovalNotification notification) {} + }; + cache.getOrCompute(cacheEntity, () -> new BytesArray("bar"), searcher.getDirectoryReader(), new BytesArray("foo"), () -> "foo"); + assertEquals(1L, cache.count()); + + searcher.close(); + assertEquals(0, indicesService.indicesRefCount.refCount()); + assertEquals(0L, cache.count()); + } +} diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 60dbad99795f3..bf1c4db11bb2e 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -318,8 +318,10 @@ public void testPendingTasks() throws Exception { assertTrue(indicesService.hasUncompletedPendingDeletes()); // shard lock released... 
we can now delete indicesService.processPendingDeletes(test.index(), test.getIndexSettings(), new TimeValue(0, TimeUnit.MILLISECONDS)); - assertEquals(indicesService.numPendingDeletes(test.index()), 0); - assertTrue(indicesService.hasUncompletedPendingDeletes()); // "bogus" index has not been removed + assertBusy(() -> { + assertEquals(indicesService.numPendingDeletes(test.index()), 0); + assertTrue(indicesService.hasUncompletedPendingDeletes()); // "bogus" index has not been removed + }); } assertAcked(client().admin().indices().prepareOpen("test").setTimeout(TimeValue.timeValueSeconds(1))); @@ -642,5 +644,48 @@ public static ClusterState createClusterForShardLimitTest(int nodesInCluster, in .build(); } + public void testOptimizeAutoGeneratedIdsSettingRemoval() throws Exception { + final IndicesService indicesService = getIndicesService(); + + final Index index = new Index("foo-index", UUIDs.randomBase64UUID()); + Settings.Builder builder = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_7_0_0) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + IndexMetaData indexMetaData = new IndexMetaData.Builder(index.getName()) + .settings(builder.build()) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + IndexService indexService = indicesService.createIndex(indexMetaData, Collections.emptyList()); + assertNotNull(indexService); + + final Index index2 = new Index("bar-index", UUIDs.randomBase64UUID()); + Settings.Builder builder2 = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_7_0_0) + .put(IndexMetaData.SETTING_INDEX_UUID, index2.getUUID()) + .put(EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey(), randomBoolean()); + IndexMetaData indexMetaData2 = new IndexMetaData.Builder(index2.getName()) + .settings(builder2.build()) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> indicesService.createIndex(indexMetaData2, Collections.emptyList())); + assertEquals("Setting [" + EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey() + "] was removed in version 7.0.0", + ex.getMessage()); + + Version version = randomFrom(Version.V_6_0_0_rc1, Version.V_6_0_0, Version.V_6_2_0, Version.V_6_3_0, Version.V_6_4_0); + builder = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, version) + .put(IndexMetaData.SETTING_INDEX_UUID, index2.getUUID()) + .put(EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey(), randomBoolean()); + IndexMetaData indexMetaData3 = new IndexMetaData.Builder(index2.getName()) + .settings(builder.build()) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + IndexService indexService2 = indicesService.createIndex(indexMetaData3, Collections.emptyList()); + assertNotNull(indexService2); + } } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/FakeThreadPoolMasterService.java b/server/src/test/java/org/elasticsearch/indices/cluster/FakeThreadPoolMasterService.java index d535e9e00ee53..e1c7c3fafd274 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/FakeThreadPoolMasterService.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/FakeThreadPoolMasterService.java @@ -84,6 +84,10 @@ public void execute(Runnable command) { }; } + public int getFakeMasterServicePendingTaskCount() { + return pendingTasks.size(); + } + private void scheduleNextTaskIfNecessary() { if (taskInProgress == false && pendingTasks.isEmpty() == false && scheduledNextTask == false) { 
scheduledNextTask = true; diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java index 4e43f95d84447..0c056da34a56d 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -100,8 +100,8 @@ public void run() { finished.set(true); indexingThread.join(); refresh("test"); - ElasticsearchAssertions.assertHitCount(client().prepareSearch("test").get(), numAutoGenDocs.get()); - ElasticsearchAssertions.assertHitCount(client().prepareSearch("test")// extra paranoia ;) + ElasticsearchAssertions.assertHitCount(client().prepareSearch("test").setTrackTotalHits(true).get(), numAutoGenDocs.get()); + ElasticsearchAssertions.assertHitCount(client().prepareSearch("test").setTrackTotalHits(true)// extra paranoia ;) .setQuery(QueryBuilders.termQuery("auto", true)).get(), numAutoGenDocs.get()); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index ea15eceb8be84..ea3e933a88314 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -26,11 +26,13 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RecoverySource; @@ -786,4 +788,55 @@ public void sendRequest(Transport.Connection connection, long requestId, String assertHitCount(client().prepareSearch(indexName).get(), numDocs); } } + + @TestLogging("org.elasticsearch.indices.recovery:TRACE") + public void testHistoryRetention() throws Exception { + internalCluster().startNodes(3); + + final String indexName = "test"; + client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)).get(); + ensureGreen(indexName); + + // Perform some replicated operations so the replica isn't simply empty, because ops-based recovery isn't better in that case + final List requests = new ArrayList<>(); + final int replicatedDocCount = scaledRandomIntBetween(25, 250); + while (requests.size() < replicatedDocCount) { + requests.add(client().prepareIndex(indexName, "_doc").setSource("{}", XContentType.JSON)); + } + indexRandom(true, requests); + if (randomBoolean()) { + flush(indexName); + } + + internalCluster().stopRandomNode(s -> true); + internalCluster().stopRandomNode(s -> true); + + final long desyncNanoTime = System.nanoTime(); + while 
(System.nanoTime() <= desyncNanoTime) { + // time passes + } + + final int numNewDocs = scaledRandomIntBetween(25, 250); + for (int i = 0; i < numNewDocs; i++) { + client().prepareIndex(indexName, "_doc").setSource("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + } + // Flush twice to update the safe commit's local checkpoint + assertThat(client().admin().indices().prepareFlush(indexName).setForce(true).execute().get().getFailedShards(), equalTo(0)); + assertThat(client().admin().indices().prepareFlush(indexName).setForce(true).execute().get().getFailedShards(), equalTo(0)); + + assertAcked(client().admin().indices().prepareUpdateSettings(indexName) + .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1))); + internalCluster().startNode(); + ensureGreen(indexName); + + final RecoveryResponse recoveryResponse = client().admin().indices().recoveries(new RecoveryRequest(indexName)).get(); + final List recoveryStates = recoveryResponse.shardRecoveryStates().get(indexName); + recoveryStates.removeIf(r -> r.getTimer().getStartNanoTime() <= desyncNanoTime); + + assertThat(recoveryStates, hasSize(1)); + assertThat(recoveryStates.get(0).getIndex().totalFileCount(), is(0)); + assertThat(recoveryStates.get(0).getTranslog().recoveredOperations(), greaterThan(0)); + } } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index a2ec88cf7b58c..41ea9a8bea74b 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -189,7 +189,7 @@ public void testWriteFileChunksConcurrently() throws Exception { for (Thread sender : senders) { sender.join(); } - recoveryTarget.renameAllTempFiles(); + recoveryTarget.cleanFiles(0, sourceSnapshot); recoveryTarget.decRef(); Store.MetadataSnapshot targetSnapshot = targetShard.snapshotStoreMetadata(); Store.RecoveryDiff diff = sourceSnapshot.recoveryDiff(targetSnapshot); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 8391827b2f83c..dac02f6fe9766 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -97,7 +97,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.IntSupplier; import java.util.function.Supplier; -import java.util.stream.Collectors; import java.util.zip.CRC32; import static java.util.Collections.emptyMap; @@ -231,8 +230,7 @@ public void testSendSnapshotSendsOps() throws IOException { operations.add(new Translog.Index(index, new Engine.IndexResult(1, 1, i - initialNumberOfDocs, true))); } final long startingSeqNo = randomIntBetween(0, numberOfDocsWithValidSequenceNumbers - 1); - final long requiredStartingSeqNo = randomIntBetween((int) startingSeqNo, numberOfDocsWithValidSequenceNumbers - 1); - final long endingSeqNo = randomIntBetween((int) requiredStartingSeqNo - 1, numberOfDocsWithValidSequenceNumbers - 1); + final long endingSeqNo = randomLongBetween(startingSeqNo, numberOfDocsWithValidSequenceNumbers - 1); final List shippedOps = new ArrayList<>(); final AtomicLong checkpointOnTarget = new 
AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); @@ -247,7 +245,7 @@ public void indexTranslogOperations(List operations, int tot }; RecoverySourceHandler handler = new RecoverySourceHandler(shard, recoveryTarget, request, fileChunkSizeInBytes, between(1, 10)); PlainActionFuture future = new PlainActionFuture<>(); - handler.phase2(startingSeqNo, requiredStartingSeqNo, endingSeqNo, newTranslogSnapshot(operations, Collections.emptyList()), + handler.phase2(startingSeqNo, endingSeqNo, newTranslogSnapshot(operations, Collections.emptyList()), randomNonNegativeLong(), randomNonNegativeLong(), RetentionLeases.EMPTY, future); final int expectedOps = (int) (endingSeqNo - startingSeqNo + 1); RecoverySourceHandler.SendSnapshotResult result = future.actionGet(); @@ -258,18 +256,6 @@ public void indexTranslogOperations(List operations, int tot assertThat(shippedOps.get(i), equalTo(operations.get(i + (int) startingSeqNo + initialNumberOfDocs))); } assertThat(result.targetLocalCheckpoint, equalTo(checkpointOnTarget.get())); - if (endingSeqNo >= requiredStartingSeqNo + 1) { - // check that missing ops blows up - List requiredOps = operations.subList(0, operations.size() - 1).stream() // remove last null marker - .filter(o -> o.seqNo() >= requiredStartingSeqNo && o.seqNo() <= endingSeqNo).collect(Collectors.toList()); - List opsToSkip = randomSubsetOf(randomIntBetween(1, requiredOps.size()), requiredOps); - PlainActionFuture failedFuture = new PlainActionFuture<>(); - expectThrows(IllegalStateException.class, () -> { - handler.phase2(startingSeqNo, requiredStartingSeqNo, endingSeqNo, newTranslogSnapshot(operations, opsToSkip), - randomNonNegativeLong(), randomNonNegativeLong(), RetentionLeases.EMPTY, failedFuture); - failedFuture.actionGet(); - }); - } } public void testSendSnapshotStopOnError() throws Exception { @@ -299,7 +285,7 @@ public void indexTranslogOperations(List operations, int tot PlainActionFuture future = new PlainActionFuture<>(); final long startingSeqNo = randomLongBetween(0, ops.size() - 1L); final long endingSeqNo = randomLongBetween(startingSeqNo, ops.size() - 1L); - handler.phase2(startingSeqNo, startingSeqNo, endingSeqNo, newTranslogSnapshot(ops, Collections.emptyList()), + handler.phase2(startingSeqNo, endingSeqNo, newTranslogSnapshot(ops, Collections.emptyList()), randomNonNegativeLong(), randomNonNegativeLong(), RetentionLeases.EMPTY, future); if (wasFailed.get()) { assertThat(expectThrows(RuntimeException.class, () -> future.actionGet()).getMessage(), equalTo("test - failed to index")); @@ -450,10 +436,12 @@ protected void failEngine(IOException cause) { handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), () -> 0); fail("exception index"); } catch (RuntimeException ex) { - assertNull(ExceptionsHelper.unwrapCorruption(ex)); + final IOException unwrappedCorruption = ExceptionsHelper.unwrapCorruption(ex); if (throwCorruptedIndexException) { + assertNotNull(unwrappedCorruption); assertEquals(ex.getMessage(), "[File corruption occurred on recovery but checksums are ok]"); } else { + assertNull(unwrappedCorruption); assertEquals(ex.getMessage(), "boom"); } } catch (CorruptIndexException ex) { @@ -498,11 +486,11 @@ void prepareTargetForTranslog(boolean fileBasedRecovery, int totalTranslogOps, A } @Override - void phase2(long startingSeqNo, long requiredSeqNoRangeStart, long endingSeqNo, Translog.Snapshot snapshot, + void phase2(long startingSeqNo, long endingSeqNo, Translog.Snapshot snapshot, long maxSeenAutoIdTimestamp, long maxSeqNoOfUpdatesOrDeletes, RetentionLeases 
retentionLeases, ActionListener listener) throws IOException { phase2Called.set(true); - super.phase2(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, snapshot, + super.phase2(startingSeqNo, endingSeqNo, snapshot, maxSeenAutoIdTimestamp, maxSeqNoOfUpdatesOrDeletes, retentionLeases, listener); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java index ed1ee7708522d..b974d42d826bb 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java @@ -20,8 +20,6 @@ import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.IndexOutput; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -32,9 +30,6 @@ import java.util.Set; import java.util.regex.Pattern; -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; - public class RecoveryStatusTests extends ESSingleNodeTestCase { private static final org.apache.lucene.util.Version MIN_SUPPORTED_LUCENE_VERSION = org.elasticsearch.Version.CURRENT .minimumIndexCompatibilityVersion().luceneVersion; @@ -42,35 +37,27 @@ public void testRenameTempFiles() throws IOException { IndexService service = createIndex("foo"); IndexShard indexShard = service.getShardOrNull(0); - DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - RecoveryTarget status = new RecoveryTarget(indexShard, node, new PeerRecoveryTargetService.RecoveryListener() { - @Override - public void onRecoveryDone(RecoveryState state) { - } - - @Override - public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { - } - }, version -> {}); - try (IndexOutput indexOutput = status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength() - , "9z51nw", MIN_SUPPORTED_LUCENE_VERSION), status.store())) { + MultiFileWriter multiFileWriter = new MultiFileWriter(indexShard.store(), + indexShard.recoveryState().getIndex(), "recovery.test.", logger, () -> {}); + try (IndexOutput indexOutput = multiFileWriter.openAndPutIndexOutput("foo.bar", + new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw", MIN_SUPPORTED_LUCENE_VERSION), indexShard.store())) { indexOutput.writeInt(1); - IndexOutput openIndexOutput = status.getOpenIndexOutput("foo.bar"); + IndexOutput openIndexOutput = multiFileWriter.getOpenIndexOutput("foo.bar"); assertSame(openIndexOutput, indexOutput); openIndexOutput.writeInt(1); CodecUtil.writeFooter(indexOutput); } try { - status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw", - MIN_SUPPORTED_LUCENE_VERSION), status.store()); + multiFileWriter.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw", + MIN_SUPPORTED_LUCENE_VERSION), indexShard.store()); fail("file foo.bar is already opened and registered"); } catch (IllegalStateException ex) { assertEquals("output for file [foo.bar] has already been created", ex.getMessage()); // all well = it's already registered } - status.removeOpenIndexOutputs("foo.bar"); - Set strings = 
Sets.newHashSet(status.store().directory().listAll()); + multiFileWriter.removeOpenIndexOutputs("foo.bar"); + Set strings = Sets.newHashSet(indexShard.store().directory().listAll()); String expectedFile = null; for (String file : strings) { if (Pattern.compile("recovery[.][\\w-]+[.]foo[.]bar").matcher(file).matches()) { @@ -80,12 +67,10 @@ public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, bo } assertNotNull(expectedFile); indexShard.close("foo", false);// we have to close it here otherwise rename fails since the write.lock is held by the engine - status.renameAllTempFiles(); - strings = Sets.newHashSet(status.store().directory().listAll()); + multiFileWriter.renameAllTempFiles(); + strings = Sets.newHashSet(indexShard.store().directory().listAll()); assertTrue(strings.toString(), strings.contains("foo.bar")); assertFalse(strings.toString(), strings.contains(expectedFile)); - // we must fail the recovery because marking it as done will try to move the shard to POST_RECOVERY, - // which will fail because it's started - status.fail(new RecoveryFailedException(status.state(), "end of test. OK.", null), false); + multiFileWriter.close(); } } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 48061b11d58c7..2761333ef5628 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -68,8 +68,7 @@ public void testTranslogHistoryTransferred() throws Exception { shards.addReplica(); shards.startAll(); final IndexShard replica = shards.getReplicas().get(0); - boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); - assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? moreDocs : docs + moreDocs)); + assertThat(getTranslog(replica).totalOperations(), equalTo(docs + moreDocs)); shards.assertAllEqual(docs + moreDocs); } } @@ -282,8 +281,7 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception { shards.recoverReplica(newReplica); // file based recovery should be made assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); - boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); - assertThat(getTranslog(newReplica).totalOperations(), equalTo(softDeletesEnabled ? nonFlushedDocs : numDocs)); + assertThat(getTranslog(newReplica).totalOperations(), equalTo(numDocs)); // history uuid was restored assertThat(newReplica.getHistoryUUID(), equalTo(historyUUID)); @@ -387,8 +385,7 @@ public void testShouldFlushAfterPeerRecovery() throws Exception { shards.recoverReplica(replica); // Make sure the flushing will eventually be completed (eg. `shouldPeriodicallyFlush` is false) assertBusy(() -> assertThat(getEngine(replica).shouldPeriodicallyFlush(), equalTo(false))); - boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); - assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? 
0 : numDocs)); + assertThat(getTranslog(replica).totalOperations(), equalTo(numDocs)); shards.assertAllEqual(numDocs); } } diff --git a/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index d749ce367cf0b..066fd96455a9e 100644 --- a/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; import java.util.Collection; @@ -47,6 +48,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.nullValue; public class UpdateSettingsIT extends ESIntegTestCase { @@ -126,6 +128,16 @@ public List> getSettings() { } } + /** + * Needed by {@link UpdateSettingsIT#testEngineGCDeletesSetting()} + */ + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put("thread_pool.estimated_time_interval", 0) + .build(); + } + public void testUpdateDependentClusterSettings() { IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() @@ -435,7 +447,7 @@ public void testOpenCloseUpdateSettings() throws Exception { assertThat(getSettingsResponse.getSetting("test", "index.final"), nullValue()); } - public void testEngineGCDeletesSetting() throws InterruptedException { + public void testEngineGCDeletesSetting() throws Exception { createIndex("test"); client().prepareIndex("test", "type", "1").setSource("f", 1).get(); DeleteResponse response = client().prepareDelete("test", "type", "1").get(); @@ -443,15 +455,20 @@ public void testEngineGCDeletesSetting() throws InterruptedException { long primaryTerm = response.getPrimaryTerm(); // delete is still in cache this should work client().prepareIndex("test", "type", "1").setSource("f", 2).setIfSeqNo(seqNo).setIfPrimaryTerm(primaryTerm).get(); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.gc_deletes", 0)).get(); + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.gc_deletes", 0))); response = client().prepareDelete("test", "type", "1").get(); seqNo = response.getSeqNo(); - Thread.sleep(300); // wait for cache time to change TODO: this needs to be solved better. To be discussed. 
+ + // Make sure the time has advanced for InternalEngine#resolveDocVersion() + for (ThreadPool threadPool : internalCluster().getInstances(ThreadPool.class)) { + long startTime = threadPool.relativeTimeInMillis(); + assertBusy(() -> assertThat(threadPool.relativeTimeInMillis(), greaterThan(startTime))); + } + // delete is should not be in cache assertThrows(client().prepareIndex("test", "type", "1").setSource("f", 3).setIfSeqNo(seqNo).setIfPrimaryTerm(primaryTerm), VersionConflictEngineException.class); - } public void testUpdateSettingsWithBlocks() { diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java index 99c50a839abc6..2125184baef63 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseWhileRelocatingShardsIT.java @@ -18,8 +18,10 @@ */ package org.elasticsearch.indices.state; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; @@ -32,13 +34,14 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.recovery.PeerRecoverySourceService; import org.elasticsearch.indices.recovery.StartRecoveryRequest; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; @@ -57,6 +60,7 @@ import static org.elasticsearch.indices.state.CloseIndexIT.assertIndexIsOpened; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; @ESIntegTestCase.ClusterScope(minNumDataNodes = 2) public class CloseWhileRelocatingShardsIT extends ESIntegTestCase { @@ -68,9 +72,11 @@ protected Collection> nodePlugins() { @Override protected Settings nodeSettings(int nodeOrdinal) { + final int maxRecoveries = Integer.MAX_VALUE; return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), Integer.MAX_VALUE) + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), maxRecoveries) + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), maxRecoveries) .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), -1) .build(); } @@ -80,7 +86,6 @@ protected int maximumNumberOfShards() { return 3; } - 
@TestLogging("org.elasticsearch.cluster.metadata.MetaDataIndexStateService:DEBUG,org.elasticsearch.action.admin.indices.close:DEBUG") public void testCloseWhileRelocatingShards() throws Exception { final String[] indices = new String[randomIntBetween(3, 5)]; final Map docsPerIndex = new HashMap<>(); @@ -119,21 +124,19 @@ public void testCloseWhileRelocatingShards() throws Exception { final String targetNode = internalCluster().startDataOnlyNode(); ensureClusterSizeConsistency(); // wait for the master to finish processing join. - final MockTransportService targetTransportService = - (MockTransportService) internalCluster().getInstance(TransportService.class, targetNode); - final Set acknowledgedCloses = ConcurrentCollections.newConcurrentSet(); try { final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); + final ClusterState state = clusterService.state(); final CountDownLatch latch = new CountDownLatch(indices.length); - final CountDownLatch release = new CountDownLatch(1); + final CountDownLatch release = new CountDownLatch(indices.length); // relocate one shard for every index to be closed final AllocationCommands commands = new AllocationCommands(); for (final String index : indices) { final NumShards numShards = getNumShards(index); final int shardId = numShards.numPrimaries == 1 ? 0 : randomIntBetween(0, numShards.numPrimaries - 1); - final IndexRoutingTable indexRoutingTable = clusterService.state().routingTable().index(index); + final IndexRoutingTable indexRoutingTable = state.routingTable().index(index); final ShardRouting primary = indexRoutingTable.shard(shardId).primaryShard(); assertTrue(primary.started()); @@ -146,24 +149,49 @@ public void testCloseWhileRelocatingShards() throws Exception { currentNodeId = replica.currentNodeId(); } } + commands.add(new MoveAllocationCommand(index, shardId, state.nodes().resolveNode(currentNodeId).getName(), targetNode)); + } + + // Build the list of shards for which recoveries will be blocked + final Set blockedShards = commands.commands().stream() + .map(c -> (MoveAllocationCommand) c) + .map(c -> new ShardId(clusterService.state().metaData().index(c.index()).getIndex(), c.shardId())) + .collect(Collectors.toSet()); + assertThat(blockedShards, hasSize(indices.length)); + + final Set acknowledgedCloses = ConcurrentCollections.newConcurrentSet(); + final Set interruptedRecoveries = ConcurrentCollections.newConcurrentSet(); - final DiscoveryNode sourceNode = clusterService.state().nodes().resolveNode(primary.currentNodeId()); - targetTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, sourceNode.getName()), - (connection, requestId, action, request, options) -> { - if (PeerRecoverySourceService.Actions.START_RECOVERY.equals(action)) { - logger.debug("blocking recovery of shard {}", ((StartRecoveryRequest) request).shardId()); - latch.countDown(); - try { - release.await(); - logger.debug("releasing recovery of shard {}", ((StartRecoveryRequest) request).shardId()); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - } - connection.sendRequest(requestId, action, request, options); + // Create a SendRequestBehavior that will block outgoing start recovery request + final StubbableTransport.SendRequestBehavior sendBehavior = (connection, requestId, action, request, options) -> { + if (PeerRecoverySourceService.Actions.START_RECOVERY.equals(action)) { + final StartRecoveryRequest startRecoveryRequest = 
((StartRecoveryRequest) request); + if (blockedShards.contains(startRecoveryRequest.shardId())) { + logger.debug("blocking recovery of shard {}", startRecoveryRequest.shardId()); + latch.countDown(); + try { + release.await(); + logger.debug("releasing recovery of shard {}", startRecoveryRequest.shardId()); + } catch (final InterruptedException e) { + logger.warn(() -> new ParameterizedMessage("exception when releasing recovery of shard {}", + startRecoveryRequest.shardId()), e); + interruptedRecoveries.add(startRecoveryRequest.shardId().getIndexName()); + Thread.currentThread().interrupt(); + return; } - ); - commands.add(new MoveAllocationCommand(index, shardId, currentNodeId, targetNode)); + } + } + connection.sendRequest(requestId, action, request, options); + }; + + final MockTransportService targetTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, targetNode); + + for (DiscoveryNode node : state.getNodes()) { + if (node.isDataNode() && node.getName().equals(targetNode) == false) { + final TransportService sourceTransportService = internalCluster().getInstance(TransportService.class, node.getName()); + targetTransportService.addSendBehavior(sourceTransportService, sendBehavior); + } } assertAcked(client().admin().cluster().reroute(new ClusterRerouteRequest().commands(commands)).get()); @@ -222,12 +250,15 @@ public void testCloseWhileRelocatingShards() throws Exception { targetTransportService.clearAllRules(); + // If a shard recovery has been interrupted, we expect its index to be closed + interruptedRecoveries.forEach(CloseIndexIT::assertIndexIsClosed); + assertThat("Consider that the test failed if no indices were successfully closed", acknowledgedCloses.size(), greaterThan(0)); assertAcked(client().admin().indices().prepareOpen("index-*")); ensureGreen(indices); for (String index : acknowledgedCloses) { - long docsCount = client().prepareSearch(index).setSize(0).get().getHits().getTotalHits().value; + long docsCount = client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get().getHits().getTotalHits().value; assertEquals("Expected " + docsPerIndex.get(index) + " docs in index " + index + " but got " + docsCount + " (close acknowledged=" + acknowledgedCloses.contains(index) + ")", (long) docsPerIndex.get(index), docsCount); } diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 5fb67a64d9db5..59e7c21a3e6e8 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -62,7 +62,6 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.ArrayList; @@ -1008,7 +1007,6 @@ private void assertCumulativeQueryCacheStats(IndicesStatsResponse response) { assertEquals(total, shardTotal); } - @TestLogging("_root:DEBUG") // this fails at a very low rate on CI: https://github.com/elastic/elasticsearch/issues/32506 public void testFilterCacheStats() throws Exception { Settings settings = Settings.builder().put(indexSettings()).put("number_of_replicas", 0).build(); assertAcked(prepareCreate("index").setSettings(settings).get()); @@ -1033,7 +1031,6 @@ public void testFilterCacheStats() throws Exception { 
IndicesStatsResponse stats = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeQueryCacheStats(stats); assertThat(stats.getTotal().queryCache.getHitCount(), equalTo(0L)); - assertThat(stats.getTotal().queryCache.getEvictions(), equalTo(0L)); assertThat(stats.getTotal().queryCache.getMissCount(), greaterThan(0L)); assertThat(stats.getTotal().queryCache.getCacheSize(), greaterThan(0L)); }); @@ -1044,7 +1041,6 @@ public void testFilterCacheStats() throws Exception { IndicesStatsResponse stats = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeQueryCacheStats(stats); assertThat(stats.getTotal().queryCache.getHitCount(), greaterThan(0L)); - assertThat(stats.getTotal().queryCache.getEvictions(), equalTo(0L)); assertThat(stats.getTotal().queryCache.getMissCount(), greaterThan(0L)); assertThat(stats.getTotal().queryCache.getCacheSize(), greaterThan(0L)); }); diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index 08c32665adc58..4acb391d9c0ee 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -162,7 +162,7 @@ public void testPersistentActionWithNoAvailableNode() throws Exception { TaskInfo taskInfo = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]") .get().getTasks().get(0); - // Verifying the the task runs on the new node + // Verifying the task runs on the new node assertThat(taskInfo.getTaskId().getNodeId(), equalTo(newNodeId)); internalCluster().stopRandomNode(settings -> "test".equals(settings.get("node.attr.test_attr"))); @@ -202,7 +202,7 @@ public void testPersistentActionWithNonClusterStateCondition() throws Exception TaskInfo taskInfo = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]") .get().getTasks().get(0); - // Verifying the the task can now be assigned + // Verifying the task can now be assigned assertThat(taskInfo.getTaskId().getNodeId(), notNullValue()); // Remove the persistent task diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java index 683d1bc00893b..735efb68091fd 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java @@ -269,7 +269,7 @@ public void sendCompletionRequest(final String taskId, final long taskAllocation newClusterState = addTask(state, "test", null, "this_node"); coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state)); - // Check the the task is know to the task manager + // Check the task is known to the task manager assertThat(taskManager.getTasks().size(), equalTo(1)); AllocatedPersistentTask runningTask = (AllocatedPersistentTask)taskManager.getTasks().values().iterator().next(); String persistentId = runningTask.getPersistentTaskId(); @@ -305,7 +305,7 @@ public void sendCompletionRequest(final String taskId, final long taskAllocation executor.get(0).task.markAsFailed(new IOException("test")); } - // Check the the task is now removed from task manager + // Check the task is now removed from task manager
assertThat(taskManager.getTasks().values(), empty()); } diff --git a/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index 9220748f38c25..7002a77b6ba10 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -30,20 +30,28 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; @@ -58,6 +66,23 @@ public class RecoveryWhileUnderLoadIT extends ESIntegTestCase { private final Logger logger = LogManager.getLogger(RecoveryWhileUnderLoadIT.class); + public static final class RetentionLeaseSyncIntervalSettingPlugin extends Plugin { + + @Override + public List> getSettings() { + return Collections.singletonList(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING); + } + + } + + @Override + protected Collection> nodePlugins() { + return Stream.concat( + super.nodePlugins().stream(), + Stream.of(RetentionLeaseSyncIntervalSettingPlugin.class)) + .collect(Collectors.toList()); + } + public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception { logger.info("--> creating test index ..."); int numberOfShards = numberOfShards(); @@ -260,7 +285,8 @@ public void testRecoverWhileRelocating() throws Exception { assertAcked(prepareCreate("test", 3, Settings.builder() .put(SETTING_NUMBER_OF_SHARDS, numShards) .put(SETTING_NUMBER_OF_REPLICAS, numReplicas) - .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC))); + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), randomFrom("100ms", "1s", "5s", "30s", "60s")))); final int numDocs = scaledRandomIntBetween(200, 9999); diff --git a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java index fb455f37d76f3..c2d35279bdff4 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -110,6 +110,13 @@ protected void beforeIndexDeletion() throws Exception { internalCluster().assertSameDocIdsOnShards(); } + @Override + public Settings indexSettings() { + return 
Settings.builder().put(super.indexSettings()) + // sync global checkpoint quickly so we can verify seq_no_stats aligned between all copies after tests. + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s").build(); + } + public void testSimpleRelocationNoIndexing() { logger.info("--> starting [node1] ..."); final String node_1 = internalCluster().startNode(); @@ -279,8 +286,7 @@ public void testRelocationWhileRefreshing() throws Exception { .put("index.number_of_shards", 1) .put("index.number_of_replicas", numberOfReplicas) .put("index.refresh_interval", -1) // we want to control refreshes - .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms")) - .get(); + ).get(); for (int i = 1; i < numberOfNodes; i++) { logger.info("--> starting [node_{}] ...", i); @@ -465,8 +471,7 @@ public void testIndexAndRelocateConcurrently() throws ExecutionException, Interr final Settings.Builder settings = Settings.builder() .put("index.routing.allocation.exclude.color", "blue") .put(indexSettings()) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(halfNodes - 1)) - .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms"); + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(halfNodes - 1)); assertAcked(prepareCreate("test", settings)); assertAllShardsOnNodes("test", redNodes); int numDocs = randomIntBetween(100, 150); @@ -518,8 +523,8 @@ public void testRelocateWhileWaitingForRefresh() { prepareCreate("test", Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", 0) - .put("index.refresh_interval", -1) // we want to control refreshes - ).get(); + // we want to control refreshes + .put("index.refresh_interval", -1)).get(); logger.info("--> index 10 docs"); for (int i = 0; i < 10; i++) { diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index ba3fa84a19641..1b59f558db584 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.engine.InternalEngineFactory; +import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexShardTestCase; @@ -109,6 +110,7 @@ public void testRestoreSnapshotWithExistingFiles() throws IOException { null, new InternalEngineFactory(), () -> {}, + RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER); // restore the shard diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 871e5071ec7b7..a09560c54ce43 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -22,8 +22,10 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; +import 
org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; @@ -232,6 +234,38 @@ public void testIncompatibleSnapshotsBlobExists() throws Exception { assertEquals(0, repository.getRepositoryData().getIncompatibleSnapshotIds().size()); } + public void testBadChunksize() throws Exception { + final Client client = client(); + final Path location = ESIntegTestCase.randomRepoPath(node().settings()); + final String repositoryName = "test-repo"; + + expectThrows(RepositoryException.class, () -> + client.admin().cluster().preparePutRepository(repositoryName) + .setType(REPO_TYPE) + .setSettings(Settings.builder().put(node().settings()) + .put("location", location) + .put("chunk_size", randomLongBetween(-10, 0), ByteSizeUnit.BYTES)) + .get()); + } + + public void testFsRepositoryCompressDeprecated() { + final Path location = ESIntegTestCase.randomRepoPath(node().settings()); + final Settings settings = Settings.builder().put(node().settings()).put("location", location).build(); + final RepositoryMetaData metaData = new RepositoryMetaData("test-repo", REPO_TYPE, settings); + + Settings useCompressSettings = Settings.builder() + .put(node().getEnvironment().settings()) + .put(FsRepository.REPOSITORIES_COMPRESS_SETTING.getKey(), true) + .build(); + Environment useCompressEnvironment = + new Environment(useCompressSettings, node().getEnvironment().configFile()); + + new FsRepository(metaData, useCompressEnvironment, null); + + assertWarnings("[repositories.fs.compress] setting was deprecated in Elasticsearch and will be removed in a future release!" 
+ + " See the breaking changes documentation for the next major version."); + } + private BlobStoreRepository setupRepo() { final Client client = client(); final Path location = ESIntegTestCase.randomRepoPath(node().settings()); diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java index 1ed42cb24746b..dd4ca7bfd20e4 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java @@ -18,12 +18,22 @@ */ package org.elasticsearch.repositories.fs; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.util.concurrent.ExecutionException; +import java.util.stream.Stream; + import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.instanceOf; public class FsBlobStoreRepositoryIT extends ESBlobStoreRepositoryIntegTestCase { @@ -41,4 +51,47 @@ protected void createTestRepository(String name, boolean verify) { protected void afterCreationCheck(Repository repository) { assertThat(repository, instanceOf(FsRepository.class)); } + + public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOException, ExecutionException, InterruptedException { + final String repoName = randomAsciiName(); + final Path repoPath = randomRepoPath(); + + logger.info("--> creating repository {} at {}", repoName, repoPath); + + assertAcked(client().admin().cluster().preparePutRepository(repoName).setType("fs").setSettings(Settings.builder() + .put("location", repoPath) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + + String indexName = randomAsciiName(); + int docCount = iterations(10, 1000); + logger.info("--> create random index {} with {} records", indexName, docCount); + addRandomDocuments(indexName, docCount); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), docCount); + + final String snapshotName = randomAsciiName(); + logger.info("--> create snapshot {}:{}", repoName, snapshotName); + assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true).setIndices(indexName)); + + assertAcked(client().admin().indices().prepareDelete(indexName)); + assertAcked(client().admin().cluster().prepareDeleteRepository(repoName)); + + final Path deletedPath; + try (Stream contents = Files.list(repoPath.resolve("indices"))) { + //noinspection OptionalGetWithoutIsPresent because we know there's a subdirectory + deletedPath = contents.filter(Files::isDirectory).findAny().get(); + IOUtils.rm(deletedPath); + } + assertFalse(Files.exists(deletedPath)); + + assertAcked(client().admin().cluster().preparePutRepository(repoName).setType("fs").setSettings(Settings.builder() + .put("location", repoPath).put("readonly", true))); + + final ElasticsearchException exception = 
expectThrows(ElasticsearchException.class, () -> + client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(randomBoolean()).get()); + assertThat(exception.getRootCause(), instanceOf(NoSuchFileException.class)); + + assertFalse("deleted path is not recreated in readonly repository", Files.exists(deletedPath)); + } } diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java new file mode 100644 index 0000000000000..2dc6a5b7136fc --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.test.rest.RestActionTestCase; +import org.junit.Before; +import java.util.HashMap; +import java.util.Map; + + +public class RestAddVotingConfigExclusionActionTests extends RestActionTestCase { + + private RestAddVotingConfigExclusionAction action; + + @Before + public void setupAction() { + action = new RestAddVotingConfigExclusionAction(Settings.EMPTY, controller()); + } + + public void testResolveVotingConfigExclusionsRequest() { + Map params = new HashMap<>(); + params.put("node_name", "node-1,node-2,node-3"); + RestRequest deprecatedRequest = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.PUT) + .withPath("/_cluster/voting_config_exclusions") + .withParams(params) + .build(); + + AddVotingConfigExclusionsRequest addVotingConfigExclusionsRequest = action.resolveVotingConfigExclusionsRequest(deprecatedRequest); + String[] expected = {"node-1","node-2", "node-3"}; + assertArrayEquals(expected, addVotingConfigExclusionsRequest.getNodeDescriptions()); + } +} diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java index 7a35177c8ad2b..b9346e1c71a03 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestNodesActionTests.java @@ -60,7 +60,7 @@ public void testBuildTableDoesNotThrowGivenNullNodeInfoAndStats() { ClusterState clusterState = mock(ClusterState.class); when(clusterState.nodes()).thenReturn(discoveryNodes); - ClusterStateResponse 
clusterStateResponse = new ClusterStateResponse(clusterName, clusterState, randomNonNegativeLong(), false); + ClusterStateResponse clusterStateResponse = new ClusterStateResponse(clusterName, clusterState, false); NodesInfoResponse nodesInfoResponse = new NodesInfoResponse(clusterName, Collections.emptyList(), Collections.emptyList()); NodesStatsResponse nodesStatsResponse = new NodesStatsResponse(clusterName, Collections.emptyList(), Collections.emptyList()); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 894a4fa9d4ae3..80ca8268af996 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.search; import com.carrotsearch.hppc.IntArrayList; + import org.apache.lucene.search.Query; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.ElasticsearchException; @@ -198,7 +199,8 @@ public void testClearIndexDelete() { } public void testCloseSearchContextOnRewriteException() { - createIndex("index"); + // if refresh happens while checking the exception, the subsequent reference count might not match, so we switch it off + createIndex("index", Settings.builder().put("index.refresh_interval", -1).build()); client().prepareIndex("index", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); SearchService service = getInstanceFromNode(SearchService.class); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java new file mode 100644 index 0000000000000..3212c18cf278f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java @@ -0,0 +1,155 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.search.aggregations; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogramTests; +import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; +import org.elasticsearch.search.aggregations.bucket.terms.StringTermsTests; +import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValueTests; +import org.elasticsearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; +import org.elasticsearch.search.aggregations.pipeline.SumBucketPipelineAggregationBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class InternalAggregationsTests extends ESTestCase { + + private final NamedWriteableRegistry registry = new NamedWriteableRegistry( + new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedWriteables()); + + public void testReduceEmptyAggs() { + List aggs = Collections.emptyList(); + InternalAggregation.ReduceContext reduceContext = new InternalAggregation.ReduceContext(null, null, randomBoolean()); + assertNull(InternalAggregations.reduce(aggs, Collections.emptyList(), reduceContext)); + } + + public void testNonFinalReduceTopLevelPipelineAggs() throws IOException { + InternalAggregation terms = new StringTerms("name", BucketOrder.key(true), + 10, 1, Collections.emptyList(), Collections.emptyMap(), DocValueFormat.RAW, 25, false, 10, Collections.emptyList(), 0); + List aggs = Collections.singletonList(new InternalAggregations(Collections.singletonList(terms))); + List topLevelPipelineAggs = new ArrayList<>(); + MaxBucketPipelineAggregationBuilder maxBucketPipelineAggregationBuilder = new MaxBucketPipelineAggregationBuilder("test", "test"); + topLevelPipelineAggs.add((SiblingPipelineAggregator)maxBucketPipelineAggregationBuilder.create()); + InternalAggregation.ReduceContext reduceContext = new InternalAggregation.ReduceContext(null, null, false); + InternalAggregations reducedAggs = InternalAggregations.reduce(aggs, topLevelPipelineAggs, reduceContext); + assertEquals(1, reducedAggs.getTopLevelPipelineAggregators().size()); + assertEquals(1, reducedAggs.aggregations.size()); + } + + public void testFinalReduceTopLevelPipelineAggs() throws IOException { + InternalAggregation terms = new StringTerms("name", BucketOrder.key(true), + 10, 1, Collections.emptyList(), Collections.emptyMap(), DocValueFormat.RAW, 25, false, 10, Collections.emptyList(), 0); + + MaxBucketPipelineAggregationBuilder maxBucketPipelineAggregationBuilder = new MaxBucketPipelineAggregationBuilder("test", "test"); + SiblingPipelineAggregator siblingPipelineAggregator = (SiblingPipelineAggregator) maxBucketPipelineAggregationBuilder.create(); + InternalAggregation.ReduceContext reduceContext = new InternalAggregation.ReduceContext(null, null, 
true); + final InternalAggregations reducedAggs; + if (randomBoolean()) { + InternalAggregations aggs = new InternalAggregations(Collections.singletonList(terms), + Collections.singletonList(siblingPipelineAggregator)); + reducedAggs = InternalAggregations.reduce(Collections.singletonList(aggs), reduceContext); + } else { + InternalAggregations aggs = new InternalAggregations(Collections.singletonList(terms)); + List topLevelPipelineAggs = Collections.singletonList(siblingPipelineAggregator); + reducedAggs = InternalAggregations.reduce(Collections.singletonList(aggs), topLevelPipelineAggs, reduceContext); + } + assertEquals(0, reducedAggs.getTopLevelPipelineAggregators().size()); + assertEquals(2, reducedAggs.aggregations.size()); + } + + public void testSerialization() throws Exception { + List aggsList = new ArrayList<>(); + if (randomBoolean()) { + StringTermsTests stringTermsTests = new StringTermsTests(); + stringTermsTests.init(); + stringTermsTests.setUp(); + aggsList.add(stringTermsTests.createTestInstance()); + } + if (randomBoolean()) { + InternalDateHistogramTests dateHistogramTests = new InternalDateHistogramTests(); + dateHistogramTests.setUp(); + aggsList.add(dateHistogramTests.createTestInstance()); + } + if (randomBoolean()) { + InternalSimpleValueTests simpleValueTests = new InternalSimpleValueTests(); + aggsList.add(simpleValueTests.createTestInstance()); + } + List topLevelPipelineAggs = new ArrayList<>(); + if (randomBoolean()) { + if (randomBoolean()) { + topLevelPipelineAggs.add((SiblingPipelineAggregator)new MaxBucketPipelineAggregationBuilder("name1", "bucket1").create()); + } + if (randomBoolean()) { + topLevelPipelineAggs.add((SiblingPipelineAggregator)new AvgBucketPipelineAggregationBuilder("name2", "bucket2").create()); + } + if (randomBoolean()) { + topLevelPipelineAggs.add((SiblingPipelineAggregator)new SumBucketPipelineAggregationBuilder("name3", "bucket3").create()); + } + } + InternalAggregations aggregations = new InternalAggregations(aggsList, topLevelPipelineAggs); + writeToAndReadFrom(aggregations, 0); + } + + private void writeToAndReadFrom(InternalAggregations aggregations, int iteration) throws IOException { + Version version = VersionUtils.randomVersion(random()); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(version); + aggregations.writeTo(out); + try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(out.bytes().toBytesRef().bytes), registry)) { + in.setVersion(version); + InternalAggregations deserialized = InternalAggregations.readAggregations(in); + assertEquals(aggregations.aggregations, deserialized.aggregations); + if (aggregations.getTopLevelPipelineAggregators() == null) { + assertEquals(0, deserialized.getTopLevelPipelineAggregators().size()); + } else { + if (version.before(Version.V_6_7_0)) { + assertEquals(0, deserialized.getTopLevelPipelineAggregators().size()); + } else { + assertEquals(aggregations.getTopLevelPipelineAggregators().size(), + deserialized.getTopLevelPipelineAggregators().size()); + for (int i = 0; i < aggregations.getTopLevelPipelineAggregators().size(); i++) { + SiblingPipelineAggregator siblingPipelineAggregator1 = aggregations.getTopLevelPipelineAggregators().get(i); + SiblingPipelineAggregator siblingPipelineAggregator2 = deserialized.getTopLevelPipelineAggregators().get(i); + assertArrayEquals(siblingPipelineAggregator1.bucketsPaths(), siblingPipelineAggregator2.bucketsPaths()); + assertEquals(siblingPipelineAggregator1.name(), siblingPipelineAggregator2.name()); + 
} + } + } + if (iteration < 2) { + //serialize this enough times to make sure that we are able to write again what we read + writeToAndReadFrom(deserialized, iteration + 1); + } + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index a79797f6c822e..eafd88328b799 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -42,6 +42,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBounds; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; import org.elasticsearch.search.aggregations.metrics.Avg; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.test.ESIntegTestCase; @@ -1561,4 +1562,64 @@ private void assertMultiSortResponse(int[] expectedDays, BucketOrder... order) { private ZonedDateTime key(Histogram.Bucket bucket) { return (ZonedDateTime) bucket.getKey(); } + + /** + * See https://github.com/elastic/elasticsearch/issues/39107. Make sure we properly handle different + * timeZones. + */ + public void testDateNanosHistogram() throws Exception { + assertAcked(prepareCreate("nanos").addMapping("_doc", "date", "type=date_nanos").get()); + indexRandom(true, + client().prepareIndex("nanos", "_doc", "1").setSource("date", "2000-01-01")); + indexRandom(true, + client().prepareIndex("nanos", "_doc", "2").setSource("date", "2000-01-02")); + + //Search interval 24 hours + SearchResponse r = client().prepareSearch("nanos") + .addAggregation(dateHistogram("histo").field("date").
+ interval(1000 * 60 * 60 * 24).timeZone(ZoneId.of("Europe/Berlin"))) + .addDocValueField("date") + .get(); + assertSearchResponse(r); + + Histogram histogram = r.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + assertEquals(946681200000L, ((ZonedDateTime)buckets.get(0).getKey()).toEpochSecond() * 1000); + assertEquals(1, buckets.get(0).getDocCount()); + assertEquals(946767600000L, ((ZonedDateTime)buckets.get(1).getKey()).toEpochSecond() * 1000); + assertEquals(1, buckets.get(1).getDocCount()); + + r = client().prepareSearch("nanos") + .addAggregation(dateHistogram("histo").field("date") + .interval(1000 * 60 * 60 * 24).timeZone(ZoneId.of("UTC"))) + .addDocValueField("date") + .get(); + assertSearchResponse(r); + + histogram = r.getAggregations().get("histo"); + buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + assertEquals(946684800000L, ((ZonedDateTime)buckets.get(0).getKey()).toEpochSecond() * 1000); + assertEquals(1, buckets.get(0).getDocCount()); + assertEquals(946771200000L, ((ZonedDateTime)buckets.get(1).getKey()).toEpochSecond() * 1000); + assertEquals(1, buckets.get(1).getDocCount()); + } + + public void testDateKeyFormatting() { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(dateHistogram("histo") + .field("date") + .dateHistogramInterval(DateHistogramInterval.MONTH) + .timeZone(ZoneId.of("America/Edmonton"))) + .get(); + + assertSearchResponse(response); + + InternalDateHistogram histogram = response.getAggregations().get("histo"); + List buckets = histogram.getBuckets(); + assertThat(buckets.get(0).getKeyAsString(), equalTo("2012-01-01T00:00:00.000-07:00")); + assertThat(buckets.get(1).getKeyAsString(), equalTo("2012-02-01T00:00:00.000-07:00")); + assertThat(buckets.get(2).getKeyAsString(), equalTo("2012-03-01T00:00:00.000-07:00")); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index ae6e4cc984fbf..1f295fc33570e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -38,12 +38,14 @@ import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -299,7 +301,9 @@ public void testSingleValueFieldWithStringDatesWithCustomFormat() throws Excepti public void testSingleValueFieldWithDateMath() throws Exception { ZoneId timezone = randomZone(); int timeZoneOffset = timezone.getRules().getOffset(date(2, 15).toInstant()).getTotalSeconds(); - String suffix = timezone.equals(ZoneOffset.UTC) ? "Z" : timezone.getId(); + //there is a daylight saving time change on 11th March so suffix will be different + String feb15Suffix = timeZoneOffset == 0 ? "Z" : date(2,15, timezone).format(DateTimeFormatter.ofPattern("xxx", Locale.ROOT)); + String mar15Suffix = timeZoneOffset == 0 ? "Z" : date(3,15, timezone).format(DateTimeFormatter.ofPattern("xxx", Locale.ROOT)); long expectedFirstBucketCount = timeZoneOffset < 0 ? 
3L : 2L; SearchResponse response = client().prepareSearch("idx") @@ -321,29 +325,29 @@ public void testSingleValueFieldWithDateMath() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000" + suffix)); + assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000" + feb15Suffix)); assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000" + suffix)); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount)); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000" + suffix + - "-2012-03-15T00:00:00.000" + suffix)); + assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix + + "-2012-03-15T00:00:00.000" + mar15Suffix)); assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000" + suffix)); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000" + suffix)); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); assertThat(bucket.getDocCount(), equalTo(2L)); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000" + suffix + "-*")); + assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix + "-*")); assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000" + suffix)); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 2L - expectedFirstBucketCount)); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgIT.java index 23a7231e269a1..2b7470f00a0df 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovAvgIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.collect.EvictingQueue; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.metrics.Avg; @@ -110,7 +111,12 @@ public String toString(){ @Override public void setupSuiteScopeCluster() throws Exception { - createIndex("idx"); + 
prepareCreate("idx").addMapping("type", + XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject(VALUE_FIELD).field("type", "double").endObject() + .endObject() + .endObject().endObject()).execute().get(); createIndex("idx_unmapped"); List builders = new ArrayList<>(); diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index e111abe0d5132..1467fd1f0971e 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -1715,9 +1715,11 @@ public void testHighlightNoMatchSize() throws IOException { assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some")); // We can also ask for a fragment longer than the input string and get the whole string - field.highlighterType("plain").noMatchSize(text.length() * 2); - response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); - assertHighlight(response, 0, "text", 0, 1, equalTo(text)); + for (String type : new String[] { "plain", "unified" }) { + field.highlighterType(type).noMatchSize(text.length() * 2).numOfFragments(0); + response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); + assertHighlight(response, 0, "text", 0, 1, equalTo(text)); + } field.highlighterType("fvh"); response = client().prepareSearch("test").highlighter(new HighlightBuilder().field(field)).get(); @@ -2945,6 +2947,29 @@ public void testWithNormalizer() throws Exception { } } + public void testDisableHighlightIdField() throws Exception { + assertAcked(prepareCreate("test") + .addMapping("doc", "keyword", "type=keyword")); + ensureGreen(); + + client().prepareIndex("test", "doc", "d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1") + .setSource("keyword", "Hello World") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + + for (String highlighterType : new String[] {"plain", "unified"}) { + SearchResponse searchResponse = client().prepareSearch() + .setQuery(matchQuery("_id", "d33f85bf1e51e84d9ab38948db9f3a068e1fe5294f1d8603914ac8c7bcc39ca1")) + .highlighter( + new HighlightBuilder().field(new Field("*") + .highlighterType(highlighterType).requireFieldMatch(false)) + ) + .get(); + assertHitCount(searchResponse, 1); + assertNull(searchResponse.getHits().getAt(0).getHighlightFields().get("_id")); + } + } + public static class MockAnalysisPlugin extends Plugin implements AnalysisPlugin { public final class MockSnowBall extends TokenFilter { diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java index 3cb71f6be235c..3d1d5b6876a65 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java @@ -420,6 +420,10 @@ public void testRandomGeoCollectionQuery() throws Exception { } } org.apache.lucene.geo.Polygon randomPoly = GeoTestUtil.nextPolygon(); + + assumeTrue("Skipping the check for the polygon with a degenerated dimension", + randomPoly.maxLat - randomPoly.minLat > 8.4e-8 && randomPoly.maxLon - randomPoly.minLon > 8.4e-8); + CoordinatesBuilder cb = new CoordinatesBuilder(); for (int i = 0; i 
< randomPoly.numPoints(); ++i) { cb.coordinate(randomPoly.getPolyLon(i), randomPoly.getPolyLat(i)); @@ -448,9 +452,6 @@ public void testRandomGeoCollectionQuery() throws Exception { geoShapeQueryBuilder.relation(ShapeRelation.INTERSECTS); SearchResponse result = client().prepareSearch("test").setQuery(geoShapeQueryBuilder).get(); assertSearchResponse(result); - assumeTrue("Skipping the check for the polygon with a degenerated dimension until " - +" https://issues.apache.org/jira/browse/LUCENE-8634 is fixed", - randomPoly.maxLat - randomPoly.minLat > 8.4e-8 && randomPoly.maxLon - randomPoly.minLon > 8.4e-8); assertTrue("query: " + geoShapeQueryBuilder.toString() + " doc: " + Strings.toString(docSource), result.getHits().getTotalHits().value > 0); } diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/ProfileScorerTests.java b/server/src/test/java/org/elasticsearch/search/profile/query/ProfileScorerTests.java new file mode 100644 index 0000000000000..fd72bdfa6de2d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/profile/query/ProfileScorerTests.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile.query; + +import org.apache.lucene.index.MultiReader; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class ProfileScorerTests extends ESTestCase { + + private static class FakeScorer extends Scorer { + + public float maxScore, minCompetitiveScore; + + protected FakeScorer(Weight weight) { + super(weight); + } + + @Override + public DocIdSetIterator iterator() { + throw new UnsupportedOperationException(); + } + + @Override + public float getMaxScore(int upTo) throws IOException { + return maxScore; + } + + @Override + public float score() throws IOException { + return 1f; + } + + @Override + public int docID() { + throw new UnsupportedOperationException(); + } + + @Override + public void setMinCompetitiveScore(float minScore) { + this.minCompetitiveScore = minScore; + } + } + + public void testPropagateMinCompetitiveScore() throws IOException { + Query query = new MatchAllDocsQuery(); + Weight weight = query.createWeight(new IndexSearcher(new MultiReader()), ScoreMode.TOP_SCORES, 1f); + FakeScorer fakeScorer = new FakeScorer(weight); + QueryProfileBreakdown profile = new QueryProfileBreakdown(); + ProfileWeight profileWeight = new ProfileWeight(query, weight, profile); + ProfileScorer profileScorer = new ProfileScorer(profileWeight, fakeScorer, profile); + profileScorer.setMinCompetitiveScore(0.42f); + assertEquals(0.42f, fakeScorer.minCompetitiveScore, 0f); + } + + public void testPropagateMaxScore() throws IOException { + Query query = new MatchAllDocsQuery(); + Weight weight = query.createWeight(new IndexSearcher(new MultiReader()), ScoreMode.TOP_SCORES, 1f); + FakeScorer fakeScorer = new FakeScorer(weight); + QueryProfileBreakdown profile = new QueryProfileBreakdown(); + ProfileWeight profileWeight = new ProfileWeight(query, weight, profile); + ProfileScorer profileScorer = new ProfileScorer(profileWeight, fakeScorer, profile); + profileScorer.setMinCompetitiveScore(0.42f); + fakeScorer.maxScore = 42f; + assertEquals(42f, profileScorer.getMaxScore(DocIdSetIterator.NO_MORE_DOCS), 0f); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index ac0152582352d..7007c7650f41b 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -664,6 +664,32 @@ public void testMatchQueryNumeric() throws Exception { expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch().setQuery(matchQuery("double", "2 3 4")).get()); } + public void testMatchQueryFuzzy() throws Exception { + assertAcked(prepareCreate("test").addMapping("_doc", "text", "type=text")); + + indexRandom(true, client().prepareIndex("test", "_doc", "1").setSource("text", "Unit"), + client().prepareIndex("test", "_doc", "2").setSource("text", "Unity")); + + SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness("0")).get(); + assertHitCount(searchResponse, 0L); + + searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness("1")).get(); 
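Illustrative sketch (not part of this change): the fuzziness assertions in this test follow the documented AUTO:[low],[high] behaviour, where the length of the query term picks the allowed edit distance. A minimal stand-alone illustration of that rule, using a hypothetical helper name:

class FuzzinessAutoSketch {
    // With AUTO:5,7 the 4-char query "uniy" allows 0 edits (hence 0 hits),
    // while the 5-char query "unify" allows 1 edit and matches "Unity".
    static int allowedEdits(int termLength, int low, int high) {
        if (termLength < low) {
            return 0;                          // shorter than `low`: exact match required
        }
        return termLength < high ? 1 : 2;      // [low, high): one edit; >= high: two edits
    }
}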
+ assertHitCount(searchResponse, 2L); + assertSearchHits(searchResponse, "1", "2"); + + searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness("AUTO")).get(); + assertHitCount(searchResponse, 2L); + assertSearchHits(searchResponse, "1", "2"); + + searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness("AUTO:5,7")).get(); + assertHitCount(searchResponse, 0L); + + searchResponse = client().prepareSearch().setQuery(matchQuery("text", "unify").fuzziness("AUTO:5,7")).get(); + assertHitCount(searchResponse, 1L); + assertSearchHits(searchResponse, "2"); + } + + public void testMultiMatchQuery() throws Exception { createIndex("test"); diff --git a/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java index ad3e9fc52e064..98c93c81e5351 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -901,6 +901,22 @@ public void testIgnoreUnmapped() throws Exception { .addSort(SortBuilders.fieldSort("kkk").unmappedType("keyword")) .get(); assertNoFailures(searchResponse); + + // nested field + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested.foo").unmappedType("keyword") + .setNestedSort(new NestedSortBuilder("nested").setNestedSort(new NestedSortBuilder("nested.foo")))) + .get(); + assertNoFailures(searchResponse); + + // nestedQuery + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .addSort(SortBuilders.fieldSort("nested.foo").unmappedType("keyword") + .setNestedSort(new NestedSortBuilder("nested").setFilter(QueryBuilders.termQuery("nested.foo", "abc")))) + .get(); + assertNoFailures(searchResponse); } public void testSortMVField() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java index 2c81a42463c39..d8c2cce0df16e 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java @@ -83,7 +83,7 @@ public class SuggestSearchIT extends ESIntegTestCase { // see #3196 public void testSuggestAcrossMultipleIndices() throws IOException { - createIndex("test"); + assertAcked(prepareCreate("test").addMapping("type1", "text", "type=text")); ensureGreen(); index("test", "type1", "1", "text", "abcd"); @@ -97,7 +97,7 @@ public void testSuggestAcrossMultipleIndices() throws IOException { .text("abcd"); logger.info("--> run suggestions with one index"); searchSuggest("test", termSuggest); - createIndex("test_1"); + assertAcked(prepareCreate("test_1").addMapping("type1", "text", "type=text")); ensureGreen(); index("test_1", "type1", "1", "text", "ab cd"); @@ -302,7 +302,7 @@ public void testUnmappedField() throws IOException, InterruptedException, Execut } public void testSimple() throws Exception { - createIndex("test"); + assertAcked(prepareCreate("test").addMapping("type1", "text", "type=text")); ensureGreen(); index("test", "type1", "1", "text", "abcd"); @@ -327,7 +327,7 @@ public void testSimple() throws Exception { } public void testEmpty() throws Exception { - createIndex("test"); + assertAcked(prepareCreate("test").addMapping("type1", "text", "type=text")); ensureGreen(); index("test", "type1", "1", "text", "bar"); @@ -346,7 +346,7 @@ public void 
testEmpty() throws Exception { } public void testWithMultipleCommands() throws Exception { - createIndex("test"); + assertAcked(prepareCreate("test").addMapping("typ1", "field1", "type=text", "field2", "type=text")); ensureGreen(); index("test", "typ1", "1", "field1", "prefix_abcd", "field2", "prefix_efgh"); diff --git a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index d5409821befd5..9fe7356877c8e 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -35,6 +35,7 @@ import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -74,6 +75,18 @@ public static long getFailureCount(String repository) { return failureCount; } + public static void assertFileCount(Path dir, int expectedCount) throws IOException { + final List found = new ArrayList<>(); + Files.walkFileTree(dir, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { + found.add(file); + return FileVisitResult.CONTINUE; + } + }); + assertEquals("Unexpected file count, found: [" + found + "].", expectedCount, found.size()); + } + public static int numberOfFiles(Path dir) throws IOException { final AtomicInteger count = new AtomicInteger(); Files.walkFileTree(dir, new SimpleFileVisitor() { diff --git a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java index 6f4f69ad67e88..4febd0695c936 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java @@ -238,8 +238,7 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS } protected BlobStore createTestBlobStore() throws IOException { - Settings settings = Settings.builder().build(); - return new FsBlobStore(settings, randomRepoPath()); + return new FsBlobStore(Settings.EMPTY, randomRepoPath(), false); } protected void randomCorruption(BlobContainer blobContainer, String blobName) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index b118d3a3d4933..a273a67362dfd 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -423,6 +423,7 @@ public void testSnapshotDuringNodeShutdown() throws Exception { logger.info("--> done"); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39852") public void testSnapshotWithStuckNode() throws Exception { logger.info("--> start 2 nodes"); ArrayList nodes = new ArrayList<>(); @@ -457,7 +458,7 @@ public void testSnapshotWithStuckNode() throws Exception { // Remove it from the list of available nodes nodes.remove(blockedNode); - int numberOfFilesBeforeSnapshot = numberOfFiles(repo); + assertFileCount(repo, 0); logger.info("--> snapshot"); client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") .setWaitForCompletion(false) 
@@ -493,8 +494,7 @@ public void testSnapshotWithStuckNode() throws Exception { // (2) index-0 (because we keep the previous version) and // (3) index-latest // (4) incompatible-snapshots - assertThat("not all files were deleted during snapshot cancellation", - numberOfFilesBeforeSnapshot, equalTo(numberOfFiles(repo) - 4)); + assertFileCount(repo, 4); logger.info("--> done"); } @@ -855,7 +855,6 @@ public void testMasterShutdownDuringSnapshot() throws Exception { assertEquals(0, snapshotInfo.failedShards()); } - public void testMasterAndDataShutdownDuringSnapshot() throws Exception { logger.info("--> starting three master nodes and two data nodes"); internalCluster().startMasterOnlyNodes(3); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 5ca7cbc6aef50..ffb34900b2f43 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -2210,10 +2210,11 @@ public void testSnapshotMoreThanOnce() throws ExecutionException, InterruptedExc { SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo") .setSnapshots("test-2").get().getSnapshots().get(0); + Settings settings = client.admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test"); List shards = snapshotStatus.getShards(); for (SnapshotIndexShardStatus status : shards) { // we flush before the snapshot such that we have to process the segments_N files plus the .del file - if (INDEX_SOFT_DELETES_SETTING.get(indexSettings)) { + if (INDEX_SOFT_DELETES_SETTING.get(settings)) { // soft-delete generates DV files. assertThat(status.getStats().getProcessedFileCount(), greaterThan(2)); } else { @@ -2963,6 +2964,108 @@ public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception { assertAcked(client().admin().cluster().prepareDeleteSnapshot("test-repo", snapshotInfo.snapshotId().getName()).get()); } + /** + * Tests that a shard snapshot with a corrupted shard index file can still be used for restore and incremental snapshots. 
+     */
+ */ + public void testSnapshotWithCorruptedShardIndexFile() throws Exception { + final Client client = client(); + final Path repo = randomRepoPath(); + final String indexName = "test-idx"; + final int nDocs = randomIntBetween(1, 10); + + logger.info("--> creating index [{}] with [{}] documents in it", indexName, nDocs); + assertAcked(prepareCreate(indexName).setSettings(Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))); + + final IndexRequestBuilder[] documents = new IndexRequestBuilder[nDocs]; + for (int j = 0; j < nDocs; j++) { + documents[j] = client.prepareIndex(indexName, "_doc").setSource("foo", "bar"); + } + indexRandom(true, documents); + flushAndRefresh(); + + logger.info("--> creating repository"); + assertAcked(client().admin().cluster().preparePutRepository("test-repo") + .setType("fs") + .setSettings(Settings.builder() + .put("location", repo))); + + final String snapshot1 = "test-snap-1"; + logger.info("--> creating snapshot [{}]", snapshot1); + final SnapshotInfo snapshotInfo = client().admin().cluster().prepareCreateSnapshot("test-repo", snapshot1) + .setWaitForCompletion(true) + .get() + .getSnapshotInfo(); + assertThat(snapshotInfo.failedShards(), equalTo(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.indices(), hasSize(1)); + + RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); + Repository repository = service.repository("test-repo"); + + final RepositoryData repositoryData = getRepositoryData(repository); + final Map indexIds = repositoryData.getIndices(); + assertThat(indexIds.size(), equalTo(1)); + + final IndexId corruptedIndex = indexIds.get(indexName); + final Path shardIndexFile = repo.resolve("indices") + .resolve(corruptedIndex.getId()).resolve("0") + .resolve("index-0"); + + logger.info("--> truncating shard index file [{}]", shardIndexFile); + try (SeekableByteChannel outChan = Files.newByteChannel(shardIndexFile, StandardOpenOption.WRITE)) { + outChan.truncate(randomInt(10)); + } + + logger.info("--> verifying snapshot state for [{}]", snapshot1); + List snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").get().getSnapshots(); + assertThat(snapshotInfos.size(), equalTo(1)); + assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo(snapshot1)); + + logger.info("--> deleting index [{}]", indexName); + assertAcked(client().admin().indices().prepareDelete(indexName)); + + logger.info("--> restoring snapshot [{}]", snapshot1); + client().admin().cluster().prepareRestoreSnapshot("test-repo", snapshot1) + .setRestoreGlobalState(randomBoolean()) + .setWaitForCompletion(true) + .get(); + ensureGreen(); + + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), nDocs); + + logger.info("--> indexing [{}] more documents into [{}]", nDocs, indexName); + for (int j = 0; j < nDocs; j++) { + documents[j] = client.prepareIndex(indexName, "_doc").setSource("foo2", "bar2"); + } + indexRandom(true, documents); + + final String snapshot2 = "test-snap-2"; + logger.info("--> creating snapshot [{}]", snapshot2); + final SnapshotInfo snapshotInfo2 = client().admin().cluster().prepareCreateSnapshot("test-repo", snapshot2) + .setWaitForCompletion(true) + .get() + .getSnapshotInfo(); + assertThat(snapshotInfo2.state(), equalTo(SnapshotState.SUCCESS)); + 
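Illustrative sketch (not part of this change): the corruption step above shortens the shard-level index-0 blob in place through a writable channel. A minimal stand-alone version of that truncation technique against a throwaway temp file, using only java.nio:

import java.nio.channels.SeekableByteChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

class TruncateSketch {
    public static void main(String[] args) throws Exception {
        Path file = Files.createTempFile("index-0", ".bin");
        Files.write(file, "some serialized shard index".getBytes(StandardCharsets.UTF_8));
        // Truncate the blob to a handful of bytes, as the test does with randomInt(10).
        try (SeekableByteChannel channel = Files.newByteChannel(file, StandardOpenOption.WRITE)) {
            channel.truncate(5);
        }
        System.out.println(Files.size(file));   // prints 5: no longer readable as a shard index
    }
}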
assertThat(snapshotInfo2.failedShards(), equalTo(0)); + assertThat(snapshotInfo2.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo2.indices(), hasSize(1)); + + logger.info("--> deleting index [{}]", indexName); + assertAcked(client().admin().indices().prepareDelete(indexName)); + + logger.info("--> restoring snapshot [{}]", snapshot2); + client().admin().cluster().prepareRestoreSnapshot("test-repo", snapshot2) + .setRestoreGlobalState(randomBoolean()) + .setWaitForCompletion(true) + .get(); + + ensureGreen(); + + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), 2 * nDocs); + } + public void testCannotCreateSnapshotsWithSameName() throws Exception { final String repositoryName = "test-repo"; final String snapshotName = "test-snap"; diff --git a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java index b277de64a9d9f..92a61cda9e98d 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java @@ -19,8 +19,10 @@ package org.elasticsearch.threadpool; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; +import static org.elasticsearch.threadpool.ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING; import static org.hamcrest.CoreMatchers.equalTo; public class ThreadPoolTests extends ESTestCase { @@ -59,4 +61,10 @@ public void testAbsoluteTime() throws Exception { threadPool.close(); } } + + public void testEstimatedTimeIntervalSettingAcceptsOnlyZeroAndPositiveTime() { + Settings settings = Settings.builder().put("thread_pool.estimated_time_interval", -1).build(); + Exception e = expectThrows(IllegalArgumentException.class, () -> ESTIMATED_TIME_INTERVAL_SETTING.get(settings)); + assertEquals("failed to parse value [-1] for setting [thread_pool.estimated_time_interval], must be >= [0ms]", e.getMessage()); + } } diff --git a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java index 578521190e2ff..c1dd512e0232d 100644 --- a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java @@ -55,7 +55,7 @@ public void createConnectionManager() { .build(); threadPool = new ThreadPool(settings); transport = mock(Transport.class); - connectionManager = new ConnectionManager(settings, transport, threadPool); + connectionManager = new ConnectionManager(settings, transport); TimeValue oneSecond = new TimeValue(1000); TimeValue oneMinute = TimeValue.timeValueMinutes(1); connectionProfile = ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, oneSecond, oneSecond, diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 9eddac80a17c0..438be18b01b48 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -104,6 +104,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + private final ConnectionProfile profile = 
RemoteClusterService.buildConnectionProfileFromSettings(Settings.EMPTY, "cluster"); @Override public void tearDown() throws Exception { @@ -164,7 +165,7 @@ public static MockTransportService startTransport( builder.add(node); } ClusterState build = ClusterState.builder(clusterName).nodes(builder.build()).build(); - channel.sendResponse(new ClusterStateResponse(clusterName, build, 0L, false)); + channel.sendResponse(new ClusterStateResponse(clusterName, build, false)); }); newService.start(); newService.acceptIncomingRequests(); @@ -191,7 +192,8 @@ public void testRemoteProfileIsUsedForLocalCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null)) { + Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null, + profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes(seedNode)); assertTrue(connectionManager.nodeConnected(seedNode)); @@ -233,7 +235,8 @@ public void testRemoteProfileIsUsedForRemoteCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null)) { + Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null, + profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes(seedNode)); assertTrue(connectionManager.nodeConnected(seedNode)); @@ -286,7 +289,8 @@ public void testDiscoverSingleNode() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null)) { + Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null, + profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes(seedNode)); assertTrue(connectionManager.nodeConnected(seedNode)); @@ -318,7 +322,7 @@ public void testDiscoverSingleNodeWithIncompatibleSeed() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes, service, Integer.MAX_VALUE, n -> true, null, profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes); assertTrue(connectionManager.nodeConnected(seedNode)); @@ -346,7 +350,7 @@ public void testNodeDisconnected() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null, profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, 
seedNodes(seedNode)); assertTrue(connectionManager.nodeConnected(seedNode)); @@ -396,7 +400,7 @@ public void testFilterDiscoveredNodes() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false, null)) { + seedNodes(seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false, null, profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes(seedNode)); if (rejectedNode.equals(seedNode)) { @@ -461,7 +465,8 @@ public void testConnectWithIncompatibleTransports() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null)) { + Arrays.asList(Tuple.tuple(seedNode.toString(), () -> seedNode)), service, Integer.MAX_VALUE, n -> true, null, + profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); expectThrows( Exception.class, @@ -502,7 +507,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, } }; - ConnectionManager delegate = new ConnectionManager(Settings.EMPTY, service.transport, threadPool); + ConnectionManager delegate = new ConnectionManager(Settings.EMPTY, service.transport); StubbableConnectionManager connectionManager = new StubbableConnectionManager(delegate, Settings.EMPTY, service.transport, threadPool); @@ -559,7 +564,7 @@ public void run() { CountDownLatch listenerCalled = new CountDownLatch(1); AtomicReference exceptionReference = new AtomicReference<>(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null, profile)) { ActionListener listener = ActionListener.wrap(x -> { listenerCalled.countDown(); fail("expected exception"); @@ -614,7 +619,7 @@ public void testTriggerUpdatesConcurrently() throws IOException, InterruptedExce service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes, service, Integer.MAX_VALUE, n -> true, null, profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; @@ -694,7 +699,7 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes, service, Integer.MAX_VALUE, n -> true, null, profile)) { int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); @@ -782,7 +787,7 @@ public void testGetConnectionInfo() throws Exception { service.acceptIncomingRequests(); int maxNumConnections = randomIntBetween(1, 5); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - 
seedNodes, service, maxNumConnections, n -> true, null)) { + seedNodes, service, maxNumConnections, n -> true, null, profile)) { // test no nodes connected RemoteConnectionInfo remoteConnectionInfo = assertSerialization(connection.getConnectionInfo()); assertNotNull(remoteConnectionInfo); @@ -914,7 +919,7 @@ public void testEnsureConnected() throws IOException, InterruptedException { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null, profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); assertFalse(connectionManager.nodeConnected(seedNode)); assertFalse(connectionManager.nodeConnected(discoverableNode)); @@ -964,7 +969,7 @@ public void testCollectNodes() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null, profile)) { if (randomBoolean()) { updateSeedNodes(connection, seedNodes(seedNode)); } @@ -1012,7 +1017,7 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes, service, Integer.MAX_VALUE, n -> true, null, profile)) { final int numGetThreads = randomIntBetween(4, 10); final Thread[] getThreads = new Thread[numGetThreads]; final int numModifyingThreads = randomIntBetween(4, 10); @@ -1100,7 +1105,7 @@ public void testClusterNameIsChecked() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null)) { + seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null, profile)) { ConnectionManager connectionManager = connection.getConnectionManager(); updateSeedNodes(connection, seedNodes(seedNode)); assertTrue(connectionManager.nodeConnected(seedNode)); @@ -1156,7 +1161,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, } }; - ConnectionManager delegate = new ConnectionManager(Settings.EMPTY, service.transport, threadPool); + ConnectionManager delegate = new ConnectionManager(Settings.EMPTY, service.transport); StubbableConnectionManager connectionManager = new StubbableConnectionManager(delegate, Settings.EMPTY, service.transport, threadPool); @@ -1214,7 +1219,7 @@ public void testLazyResolveTransportAddress() throws Exception { return seedNode; }); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true, null)) { + Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true, null, profile)) { updateSeedNodes(connection, Arrays.asList(seedSupplier)); // Closing connections leads to RemoteClusterConnection.ConnectHandler.collectRemoteNodes // being called again so we try to resolve the same seed node's host twice @@ -1246,7 +1251,7 @@ public void testProxyMode() throws Exception { 
RemoteClusterAware.buildSeedNode("some-remote-cluster", "node_0:" + randomIntBetween(1, 10000), true)); assertEquals("node_0", seedSupplier.v2().get().getAttributes().get("server_name")); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true, proxyAddress)) { + Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true, proxyAddress, profile)) { updateSeedNodes(connection, Arrays.asList(seedSupplier), proxyAddress); assertEquals(2, connection.getNumNodesConnected()); assertNotNull(connection.getConnection(discoverableTransport.getLocalDiscoNode())); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 2407106273f3f..e24a099596d61 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -40,6 +40,7 @@ import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; @@ -58,12 +59,12 @@ import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasToString; -import static org.hamcrest.Matchers.startsWith; +import static org.hamcrest.Matchers.instanceOf; public class RemoteClusterServiceTests extends ESTestCase { @@ -400,11 +401,7 @@ public void testCustomPingSchedule() throws IOException { TimeValue.timeValueSeconds(randomIntBetween(1, 10)); builder.put("cluster.remote.cluster_2.transport.ping_schedule", pingSchedule2); try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) { - assertFalse(service.isCrossClusterSearchEnabled()); service.initializeRemoteClusters(); - assertTrue(service.isCrossClusterSearchEnabled()); - service.updateRemoteCluster("cluster_1", Collections.singletonList(cluster1Seed.getAddress().toString()), null); - assertTrue(service.isCrossClusterSearchEnabled()); assertTrue(service.isRemoteClusterRegistered("cluster_1")); RemoteClusterConnection remoteClusterConnection1 = service.getRemoteClusterConnection("cluster_1"); assertEquals(pingSchedule1, remoteClusterConnection1.getConnectionManager().getConnectionProfile().getPingInterval()); @@ -415,6 +412,40 @@ public void testCustomPingSchedule() throws IOException { } } + public void testChangeSettings() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT)) { + DiscoveryNode cluster1Seed = cluster1Transport.getLocalDiscoNode(); + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try (MockTransportService transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, + threadPool, null)) { + transportService.start(); + transportService.acceptIncomingRequests(); + Settings.Builder builder = Settings.builder(); + builder.putList("cluster.remote.cluster_1.seeds", 
cluster1Seed.getAddress().toString()); + try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) { + service.initializeRemoteClusters(); + RemoteClusterConnection remoteClusterConnection = service.getRemoteClusterConnection("cluster_1"); + Settings.Builder settingsChange = Settings.builder(); + TimeValue pingSchedule = TimeValue.timeValueSeconds(randomIntBetween(6, 8)); + settingsChange.put("cluster.remote.cluster_1.transport.ping_schedule", pingSchedule); + boolean compressionEnabled = true; + settingsChange.put("cluster.remote.cluster_1.transport.compress", compressionEnabled); + settingsChange.putList("cluster.remote.cluster_1.seeds", cluster1Seed.getAddress().toString()); + service.updateRemoteCluster("cluster_1", settingsChange.build()); + assertBusy(remoteClusterConnection::isClosed); + + remoteClusterConnection = service.getRemoteClusterConnection("cluster_1"); + ConnectionProfile connectionProfile = remoteClusterConnection.getConnectionManager().getConnectionProfile(); + assertEquals(pingSchedule, connectionProfile.getPingInterval()); + assertEquals(compressionEnabled, connectionProfile.getCompressionEnabled()); + } + } + } + } + public void testRemoteNodeAttribute() throws IOException, InterruptedException { final Settings settings = Settings.builder().put("cluster.remote.node.attr", "gateway").build(); @@ -460,14 +491,14 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException { service.updateRemoteCluster( "cluster_1", Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), null, - connectionListener(firstLatch)); + genericProfile("cluster_1"), connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), null, - connectionListener(secondLatch)); + genericProfile("cluster_2"), connectionListener(secondLatch)); secondLatch.await(); assertTrue(service.isCrossClusterSearchEnabled()); @@ -525,14 +556,14 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { service.updateRemoteCluster( "cluster_1", Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), null, - connectionListener(firstLatch)); + genericProfile("cluster_1"), connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), null, - connectionListener(secondLatch)); + genericProfile("cluster_2"), connectionListener(secondLatch)); secondLatch.await(); assertTrue(service.isCrossClusterSearchEnabled()); @@ -552,6 +583,7 @@ private ActionListener connectionListener(final CountDownLatch latch) { return ActionListener.wrap(x -> latch.countDown(), x -> fail()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41067") public void testCollectNodes() throws InterruptedException, IOException { final Settings settings = Settings.EMPTY; final List knownNodes_c1 = new CopyOnWriteArrayList<>(); @@ -595,17 +627,17 @@ public void testCollectNodes() throws InterruptedException, IOException { assertFalse(service.isCrossClusterSearchEnabled()); final CountDownLatch firstLatch = new CountDownLatch(1); - service.updateRemoteCluster( - "cluster_1", + + service.updateRemoteCluster("cluster_1", 
Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), null, - connectionListener(firstLatch)); + genericProfile("cluster_1"), connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), null, - connectionListener(secondLatch)); + genericProfile("cluster_2"), connectionListener(secondLatch)); secondLatch.await(); CountDownLatch latch = new CountDownLatch(1); service.collectNodes(new HashSet<>(Arrays.asList("cluster_1", "cluster_2")), @@ -690,15 +722,10 @@ public void onFailure(Exception e) { failLatch.await(); assertNotNull(ex.get()); if (ex.get() instanceof IllegalStateException) { - assertThat(ex.get().getMessage(), anyOf(equalTo("no seed node left"), startsWith - ("No node available for cluster:"))); + assertThat(ex.get().getMessage(), equalTo("no seed node left")); } else { - if (ex.get() instanceof TransportException == false) { - // we have an issue for this see #25301 - logger.error("expected TransportException but got a different one see #25301", ex.get()); - } - assertTrue("expected TransportException but got a different one [" + ex.get().getClass().toString() + "]", - ex.get() instanceof TransportException); + assertThat(ex.get(), + either(instanceOf(TransportException.class)).or(instanceOf(NoSuchRemoteClusterException.class))); } } } @@ -851,6 +878,81 @@ public void testGetNodePredicatesCombination() { } } + public void testReconnectWhenSeedsNodesAreUpdated() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService cluster_node_0 = startTransport("cluster_node_0", knownNodes, Version.CURRENT); + MockTransportService cluster_node_1 = startTransport("cluster_node_1", knownNodes, Version.CURRENT)) { + + final DiscoveryNode node0 = cluster_node_0.getLocalDiscoNode(); + final DiscoveryNode node1 = cluster_node_1.getLocalDiscoNode(); + knownNodes.add(node0); + knownNodes.add(node1); + Collections.shuffle(knownNodes, random()); + + try (MockTransportService transportService = + MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + transportService.start(); + transportService.acceptIncomingRequests(); + + final Settings.Builder builder = Settings.builder(); + builder.putList("cluster.remote.cluster_test.seeds", Collections.singletonList(node0.getAddress().toString())); + try (RemoteClusterService service = new RemoteClusterService(builder.build(), transportService)) { + assertFalse(service.isCrossClusterSearchEnabled()); + service.initializeRemoteClusters(); + assertTrue(service.isCrossClusterSearchEnabled()); + + final RemoteClusterConnection firstRemoteClusterConnection = service.getRemoteClusterConnection("cluster_test"); + assertTrue(firstRemoteClusterConnection.isNodeConnected(node0)); + assertTrue(firstRemoteClusterConnection.isNodeConnected(node1)); + assertEquals(2, firstRemoteClusterConnection.getNumNodesConnected()); + assertFalse(firstRemoteClusterConnection.isClosed()); + + final CountDownLatch firstLatch = new CountDownLatch(1); + service.updateRemoteCluster( + "cluster_test", + Collections.singletonList(node0.getAddress().toString()), null, + genericProfile("cluster_test"), connectionListener(firstLatch)); + firstLatch.await(); + + assertTrue(service.isCrossClusterSearchEnabled()); + assertTrue(firstRemoteClusterConnection.isNodeConnected(node0)); + 
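Illustrative sketch (not part of this change): once the seed list is swapped below, the test waits for the old connection to close by polling with assertBusy. A simplified stand-in for that poll-until-the-assertion-passes idea (not ESTestCase's actual implementation):

class PollUntilSketch {
    // Repeatedly run the check until it stops throwing or the timeout elapses.
    static void pollUntil(Runnable check, long timeoutMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (true) {
            try {
                check.run();
                return;                          // assertion passed
            } catch (AssertionError e) {
                if (System.currentTimeMillis() >= deadline) {
                    throw e;                     // give up and surface the last failure
                }
                Thread.sleep(100);               // back off briefly before retrying
            }
        }
    }
}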
assertTrue(firstRemoteClusterConnection.isNodeConnected(node1)); + assertEquals(2, firstRemoteClusterConnection.getNumNodesConnected()); + assertFalse(firstRemoteClusterConnection.isClosed()); + assertSame(firstRemoteClusterConnection, service.getRemoteClusterConnection("cluster_test")); + + final List newSeeds = new ArrayList<>(); + newSeeds.add(node1.getAddress().toString()); + if (randomBoolean()) { + newSeeds.add(node0.getAddress().toString()); + Collections.shuffle(newSeeds, random()); + } + + final CountDownLatch secondLatch = new CountDownLatch(1); + service.updateRemoteCluster( + "cluster_test", + newSeeds, null, + genericProfile("cluster_test"), connectionListener(secondLatch)); + secondLatch.await(); + + assertTrue(service.isCrossClusterSearchEnabled()); + assertBusy(() -> { + assertFalse(firstRemoteClusterConnection.isNodeConnected(node0)); + assertFalse(firstRemoteClusterConnection.isNodeConnected(node1)); + assertEquals(0, firstRemoteClusterConnection.getNumNodesConnected()); + assertTrue(firstRemoteClusterConnection.isClosed()); + }); + + final RemoteClusterConnection secondRemoteClusterConnection = service.getRemoteClusterConnection("cluster_test"); + assertTrue(secondRemoteClusterConnection.isNodeConnected(node0)); + assertTrue(secondRemoteClusterConnection.isNodeConnected(node1)); + assertEquals(2, secondRemoteClusterConnection.getNumNodesConnected()); + assertFalse(secondRemoteClusterConnection.isClosed()); + } + } + } + } + public void testRemoteClusterWithProxy() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService cluster_1_node0 = startTransport("cluster_1_node0", knownNodes, Version.CURRENT); @@ -916,7 +1018,7 @@ private static void updateRemoteCluster(RemoteClusterService service, String clu exceptionAtomicReference.set(x); latch.countDown(); }); - service.updateRemoteCluster(clusterAlias, addresses, proxyAddress, listener); + service.updateRemoteCluster(clusterAlias, addresses, proxyAddress, genericProfile(clusterAlias), listener); latch.await(); if (exceptionAtomicReference.get() != null) { throw exceptionAtomicReference.get(); @@ -958,4 +1060,8 @@ public void testSkipUnavailable() { } } } + + private static ConnectionProfile genericProfile(String clusterName) { + return RemoteClusterService.buildConnectionProfileFromSettings(Settings.EMPTY, clusterName); + } } diff --git a/server/src/test/resources/format-v3-elasticsearch.keystore b/server/src/test/resources/format-v3-elasticsearch.keystore new file mode 100644 index 0000000000000..6b845c7e9d6ff Binary files /dev/null and b/server/src/test/resources/format-v3-elasticsearch.keystore differ diff --git a/settings.gradle b/settings.gradle index 18f5f63332e00..7532230e7b81e 100644 --- a/settings.gradle +++ b/settings.gradle @@ -18,15 +18,27 @@ List projects = [ 'distribution:archives:integ-test-zip', 'distribution:archives:oss-windows-zip', 'distribution:archives:windows-zip', + 'distribution:archives:oss-no-jdk-windows-zip', + 'distribution:archives:no-jdk-windows-zip', 'distribution:archives:oss-darwin-tar', 'distribution:archives:darwin-tar', + 'distribution:archives:oss-no-jdk-darwin-tar', + 'distribution:archives:no-jdk-darwin-tar', 'distribution:archives:oss-linux-tar', 'distribution:archives:linux-tar', + 'distribution:archives:oss-no-jdk-linux-tar', + 'distribution:archives:no-jdk-linux-tar', 'distribution:docker', + 'distribution:docker:oss-docker-build-context', + 'distribution:docker:docker-build-context', 'distribution:packages:oss-deb', 'distribution:packages:deb', 
+ 'distribution:packages:oss-no-jdk-deb', + 'distribution:packages:no-jdk-deb', 'distribution:packages:oss-rpm', 'distribution:packages:rpm', + 'distribution:packages:oss-no-jdk-rpm', + 'distribution:packages:no-jdk-rpm', 'distribution:bwc:bugfix', 'distribution:bwc:maintenance', 'distribution:bwc:minor', diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 3f08ca7970ca7..5dab9d99915b5 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -19,24 +19,9 @@ apply plugin: 'elasticsearch.build' -versions << [ - 'hadoop2': '2.8.1' -] - -// we create MiniHdfsCluster with the hadoop artifact dependencies { - compile "org.apache.hadoop:hadoop-minicluster:${versions.hadoop2}" -} - -// for testing, until fixtures are actually debuggable. -// gradle hides *EVERYTHING* so you have no clue what went wrong. -task hdfs(type: JavaExec) { - classpath = sourceSets.test.compileClasspath + sourceSets.test.output - main = "hdfs.MiniHDFS" - args = [ 'build/fixtures/hdfsFixture' ] + compile "org.apache.hadoop:hadoop-minicluster:2.8.1" } -// just a test fixture: we aren't using jars in releases +test.enabled = false thirdPartyAudit.enabled = false -// TODO: add a simple HDFS client test for this fixture -unitTest.enabled = false diff --git a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java index ce7401fe25cae..01315cdab01ca 100644 --- a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java +++ b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java @@ -98,7 +98,6 @@ public static void main(String[] args) throws Exception { UserGroupInformation.setConfiguration(cfg); - // TODO: remove hardcoded port! MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg); if (secure) { builder.nameNodePort(9998); diff --git a/test/fixtures/krb5kdc-fixture/Dockerfile b/test/fixtures/krb5kdc-fixture/Dockerfile new file mode 100644 index 0000000000000..50de6334b9c78 --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 +ADD . /fixture +RUN echo kerberos.build.elastic.co > /etc/hostname && echo "127.0.0.1 kerberos.build.elastic.co" >> /etc/hosts +RUN bash /fixture/src/main/resources/provision/installkdc.sh + +EXPOSE 88 +EXPOSE 88/udp + +CMD sleep infinity \ No newline at end of file diff --git a/test/fixtures/krb5kdc-fixture/Vagrantfile b/test/fixtures/krb5kdc-fixture/Vagrantfile deleted file mode 100644 index 72be4dad9cbe5..0000000000000 --- a/test/fixtures/krb5kdc-fixture/Vagrantfile +++ /dev/null @@ -1,53 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -# This Vagrantfile exists to define a virtual machine running MIT's Kerberos 5 -# for usage as a testing fixture for the build process. -# -# In order to connect to the KDC process on this virtual machine, find and use -# the rendered krb5.conf file in the build output directory (build/conf). -# -# In order to provision principals in the KDC, use the provided addprinc.sh -# script with vagrant's ssh facility: -# -# vagrant ssh -c /vagrant/src/main/resources/provision/addprinc.sh principal -# -# You will find the newly created principal's keytab file in the build output -# directory (build/keytabs). Principal creation is idempotent, and will recopy -# existing user keytabs from the KDC if they already exist. - -Vagrant.configure("2") do |config| - - config.vm.define "krb5kdc" do |config| - config.vm.box = "elastic/ubuntu-14.04-x86_64" - end - - config.vm.hostname = "kerberos.build.elastic.co" - - if Vagrant.has_plugin?("vagrant-cachier") - config.cache.scope = :box - end - - config.vm.network "forwarded_port", guest: 88, host: 60088, protocol: "tcp" - config.vm.network "forwarded_port", guest: 88, host: 60088, protocol: "udp" - - config.vm.provision "shell", path: "src/main/resources/provision/installkdc.sh" - -end diff --git a/test/fixtures/krb5kdc-fixture/build.gradle b/test/fixtures/krb5kdc-fixture/build.gradle index 685483d534771..9787c3527af71 100644 --- a/test/fixtures/krb5kdc-fixture/build.gradle +++ b/test/fixtures/krb5kdc-fixture/build.gradle @@ -16,68 +16,41 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.test.fixtures' -apply plugin: 'elasticsearch.build' - -Map vagrantEnvVars = [ - 'VAGRANT_CWD' : "${project.projectDir.absolutePath}", - 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', - 'VAGRANT_PROJECT_DIR' : "${project.projectDir.absolutePath}" -] - -String box = "krb5kdc" - -List defaultPrincipals = [ "elasticsearch" ] - -task update(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'box' - subcommand 'update' - boxName box - environmentVars vagrantEnvVars +// installKDC uses tabs in it for the Kerberos ACL file. +// Ignore it for pattern checking. 
+forbiddenPatterns { + exclude "**/installkdc.sh" } -task up(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'up' - args '--provision', '--provider', 'virtualbox' - boxName box - environmentVars vagrantEnvVars - dependsOn update -} +List services = ["peppa", "hdfs"] -task addDefaultPrincipals { - dependsOn up +preProcessFixture.doLast { + // We need to create these up-front because if docker creates them they will be owned by root and we won't be + // able to clean them up + services.each { file("${buildDir}/shared/${it}").mkdirs() } } -for (String principal : defaultPrincipals) { - Task addTask = project.tasks.create("addPrincipal#${principal}", org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $principal" - boxName box - environmentVars vagrantEnvVars - dependsOn up +postProcessFixture { + inputs.dir("${buildDir}/shared") + services.each { service -> + File confTemplate = file("${buildDir}/shared/${service}/krb5.conf.template") + File confFile = file("${buildDir}/shared/${service}/krb5.conf") + outputs.file(confFile) + doLast { + assert confTemplate.exists() + String confContents = confTemplate.text + .replace("\${MAPPED_PORT}", "${ext."test.fixtures.${service}.udp.88"}") + confFile.text = confContents + } } - addDefaultPrincipals.dependsOn(addTask) } -task halt(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'halt' - boxName box - environmentVars vagrantEnvVars -} - -task destroy(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'destroy' - args '-f' - boxName box - environmentVars vagrantEnvVars - dependsOn halt -} +// https://github.com/elastic/elasticsearch/issues/40624 +buildFixture.enabled = false -thirdPartyAudit.enabled = false -unitTest.enabled = false +project.ext.krb5Conf = { service -> file("$buildDir/shared/${service}/krb5.conf") } +project.ext.krb5Keytabs = { service, fileName -> file("$buildDir/shared/${service}/keytabs/${fileName}") } -// installKDC uses tabs in it for the Kerberos ACL file. -// Ignore it for pattern checking. -forbiddenPatterns { - exclude "**/installkdc.sh" -} +test.enabled = false diff --git a/test/fixtures/krb5kdc-fixture/docker-compose.yml b/test/fixtures/krb5kdc-fixture/docker-compose.yml new file mode 100644 index 0000000000000..4d018dd6c3e08 --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/docker-compose.yml @@ -0,0 +1,24 @@ +version: '3' +services: + peppa: + hostname: kerberos.build.elastic.co + build: + context: . + dockerfile: Dockerfile + command: "bash /fixture/src/main/resources/provision/peppa.sh" + volumes: + - ./build/shared/peppa:/fixture/build + ports: + - "4444" + - "88/udp" + hdfs: + hostname: kerberos.build.elastic.co + build: + context: . 
+ dockerfile: Dockerfile + command: "bash /fixture/src/main/resources/provision/hdfs.sh" + volumes: + - ./build/shared/hdfs:/fixture/build + ports: + - "4444" + - "88/udp" diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh index d0d1570ae299a..9fc2a0735d666 100755 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh @@ -19,6 +19,9 @@ set -e +krb5kdc +kadmind + if [[ $# -lt 1 ]]; then echo 'Usage: addprinc.sh principalName [password]' echo ' principalName user principal name without realm' @@ -30,7 +33,7 @@ PRINC="$1" PASSWD="$2" USER=$(echo $PRINC | tr "/" "_") -VDIR=/vagrant +VDIR=/fixture RESOURCES=$VDIR/src/main/resources PROV_DIR=$RESOURCES/provision ENVPROP_FILE=$RESOURCES/env.properties @@ -64,3 +67,9 @@ else sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -pw $PASSWD $PRINC" fi fi + +echo "Copying conf to local" +# make the configuration available externally +cp -v $LOCALSTATEDIR/krb5.conf $BUILD_DIR/krb5.conf.template +# We are running as root in the container, allow non root users running the container to be able to clean these up +chmod -R 777 $BUILD_DIR \ No newline at end of file diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh new file mode 100644 index 0000000000000..ef5bba076444c --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -e + +addprinc.sh "elasticsearch" +addprinc.sh "hdfs/hdfs.build.elastic.co" + +# Use this as a signal that setup is complete +python3 -m http.server 4444 & + +sleep infinity \ No newline at end of file diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh index 2dc8ed92c9462..51af7984ce476 100755 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh @@ -22,32 +22,15 @@ set -e # KDC installation steps and considerations based on https://web.mit.edu/kerberos/krb5-latest/doc/admin/install_kdc.html # and helpful input from https://help.ubuntu.com/community/Kerberos -VDIR=/vagrant +VDIR=/fixture RESOURCES=$VDIR/src/main/resources PROV_DIR=$RESOURCES/provision ENVPROP_FILE=$RESOURCES/env.properties -BUILD_DIR=$VDIR/build -CONF_DIR=$BUILD_DIR/conf -KEYTAB_DIR=$BUILD_DIR/keytabs LOCALSTATEDIR=/etc LOGDIR=/var/log/krb5 MARKER_FILE=/etc/marker -# Output location for our rendered configuration files and keytabs -mkdir -p $BUILD_DIR -rm -rf $BUILD_DIR/* -mkdir -p $CONF_DIR -mkdir -p $KEYTAB_DIR - -if [ -f $MARKER_FILE ]; then - echo "Already provisioned..." - echo "Recopying configuration files..." 
- cp $LOCALSTATEDIR/krb5.conf $CONF_DIR/krb5.conf - cp $LOCALSTATEDIR/krb5kdc/kdc.conf $CONF_DIR/kdc.conf - exit 0; -fi - # Pull environment information REALM_NAME=$(cat $ENVPROP_FILE | grep realm= | cut -d '=' -f 2) KDC_NAME=$(cat $ENVPROP_FILE | grep kdc= | cut -d '=' -f 2) @@ -60,7 +43,7 @@ sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5.conf sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5.conf sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5.conf sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf -cp $LOCALSTATEDIR/krb5.conf $CONF_DIR/krb5.conf + # Transfer and interpolate the kdc.conf mkdir -p $LOCALSTATEDIR/krb5kdc @@ -69,7 +52,6 @@ sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf -cp $LOCALSTATEDIR/krb5kdc/kdc.conf $CONF_DIR/kdc.conf # Touch logging locations mkdir -p $LOGDIR @@ -112,9 +94,5 @@ EOF kadmin.local -q "addprinc -pw elastic admin/admin@$REALM_NAME" kadmin.local -q "ktadd -k /etc/admin.keytab admin/admin@$REALM_NAME" -# Start Kerberos Services -krb5kdc -kadmind - -# Mark that the vm is already provisioned -touch $MARKER_FILE \ No newline at end of file +# Create a link so addprinc.sh is on path +ln -s $PROV_DIR/addprinc.sh /usr/bin/ \ No newline at end of file diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template index e572c12e70957..9504b49bc7301 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template @@ -32,12 +32,8 @@ [realms] ${REALM_NAME} = { - kdc = ${KDC_NAME}:88 - kdc = ${KDC_NAME}:60088 - kdc = localhost:60088 - kdc = localhost:88 - kdc = 127.0.0.1:60088 kdc = 127.0.0.1:88 + kdc = 127.0.0.1:${MAPPED_PORT} admin_server = ${KDC_NAME}:749 default_domain = ${BUILD_ZONE} } diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh new file mode 100644 index 0000000000000..815a9e94e8cb5 --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -e + +addprinc.sh elasticsearch +addprinc.sh HTTP/localhost +addprinc.sh peppa +addprinc.sh george dino + +# Use this as a signal that setup is complete +python3 -m http.server 4444 & + +sleep infinity \ No newline at end of file diff --git a/test/fixtures/old-elasticsearch/build.gradle b/test/fixtures/old-elasticsearch/build.gradle index 82948a0b3b05a..5cfc02bbba3c6 100644 --- a/test/fixtures/old-elasticsearch/build.gradle +++ b/test/fixtures/old-elasticsearch/build.gradle @@ -24,7 +24,7 @@ a "ports" file with the port on which Elasticsearch is running. """ apply plugin: 'elasticsearch.build' -unitTest.enabled = false +test.enabled = false dependencies { // Just for the constants.... diff --git a/test/framework/build.gradle b/test/framework/build.gradle index fbc87988837cc..094c574e05188 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -16,7 +16,6 @@ * specific language governing permissions and limitations * under the License. 
*/ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask; dependencies { compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" @@ -64,12 +63,12 @@ thirdPartyAudit.ignoreMissingClasses ( 'org.jmock.core.Constraint' ) -unitTest { +test { systemProperty 'tests.gradle_index_compat_versions', bwcVersions.indexCompatible.join(',') systemProperty 'tests.gradle_wire_compat_versions', bwcVersions.wireCompatible.join(',') systemProperty 'tests.gradle_unreleased_versions', bwcVersions.unreleased.join(',') } -task integTest(type: RandomizedTestingTask) { +task integTest(type: Test) { include "**/*IT.class" } diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 1f934aa00b3b8..e035b779b3f02 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -20,9 +20,8 @@ package org.elasticsearch.bootstrap; import com.carrotsearch.randomizedtesting.RandomizedRunner; - -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; @@ -30,6 +29,7 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.network.IfConfig; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.PluginInfo; import org.elasticsearch.secure_sm.SecureSM; import org.junit.Assert; @@ -37,6 +37,7 @@ import java.io.InputStream; import java.net.SocketPermission; import java.net.URL; +import java.nio.file.Files; import java.nio.file.Path; import java.security.Permission; import java.security.Permissions; @@ -79,8 +80,10 @@ public class BootstrapForTesting { } // just like bootstrap, initialize natives, then SM + final boolean memoryLock = + BootstrapSettings.MEMORY_LOCK_SETTING.get(Settings.EMPTY); // use the default bootstrap.memory_lock setting final boolean systemCallFilter = Booleans.parseBoolean(System.getProperty("tests.system_call_filter", "true")); - Bootstrap.initializeNatives(javaTmpDir, true, systemCallFilter, true); + Bootstrap.initializeNatives(javaTmpDir, memoryLock, systemCallFilter, true); // initialize probes Bootstrap.initializeProbes(); @@ -250,9 +253,12 @@ static Set parseClassPathWithSymlinks() throws Exception { Set raw = JarHell.parseClassPath(); Set cooked = new HashSet<>(raw.size()); for (URL url : raw) { - boolean added = cooked.add(PathUtils.get(url.toURI()).toRealPath().toUri().toURL()); - if (added == false) { - throw new IllegalStateException("Duplicate in classpath after resolving symlinks: " + url); + Path path = PathUtils.get(url.toURI()); + if (Files.exists(path)) { + boolean added = cooked.add(path.toRealPath().toUri().toURL()); + if (added == false) { + throw new IllegalStateException("Duplicate in classpath after resolving symlinks: " + url); + } } } return raw; diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index e09455b55bd52..c6b3743ab28cc 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -27,6 +27,8 @@ import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReader; @@ -53,6 +55,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -126,6 +129,7 @@ import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; public abstract class EngineTestCase extends ESTestCase { @@ -254,18 +258,22 @@ public EngineConfig copy(EngineConfig config, MergePolicy mergePolicy) { @After public void tearDown() throws Exception { super.tearDown(); - if (engine != null && engine.isClosed.get() == false) { - engine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); - assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, createMapperService("test")); - } - if (replicaEngine != null && replicaEngine.isClosed.get() == false) { - replicaEngine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); - assertConsistentHistoryBetweenTranslogAndLuceneIndex(replicaEngine, createMapperService("test")); + try { + if (engine != null && engine.isClosed.get() == false) { + engine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, createMapperService("test")); + assertMaxSeqNoInCommitUserData(engine); + } + if (replicaEngine != null && replicaEngine.isClosed.get() == false) { + replicaEngine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(replicaEngine, createMapperService("test")); + assertMaxSeqNoInCommitUserData(replicaEngine); + } + assertThat(engine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L)); + assertThat(replicaEngine.config().getCircuitBreakerService().getBreaker(CircuitBreaker.ACCOUNTING).getUsed(), equalTo(0L)); + } finally { + IOUtils.close(replicaEngine, storeReplica, engine, store, () -> terminate(threadPool)); } - IOUtils.close( - replicaEngine, storeReplica, - engine, store); - terminate(threadPool); } @@ -509,7 +517,7 @@ private InternalEngine createEngine(@Nullable IndexWriterFactory indexWriterFact } InternalEngine internalEngine = createInternalEngine(indexWriterFactory, localCheckpointTrackerSupplier, seqNoForOperation, config); - internalEngine.initializeMaxSeqNoOfUpdatesOrDeletes(); + internalEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); internalEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); return internalEngine; } @@ -997,8 +1005,9 @@ public static List getDocIds(Engine engine, boolean refresh) } } } - docs.sort(Comparator.comparing(DocIdSeqNoAndTerm::getId) - 
.thenComparingLong(DocIdSeqNoAndTerm::getSeqNo).thenComparingLong(DocIdSeqNoAndTerm::getPrimaryTerm)); + docs.sort(Comparator.comparingLong(DocIdSeqNoAndTerm::getSeqNo) + .thenComparingLong(DocIdSeqNoAndTerm::getPrimaryTerm) + .thenComparing((DocIdSeqNoAndTerm::getId))); return docs; } } @@ -1066,6 +1075,21 @@ public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine e } } + /** + * Asserts that the max_seq_no stored in the commit's user_data is never smaller than seq_no of any document in the commit. + */ + public static void assertMaxSeqNoInCommitUserData(Engine engine) throws Exception { + List commits = DirectoryReader.listCommits(engine.store.directory()); + for (IndexCommit commit : commits) { + try (DirectoryReader reader = DirectoryReader.open(commit)) { + AtomicLong maxSeqNoFromDocs = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + Lucene.scanSeqNosInReader(reader, 0, Long.MAX_VALUE, n -> maxSeqNoFromDocs.set(Math.max(n, maxSeqNoFromDocs.get()))); + assertThat(Long.parseLong(commit.getUserData().get(SequenceNumbers.MAX_SEQ_NO)), + greaterThanOrEqualTo(maxSeqNoFromDocs.get())); + } + } + } + public static MapperService createMapperService(String type) throws IOException { IndexMetaData indexMetaData = IndexMetaData.builder("test") .settings(Settings.builder() @@ -1088,6 +1112,22 @@ public static Translog getTranslog(Engine engine) { return internalEngine.getTranslog(); } + /** + * Waits for all operations up to the provided sequence number to complete in the given internal engine. + * + * @param seqNo the sequence number that the checkpoint must advance to before this method returns + * @throws InterruptedException if the thread was interrupted while blocking on the condition + */ + public static void waitForOpsToComplete(InternalEngine engine, long seqNo) throws InterruptedException { + engine.getLocalCheckpointTracker().waitForOpsToComplete(seqNo); + } + + public static boolean hasSnapshottedCommits(Engine engine) { + assert engine instanceof InternalEngine : "only InternalEngines have snapshotted commits, got: " + engine.getClass(); + InternalEngine internalEngine = (InternalEngine) engine; + return internalEngine.hasSnapshottedCommits(); + } + public static final class PrimaryTermSupplier implements LongSupplier { private final AtomicLong term; diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index c396cdfe84570..8b984c22bfd0e 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -67,6 +67,10 @@ import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; +import org.elasticsearch.index.seqno.RetentionLease; +import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; +import org.elasticsearch.index.seqno.RetentionLeaseSyncer; +import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.PrimaryReplicaSyncer; @@ -92,7 +96,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; import java.util.function.BiFunction; -import 
java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -177,9 +180,25 @@ protected class ReplicationGroup implements AutoCloseable, Iterable } }); + private final RetentionLeaseSyncer retentionLeaseSyncer = new RetentionLeaseSyncer() { + @Override + public void sync(ShardId shardId, RetentionLeases retentionLeases, ActionListener listener) { + syncRetentionLeases(shardId, retentionLeases, listener); + } + + @Override + public void backgroundSync(ShardId shardId, RetentionLeases retentionLeases) { + sync(shardId, retentionLeases, ActionListener.wrap( + r -> { }, + e -> { + throw new AssertionError("failed to background sync retention lease", e); + })); + } + }; + protected ReplicationGroup(final IndexMetaData indexMetaData) throws IOException { final ShardRouting primaryRouting = this.createShardRouting("s0", true); - primary = newShard(primaryRouting, indexMetaData, null, getEngineFactory(primaryRouting), () -> {}); + primary = newShard(primaryRouting, indexMetaData, null, getEngineFactory(primaryRouting), () -> {}, retentionLeaseSyncer); replicas = new CopyOnWriteArrayList<>(); this.indexMetaData = indexMetaData; updateAllocationIDsOnPrimary(); @@ -267,9 +286,7 @@ public synchronized int startReplicas(int numOfReplicasToStart) throws IOExcepti } public void startPrimary() throws IOException { - final DiscoveryNode pNode = getDiscoveryNode(primary.routingEntry().currentNodeId()); - primary.markAsRecovering("store", new RecoveryState(primary.routingEntry(), pNode, null)); - primary.recoverFromStore(); + recoverPrimary(primary); HashSet activeIds = new HashSet<>(); activeIds.addAll(activeIds()); activeIds.add(primary.routingEntry().allocationId().getId()); @@ -286,7 +303,7 @@ public void startPrimary() throws IOException { public IndexShard addReplica() throws IOException { final ShardRouting replicaRouting = createShardRouting("s" + replicaId.incrementAndGet(), false); final IndexShard replica = - newShard(replicaRouting, indexMetaData, null, getEngineFactory(replicaRouting), () -> {}); + newShard(replicaRouting, indexMetaData, null, getEngineFactory(replicaRouting), () -> {}, retentionLeaseSyncer); addReplica(replica); return replica; } @@ -302,6 +319,11 @@ assert shardRoutings().stream() updateAllocationIDsOnPrimary(); } + protected synchronized void recoverPrimary(IndexShard primary) { + final DiscoveryNode pNode = getDiscoveryNode(primary.routingEntry().currentNodeId()); + primary.markAsRecovering("store", new RecoveryState(primary.routingEntry(), pNode, null)); + primary.recoverFromStore(); + } public synchronized IndexShard addReplicaWithExistingPath(final ShardPath shardPath, final String nodeId) throws IOException { final ShardRouting shardRouting = TestShardRouting.newShardRouting( @@ -312,7 +334,7 @@ public synchronized IndexShard addReplicaWithExistingPath(final ShardPath shardP final IndexShard newReplica = newShard(shardRouting, shardPath, indexMetaData, null, null, getEngineFactory(shardRouting), - () -> {}, EMPTY_EVENT_LISTENER); + () -> {}, retentionLeaseSyncer, EMPTY_EVENT_LISTENER); replicas.add(newReplica); if (replicationTargets != null) { replicationTargets.addReplica(newReplica); @@ -476,7 +498,7 @@ public Iterator iterator() { return Iterators.concat(replicas.iterator(), Collections.singleton(primary).iterator()); } - public IndexShard getPrimary() { + public synchronized IndexShard getPrimary() { return primary; } @@ -509,6 +531,38 @@ private synchronized void 
computeReplicationTargets() { private synchronized ReplicationTargets getReplicationTargets() { return replicationTargets; } + + protected void syncRetentionLeases(ShardId shardId, RetentionLeases leases, ActionListener listener) { + RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(shardId, leases); + ActionListener wrappedListener = ActionListener.wrap( + r -> listener.onResponse(new ReplicationResponse()), listener::onFailure); + new SyncRetentionLeases(request, ReplicationGroup.this, wrappedListener).execute(); + } + + public synchronized RetentionLease addRetentionLease(String id, long retainingSequenceNumber, String source, + ActionListener listener) { + return getPrimary().addRetentionLease(id, retainingSequenceNumber, source, listener); + } + + public synchronized RetentionLease renewRetentionLease(String id, long retainingSequenceNumber, String source) { + return getPrimary().renewRetentionLease(id, retainingSequenceNumber, source); + } + + public synchronized void removeRetentionLease(String id, ActionListener listener) { + getPrimary().removeRetentionLease(id, listener); + } + + public void executeRetentionLeasesSyncRequestOnReplica(RetentionLeaseSyncAction.Request request, IndexShard replica) { + final PlainActionFuture acquirePermitFuture = new PlainActionFuture<>(); + replica.acquireReplicaOperationPermit(getPrimary().getOperationPrimaryTerm(), getPrimary().getGlobalCheckpoint(), + getPrimary().getMaxSeqNoOfUpdatesOrDeletes(), acquirePermitFuture, ThreadPool.Names.SAME, request); + try (Releasable ignored = acquirePermitFuture.actionGet()) { + replica.updateRetentionLeasesOnReplica(request.getRetentionLeases()); + replica.persistRetentionLeases(); + } catch (Exception e) { + throw new AssertionError("failed to execute retention lease request on replica [" + replica.routingEntry() + "]", e); + } + } } static final class ReplicationTargets { @@ -666,15 +720,12 @@ public void onFailure(Exception e) { } @Override - public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, - Runnable onSuccess, Consumer onPrimaryDemoted, - Consumer onIgnoredFailure) { + public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, ActionListener listener) { throw new UnsupportedOperationException("failing shard " + replica + " isn't supported. 
failure: " + message, exception); } @Override - public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, Runnable onSuccess, - Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { + public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, ActionListener listener) { throw new UnsupportedOperationException("can't mark " + shardId + ", aid [" + allocationId + "] as stale"); } } @@ -863,4 +914,26 @@ private void executeResyncOnReplica(IndexShard replica, ResyncReplicationRequest } TransportWriteActionTestHelper.performPostWriteActions(replica, request, location, logger); } + + class SyncRetentionLeases extends ReplicationAction< + RetentionLeaseSyncAction.Request, RetentionLeaseSyncAction.Request, RetentionLeaseSyncAction.Response> { + + SyncRetentionLeases(RetentionLeaseSyncAction.Request request, ReplicationGroup group, + ActionListener listener) { + super(request, listener, group, "sync-retention-leases"); + } + + @Override + protected PrimaryResult performOnPrimary(IndexShard primary, RetentionLeaseSyncAction.Request request) throws Exception { + primary.persistRetentionLeases(); + return new PrimaryResult(request, new RetentionLeaseSyncAction.Response()); + } + + @Override + protected void performOnReplica(RetentionLeaseSyncAction.Request request, IndexShard replica) throws Exception { + replica.updateRetentionLeasesOnReplica(request.getRetentionLeases()); + replica.persistRetentionLeases(); + } + } + } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index f59ae8b9683ac..8d73a5ba4e467 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -250,7 +250,7 @@ protected IndexShard newShard( .settings(indexSettings) .primaryTerm(0, primaryTerm) .putMapping("_doc", "{ \"properties\": {} }"); - return newShard(shardRouting, metaData.build(), null, engineFactory, () -> {}, listeners); + return newShard(shardRouting, metaData.build(), null, engineFactory, () -> {}, RetentionLeaseSyncer.EMPTY, listeners); } /** @@ -293,7 +293,8 @@ protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, I @Nullable IndexSearcherWrapper searcherWrapper, Runnable globalCheckpointSyncer) throws IOException { ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, nodeId, primary, ShardRoutingState.INITIALIZING, primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); - return newShard(shardRouting, indexMetaData, searcherWrapper, new InternalEngineFactory(), globalCheckpointSyncer); + return newShard( + shardRouting, indexMetaData, searcherWrapper, new InternalEngineFactory(), globalCheckpointSyncer, RetentionLeaseSyncer.EMPTY); } /** @@ -307,7 +308,7 @@ protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, I protected IndexShard newShard( ShardRouting routing, IndexMetaData indexMetaData, EngineFactory engineFactory, IndexingOperationListener... 
listeners) throws IOException { - return newShard(routing, indexMetaData, null, engineFactory, () -> {}, listeners); + return newShard(routing, indexMetaData, null, engineFactory, () -> {}, RetentionLeaseSyncer.EMPTY, listeners); } /** @@ -323,6 +324,7 @@ protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, @Nullable IndexSearcherWrapper indexSearcherWrapper, @Nullable EngineFactory engineFactory, Runnable globalCheckpointSyncer, + RetentionLeaseSyncer retentionLeaseSyncer, IndexingOperationListener... listeners) throws IOException { // add node id as name to settings for proper logging @@ -330,7 +332,7 @@ protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); return newShard(routing, shardPath, indexMetaData, null, indexSearcherWrapper, engineFactory, globalCheckpointSyncer, - EMPTY_EVENT_LISTENER, listeners); + retentionLeaseSyncer, EMPTY_EVENT_LISTENER, listeners); } /** @@ -348,7 +350,7 @@ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMe @Nullable CheckedFunction storeProvider, @Nullable IndexSearcherWrapper indexSearcherWrapper, @Nullable EngineFactory engineFactory, - Runnable globalCheckpointSyncer, + Runnable globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, IndexEventListener indexEventListener, IndexingOperationListener... listeners) throws IOException { final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings); @@ -386,7 +388,7 @@ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMe Collections.emptyList(), Arrays.asList(listeners), globalCheckpointSyncer, - RetentionLeaseSyncer.EMPTY, + retentionLeaseSyncer, breakerService); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); success = true; @@ -438,6 +440,7 @@ protected IndexShard reinitShard(IndexShard current, ShardRouting routing, Engin null, engineFactory, current.getGlobalCheckpointSyncer(), + current.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER, listeners); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index 2b8fba34c2f3e..8187a46fa7425 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -23,9 +23,15 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.seqno.RetentionLeaseActions; +import 
org.elasticsearch.index.seqno.RetentionLeases; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; @@ -43,6 +49,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import static org.elasticsearch.index.seqno.RetentionLeaseActions.RETAIN_ALL; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; @@ -87,8 +94,8 @@ public void testSnapshotAndRestore() throws Exception { int[] docCounts = new int[indexCount]; String[] indexNames = generateRandomNames(indexCount); for (int i = 0; i < indexCount; i++) { - logger.info("--> create random index {} with {} records", indexNames[i], docCounts[i]); docCounts[i] = iterations(10, 1000); + logger.info("--> create random index {} with {} records", indexNames[i], docCounts[i]); addRandomDocuments(indexNames[i], docCounts[i]); assertHitCount(client().prepareSearch(indexNames[i]).setSize(0).get(), docCounts[i]); } @@ -267,6 +274,58 @@ public void testIndicesDeletedFromRepository() throws Exception { } } + public void testRetentionLeasesClearedOnRestore() throws Exception { + final String repoName = randomAsciiName(); + logger.info("--> creating repository {}", repoName); + createAndCheckTestRepository(repoName); + + final String indexName = randomAsciiName(); + final int shardCount = randomIntBetween(1, 5); + assertAcked(client().admin().indices().prepareCreate(indexName).setSettings( + Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, shardCount)).get()); + final ShardId shardId = new ShardId(resolveIndex(indexName), randomIntBetween(0, shardCount - 1)); + + final int snapshotDocCount = iterations(10, 1000); + logger.info("--> indexing {} docs into {}", snapshotDocCount, indexName); + addRandomDocuments(indexName, snapshotDocCount); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), snapshotDocCount); + + final String leaseId = randomAsciiName(); + logger.info("--> adding retention lease with id {} to {}", leaseId, shardId); + client().execute(RetentionLeaseActions.Add.INSTANCE, new RetentionLeaseActions.AddRequest( + shardId, leaseId, RETAIN_ALL, "test")).actionGet(); + + final ShardStats shardStats = Arrays.stream(client().admin().indices().prepareStats(indexName).get().getShards()) + .filter(s -> s.getShardRouting().shardId().equals(shardId)).findFirst().get(); + final RetentionLeases retentionLeases = shardStats.getRetentionLeaseStats().retentionLeases(); + assertTrue(shardStats + ": " + retentionLeases, retentionLeases.contains(leaseId)); + + final String snapshotName = randomAsciiName(); + logger.info("--> create snapshot {}:{}", repoName, snapshotName); + assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true).setIndices(indexName)); + + if (randomBoolean()) { + final int extraDocCount = iterations(10, 1000); + logger.info("--> indexing {} extra docs into {}", extraDocCount, indexName); + addRandomDocuments(indexName, extraDocCount); + } + + logger.info("--> close index {}", indexName); + assertAcked(client().admin().indices().prepareClose(indexName)); + + logger.info("--> restore index {} from snapshot", indexName); + 
assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(true)); + + ensureGreen(); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), snapshotDocCount); + + final RetentionLeases restoredRetentionLeases = Arrays.stream(client().admin().indices().prepareStats(indexName).get() + .getShards()).filter(s -> s.getShardRouting().shardId().equals(shardId)).findFirst().get() + .getRetentionLeaseStats().retentionLeases(); + assertFalse(restoredRetentionLeases.toString() + " has no " + leaseId, restoredRetentionLeases.contains(leaseId)); + } + protected void addRandomDocuments(String name, int numDocs) throws ExecutionException, InterruptedException { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 4582e27d027da..badfddfd6af65 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -124,6 +124,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MockFieldFilterPlugin; +import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesRequestCache; @@ -466,6 +467,10 @@ protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builde RandomNumbers.randomIntBetween(random, 1, 15) + "ms"); } + if (randomBoolean()) { + builder.put(Store.FORCE_RAM_TERM_DICT.getKey(), true); + } + return builder; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 4443416208321..343614081c48c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -127,6 +128,9 @@ public void tearDown() throws Exception { metaData.persistentSettings().size(), equalTo(0)); assertThat("test leaves transient cluster metadata behind: " + metaData.transientSettings().keySet(), metaData.transientSettings().size(), equalTo(0)); + GetIndexResponse indices = client().admin().indices().prepareGetIndex().addIndices("*").get(); + assertThat("test leaves indices that were not deleted: " + Strings.arrayToCommaDelimitedString(indices.indices()), + indices.indices(), equalTo(Strings.EMPTY_ARRAY)); if (resetNodeAfterTest()) { assert NODE != null; stopNode(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index a36018921e9f4..7ce82163d7224 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ 
-327,6 +327,16 @@ public static void restoreContentType() { Requests.INDEX_CONTENT_TYPE = XContentType.JSON; } + @BeforeClass + public static void ensureSupportedLocale() { + if (isUnusableLocale()) { + Logger logger = LogManager.getLogger(ESTestCase.class); + logger.warn("Attempting to run tests in an unusable locale in a FIPS JVM. Certificate expiration validation will fail, " + + "switching to English. See: https://github.com/bcgit/bc-java/issues/405"); + Locale.setDefault(Locale.ENGLISH); + } + } + @Before public final void before() { logger.info("{}before test", getTestParamsForLogging()); @@ -1419,6 +1429,12 @@ public TestAnalysis(IndexAnalyzers indexAnalyzers, } } + private static boolean isUnusableLocale() { + return inFipsJvm() && (Locale.getDefault().toLanguageTag().equals("th-TH") + || Locale.getDefault().toLanguageTag().equals("ja-JP-u-ca-japanese-x-lvariant-JP") + || Locale.getDefault().toLanguageTag().equals("th-TH-u-nu-thai-x-lvariant-TH")); + } + public static boolean inFipsJvm() { return Security.getProviders()[0].getName().toLowerCase(Locale.ROOT).contains("fips"); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 7d9181909e293..d8df3d0b0ac78 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -87,6 +87,7 @@ import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -110,7 +111,6 @@ import org.elasticsearch.transport.MockTransportClient; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; -import org.junit.Assert; import java.io.Closeable; import java.io.IOException; @@ -153,8 +153,8 @@ import static org.elasticsearch.discovery.DiscoveryModule.ZEN2_DISCOVERY_TYPE; import static org.elasticsearch.discovery.DiscoveryModule.ZEN_DISCOVERY_TYPE; import static org.elasticsearch.discovery.DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING; -import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; import static org.elasticsearch.discovery.FileBasedSeedHostsProvider.UNICAST_HOSTS_FILE; +import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; import static org.elasticsearch.test.ESTestCase.assertBusy; import static org.elasticsearch.test.ESTestCase.awaitBusy; import static org.elasticsearch.test.ESTestCase.getTestTransportType; @@ -186,6 +186,16 @@ public final class InternalTestCluster extends TestCluster { private final Logger logger = LogManager.getLogger(getClass()); + private static final Predicate DATA_NODE_PREDICATE = + nodeAndClient -> DiscoveryNode.isDataNode(nodeAndClient.node.settings()); + + private static final Predicate NO_DATA_NO_MASTER_PREDICATE = nodeAndClient -> + DiscoveryNode.isMasterNode(nodeAndClient.node.settings()) == false + && DiscoveryNode.isDataNode(nodeAndClient.node.settings()) == false; + + private static final Predicate MASTER_NODE_PREDICATE = + nodeAndClient -> DiscoveryNode.isMasterNode(nodeAndClient.node.settings()); + public static final int 
DEFAULT_LOW_NUM_MASTER_NODES = 1; public static final int DEFAULT_HIGH_NUM_MASTER_NODES = 3; @@ -196,8 +206,10 @@ public final class InternalTestCluster extends TestCluster { static final int DEFAULT_MIN_NUM_CLIENT_NODES = 0; static final int DEFAULT_MAX_NUM_CLIENT_NODES = 1; - /* sorted map to make traverse order reproducible, concurrent since we do checks on it not within a sync block */ - private final NavigableMap nodes = new TreeMap<>(); + /* Sorted map to make traverse order reproducible. + * The map of nodes is never mutated so individual reads are safe without synchronization. + * Updates are intended to follow a copy-on-write approach. */ + private volatile NavigableMap nodes = Collections.emptyNavigableMap(); private final Set dataDirToClean = new HashSet<>(); @@ -207,7 +219,7 @@ public final class InternalTestCluster extends TestCluster { private final Settings defaultSettings; - private AtomicInteger nextNodeId = new AtomicInteger(0); + private final AtomicInteger nextNodeId = new AtomicInteger(0); /* Each shared node has a node seed that is used to start up the node and get default settings * this is important if a node is randomly shut down in a test since the next test relies on a @@ -239,7 +251,7 @@ public final class InternalTestCluster extends TestCluster { private final Path baseDir; private ServiceDisruptionScheme activeDisruptionScheme; - private Function clientWrapper; + private final Function clientWrapper; private int bootstrapMasterNodeIndex = -1; @@ -404,10 +416,6 @@ private static boolean usingZen1(Settings settings) { return ZEN_DISCOVERY_TYPE.equals(DISCOVERY_TYPE_SETTING.get(settings)); } - public int getBootstrapMasterNodeIndex() { - return bootstrapMasterNodeIndex; - } - /** * Sets {@link #bootstrapMasterNodeIndex} to the given value, see {@link #bootstrapMasterNodeWithSpecifiedIndex(List)} * for the description of how this field is used. @@ -459,7 +467,7 @@ public Collection> getPlugins() { return plugins; } - private Settings getRandomNodeSettings(long seed) { + private static Settings getRandomNodeSettings(long seed) { Random random = new Random(seed); Builder builder = Settings.builder(); builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), rarely(random)); @@ -544,8 +552,8 @@ private void ensureOpen() { } } - private synchronized NodeAndClient getOrBuildRandomNode() { - ensureOpen(); + private NodeAndClient getOrBuildRandomNode() { + assert Thread.holdsLock(this); final NodeAndClient randomNodeAndClient = getRandomNodeAndClient(); if (randomNodeAndClient != null) { return randomNodeAndClient; @@ -565,11 +573,10 @@ private synchronized NodeAndClient getOrBuildRandomNode() { return buildNode; } - private synchronized NodeAndClient getRandomNodeAndClient() { + private NodeAndClient getRandomNodeAndClient() { return getRandomNodeAndClient(nc -> true); } - private synchronized NodeAndClient getRandomNodeAndClient(Predicate predicate) { ensureOpen(); List values = nodes.values().stream().filter(predicate).collect(Collectors.toList()); @@ -611,7 +618,7 @@ public synchronized void ensureAtMostNumDataNodes(int n) throws IOException { final Stream collection = n == 0 ? 
nodes.values().stream() : nodes.values().stream() - .filter(new DataNodePredicate().and(new NodeNamePredicate(getMasterName()).negate())); + .filter(DATA_NODE_PREDICATE.and(new NodeNamePredicate(getMasterName()).negate())); final Iterator values = collection.iterator(); logger.info("changing cluster size from {} data nodes to {}", size, n); @@ -675,20 +682,19 @@ private Settings getNodeSettings(final int nodeId, final long seed, final Settin * the method will return the existing one * @param onTransportServiceStarted callback to run when transport service is started */ - private NodeAndClient buildNode(int nodeId, Settings settings, + private synchronized NodeAndClient buildNode(int nodeId, Settings settings, boolean reuseExisting, Runnable onTransportServiceStarted) { assert Thread.holdsLock(this); ensureOpen(); Collection> plugins = getPlugins(); String name = settings.get("node.name"); - if (reuseExisting && nodes.containsKey(name)) { + final NodeAndClient nodeAndClient = nodes.get(name); + if (reuseExisting && nodeAndClient != null) { onTransportServiceStarted.run(); // reusing an existing node implies its transport service already started - return nodes.get(name); - } else { - assert reuseExisting == true || nodes.containsKey(name) == false : - "node name [" + name + "] already exists but not allowed to use it"; + return nodeAndClient; } + assert reuseExisting == true || nodeAndClient == null : "node name [" + name + "] already exists but not allowed to use it"; SecureSettings secureSettings = Settings.builder().put(settings).getSecureSettings(); if (secureSettings instanceof MockSecureSettings) { @@ -725,7 +731,7 @@ private String buildNodeName(int id, Settings settings) { /** * returns a suffix string based on the node role. If no explicit role is defined, the suffix will be empty */ - private String getRoleSuffix(Settings settings) { + private static String getRoleSuffix(Settings settings) { String suffix = ""; if (Node.NODE_MASTER_SETTING.exists(settings) && Node.NODE_MASTER_SETTING.get(settings)) { suffix = suffix + Role.MASTER.getAbbreviation(); @@ -752,37 +758,32 @@ public synchronized Client client() { * Returns a node client to a data node in the cluster. * Note: use this with care tests should not rely on a certain nodes client. */ - public synchronized Client dataNodeClient() { - ensureOpen(); + public Client dataNodeClient() { /* Randomly return a client to one of the nodes in the cluster */ - return getRandomNodeAndClient(new DataNodePredicate()).client(random); + return getRandomNodeAndClient(DATA_NODE_PREDICATE).client(random); } /** * Returns a node client to the current master node. * Note: use this with care tests should not rely on a certain nodes client. */ - public synchronized Client masterClient() { - ensureOpen(); + public Client masterClient() { NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName())); if (randomNodeAndClient != null) { return randomNodeAndClient.nodeClient(); // ensure node client master is requested } - Assert.fail("No master client found"); - return null; // can't happen + throw new AssertionError("No master client found"); } /** * Returns a node client to random node but not the master. This method will fail if no non-master client is available. 
*/ - public synchronized Client nonMasterClient() { - ensureOpen(); + public Client nonMasterClient() { NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getMasterName()).negate()); if (randomNodeAndClient != null) { return randomNodeAndClient.nodeClient(); // ensure node client non-master is requested } - Assert.fail("No non-master client found"); - return null; // can't happen + throw new AssertionError("No non-master client found"); } /** @@ -790,14 +791,14 @@ public synchronized Client nonMasterClient() { */ public synchronized Client coordOnlyNodeClient() { ensureOpen(); - NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new NoDataNoMasterNodePredicate()); + NodeAndClient randomNodeAndClient = getRandomNodeAndClient(NO_DATA_NO_MASTER_PREDICATE); if (randomNodeAndClient != null) { return randomNodeAndClient.client(random); } int nodeId = nextNodeId.getAndIncrement(); Settings settings = getSettings(nodeId, random.nextLong(), Settings.EMPTY); startCoordinatingOnlyNode(settings); - return getRandomNodeAndClient(new NoDataNoMasterNodePredicate()).client(random); + return getRandomNodeAndClient(NO_DATA_NO_MASTER_PREDICATE).client(random); } public synchronized String startCoordinatingOnlyNode(Settings settings) { @@ -811,7 +812,6 @@ public synchronized String startCoordinatingOnlyNode(Settings settings) { * Returns a transport client */ public synchronized Client transportClient() { - ensureOpen(); // randomly return a transport client going to one of the nodes in the cluster return getOrBuildRandomNode().transportClient(); } @@ -819,39 +819,39 @@ public synchronized Client transportClient() { /** * Returns a node client to a given node. */ - public synchronized Client client(String nodeName) { - ensureOpen(); + public Client client(String nodeName) { NodeAndClient nodeAndClient = nodes.get(nodeName); if (nodeAndClient != null) { return nodeAndClient.client(random); } - Assert.fail("No node found with name: [" + nodeName + "]"); - return null; // can't happen + throw new AssertionError("No node found with name: [" + nodeName + "]"); } /** * Returns a "smart" node client to a random node in the cluster */ - public synchronized Client smartClient() { + public Client smartClient() { NodeAndClient randomNodeAndClient = getRandomNodeAndClient(); if (randomNodeAndClient != null) { return randomNodeAndClient.nodeClient(); } - Assert.fail("No smart client found"); - return null; // can't happen + throw new AssertionError("No smart client found"); } @Override - public synchronized void close() { + public synchronized void close() throws IOException { if (this.open.compareAndSet(true, false)) { if (activeDisruptionScheme != null) { activeDisruptionScheme.testClusterClosed(); activeDisruptionScheme = null; } - IOUtils.closeWhileHandlingException(nodes.values()); - nodes.clear(); - executor.shutdownNow(); + try { + IOUtils.close(nodes.values()); + } finally { + nodes = Collections.emptyNavigableMap(); + executor.shutdownNow(); + } } } @@ -871,7 +871,7 @@ private final class NodeAndClient implements Closeable { this.name = name; this.originalNodeSettings = originalNodeSettings; this.nodeAndClientId = nodeAndClientId; - markNodeDataDirsAsNotEligableForWipe(node); + markNodeDataDirsAsNotEligibleForWipe(node); } Node node() { @@ -894,9 +894,6 @@ public boolean isMasterEligible() { } Client client(Random random) { - if (closed.get()) { - throw new RuntimeException("already closed"); - } double nextDouble = random.nextDouble(); if (nextDouble < transportClientRatio) 
{ if (logger.isTraceEnabled()) { @@ -923,22 +920,32 @@ Client transportClient() { } private Client getOrBuildNodeClient() { - if (nodeClient == null) { - nodeClient = node.client(); + synchronized (InternalTestCluster.this) { + if (closed.get()) { + throw new RuntimeException("already closed"); + } + if (nodeClient == null) { + nodeClient = node.client(); + } + return clientWrapper.apply(nodeClient); } - return clientWrapper.apply(nodeClient); } private Client getOrBuildTransportClient() { - if (transportClient == null) { - /* don't sniff client for now - doesn't work will all tests - * since it might throw NoNodeAvailableException if nodes are - * shut down. we first need support of transportClientRatio - * as annotations or so */ - transportClient = new TransportClientFactory(false, nodeConfigurationSource.transportClientSettings(), + synchronized (InternalTestCluster.this) { + if (closed.get()) { + throw new RuntimeException("already closed"); + } + if (transportClient == null) { + /* don't sniff client for now - doesn't work with all tests + * since it might throw NoNodeAvailableException if nodes are + * shut down. we first need support of transportClientRatio + * as annotations or so */ + transportClient = new TransportClientFactory(nodeConfigurationSource.transportClientSettings(), baseDir, nodeConfigurationSource.transportClientPlugins()).client(node, clusterName); + } + return clientWrapper.apply(transportClient); } - return clientWrapper.apply(transportClient); } void resetClient() { @@ -1030,11 +1037,12 @@ public void afterStart() { } }); closed.set(false); - markNodeDataDirsAsNotEligableForWipe(node); + markNodeDataDirsAsNotEligibleForWipe(node); } @Override public void close() throws IOException { + assert Thread.holdsLock(InternalTestCluster.this); try { resetClient(); } finally { @@ -1043,18 +1051,32 @@ public void close() throws IOException { node.close(); } } + + private void markNodeDataDirsAsPendingForWipe(Node node) { + assert Thread.holdsLock(InternalTestCluster.this); + NodeEnvironment nodeEnv = node.getNodeEnvironment(); + if (nodeEnv.hasNodeFile()) { + dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataPaths())); + } + } + + private void markNodeDataDirsAsNotEligibleForWipe(Node node) { + assert Thread.holdsLock(InternalTestCluster.this); + NodeEnvironment nodeEnv = node.getNodeEnvironment(); + if (nodeEnv.hasNodeFile()) { + dataDirToClean.removeAll(Arrays.asList(nodeEnv.nodeDataPaths())); + } + } } public static final String TRANSPORT_CLIENT_PREFIX = "transport_client_"; - static class TransportClientFactory { - private final boolean sniff; + private static class TransportClientFactory { private final Settings settings; private final Path baseDir; private final Collection> plugins; - TransportClientFactory(boolean sniff, Settings settings, Path baseDir, Collection> plugins) { - this.sniff = sniff; + TransportClientFactory(Settings settings, Path baseDir, Collection> plugins) { this.settings = settings != null ? 
settings : Settings.EMPTY; this.baseDir = baseDir; this.plugins = plugins; @@ -1067,7 +1089,7 @@ public Client client(Node node, String clusterName) { .put("client.transport.nodes_sampler_interval", "1s") .put(Environment.PATH_HOME_SETTING.getKey(), baseDir) .put("node.name", TRANSPORT_CLIENT_PREFIX + node.settings().get("node.name")) - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName).put("client.transport.sniff", sniff) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName).put("client.transport.sniff", false) .put("logger.prefix", nodeSettings.get("logger.prefix", "")) .put("logger.level", nodeSettings.get("logger.level", "INFO")) .put(settings); @@ -1116,8 +1138,7 @@ private synchronized void reset(boolean wipeData) throws IOException { // trash all nodes with id >= sharedNodesSeeds.length - they are non shared final List toClose = new ArrayList<>(); - for (Iterator iterator = nodes.values().iterator(); iterator.hasNext();) { - NodeAndClient nodeAndClient = iterator.next(); + for (NodeAndClient nodeAndClient : nodes.values()) { if (nodeAndClient.nodeAndClientId() >= sharedNodesSeeds.length) { logger.debug("Close Node [{}] not shared", nodeAndClient.name); toClose.add(nodeAndClient); @@ -1195,7 +1216,7 @@ private synchronized void reset(boolean wipeData) throws IOException { nextNodeId.set(newSize); assert size() == newSize; - if (newSize > 0) { + if (autoManageMinMasterNodes && newSize > 0) { validateClusterFormed(); } logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", @@ -1209,7 +1230,7 @@ public synchronized void validateClusterFormed() { } /** ensure a cluster is formed with all published nodes, but do so by using the client of the specified node */ - public synchronized void validateClusterFormed(String viaNode) { + private synchronized void validateClusterFormed(String viaNode) { Set expectedNodes = new HashSet<>(); for (NodeAndClient nodeAndClient : nodes.values()) { expectedNodes.add(getInstanceFromNode(ClusterService.class, nodeAndClient.node()).localNode()); @@ -1238,7 +1259,7 @@ public synchronized void validateClusterFormed(String viaNode) { } @Override - public synchronized void afterTest() throws IOException { + public synchronized void afterTest() { wipePendingDataDirectories(); randomlyResetClients(); /* reset all clients - each test gets its own client based on the Random instance created above. 
*/ } @@ -1254,6 +1275,7 @@ public void beforeIndexDeletion() throws Exception { //check that shards that have same sync id also contain same number of documents assertSameSyncIdSameDocs(); assertOpenTranslogReferences(); + assertNoSnapshottedIndexCommit(); } private void assertSameSyncIdSameDocs() { @@ -1286,8 +1308,7 @@ private void assertSameSyncIdSameDocs() { private void assertNoPendingIndexOperations() throws Exception { assertBusy(() -> { - final Collection nodesAndClients = nodes.values(); - for (NodeAndClient nodeAndClient : nodesAndClients) { + for (NodeAndClient nodeAndClient : nodes.values()) { IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { @@ -1295,7 +1316,7 @@ private void assertNoPendingIndexOperations() throws Exception { if (operations.size() > 0) { throw new AssertionError( "shard " + indexShard.shardId() + " on node [" + nodeAndClient.name + "] has pending operations:\n --> " + - operations.stream().collect(Collectors.joining("\n --> ")) + String.join("\n --> ", operations) ); } } @@ -1306,8 +1327,7 @@ private void assertNoPendingIndexOperations() throws Exception { private void assertOpenTranslogReferences() throws Exception { assertBusy(() -> { - final Collection nodesAndClients = nodes.values(); - for (NodeAndClient nodeAndClient : nodesAndClients) { + for (NodeAndClient nodeAndClient : nodes.values()) { IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { @@ -1321,6 +1341,27 @@ private void assertOpenTranslogReferences() throws Exception { } } } + }, 60, TimeUnit.SECONDS); + } + + private void assertNoSnapshottedIndexCommit() throws Exception { + assertBusy(() -> { + for (NodeAndClient nodeAndClient : nodes.values()) { + IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); + for (IndexService indexService : indexServices) { + for (IndexShard indexShard : indexService) { + try { + Engine engine = IndexShardTestCase.getEngine(indexShard); + if (engine instanceof InternalEngine) { + assertFalse(indexShard.routingEntry().toString() + " has unreleased snapshotted index commits", + EngineTestCase.hasSnapshottedCommits(engine)); + } + } catch (AlreadyClosedException ignored) { + + } + } + } + } }); } @@ -1329,8 +1370,7 @@ private void assertOpenTranslogReferences() throws Exception { * This assertion might be expensive, thus we prefer not to execute on every test but only interesting tests. */ public void assertConsistentHistoryBetweenTranslogAndLuceneIndex() throws IOException { - final Collection nodesAndClients = nodes.values(); - for (NodeAndClient nodeAndClient : nodesAndClients) { + for (NodeAndClient nodeAndClient : nodes.values()) { IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { @@ -1436,6 +1476,7 @@ public void assertSameDocIdsOnShards() throws Exception { } private void randomlyResetClients() { + assert Thread.holdsLock(this); // only reset the clients on nightly tests, it causes heavy load... 
if (RandomizedTest.isNightly() && rarely(random)) { final Collection nodesAndClients = nodes.values(); @@ -1462,22 +1503,6 @@ public synchronized void wipePendingDataDirectories() { } } - private void markNodeDataDirsAsPendingForWipe(Node node) { - assert Thread.holdsLock(this); - NodeEnvironment nodeEnv = node.getNodeEnvironment(); - if (nodeEnv.hasNodeFile()) { - dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataPaths())); - } - } - - private void markNodeDataDirsAsNotEligableForWipe(Node node) { - assert Thread.holdsLock(this); - NodeEnvironment nodeEnv = node.getNodeEnvironment(); - if (nodeEnv.hasNodeFile()) { - dataDirToClean.removeAll(Arrays.asList(nodeEnv.nodeDataPaths())); - } - } - /** * Returns a reference to a random node's {@link ClusterService} */ @@ -1488,26 +1513,22 @@ public ClusterService clusterService() { /** * Returns a reference to a node's {@link ClusterService}. If the given node is null, a random node will be selected. */ - public synchronized ClusterService clusterService(@Nullable String node) { + public ClusterService clusterService(@Nullable String node) { return getInstance(ClusterService.class, node); } /** * Returns an Iterable to all instances for the given class >T< across all nodes in the cluster. */ - public synchronized Iterable getInstances(Class clazz) { - List instances = new ArrayList<>(nodes.size()); - for (NodeAndClient nodeAndClient : nodes.values()) { - instances.add(getInstanceFromNode(clazz, nodeAndClient.node)); - } - return instances; + public Iterable getInstances(Class clazz) { + return nodes.values().stream().map(node -> getInstanceFromNode(clazz, node.node)).collect(Collectors.toList()); } /** * Returns an Iterable to all instances for the given class >T< across all data nodes in the cluster. */ - public synchronized Iterable getDataNodeInstances(Class clazz) { - return getInstances(clazz, new DataNodePredicate()); + public Iterable getDataNodeInstances(Class clazz) { + return getInstances(clazz, DATA_NODE_PREDICATE); } public synchronized T getCurrentMasterNodeInstance(Class clazz) { @@ -1518,11 +1539,11 @@ public synchronized T getCurrentMasterNodeInstance(Class clazz) { * Returns an Iterable to all instances for the given class >T< across all data and master nodes * in the cluster. 
*/ - public synchronized Iterable getDataOrMasterNodeInstances(Class clazz) { - return getInstances(clazz, new DataNodePredicate().or(new MasterNodePredicate())); + public Iterable getDataOrMasterNodeInstances(Class clazz) { + return getInstances(clazz, DATA_NODE_PREDICATE.or(MASTER_NODE_PREDICATE)); } - private synchronized Iterable getInstances(Class clazz, Predicate predicate) { + private Iterable getInstances(Class clazz, Predicate predicate) { Iterable filteredNodes = nodes.values().stream().filter(predicate)::iterator; List instances = new ArrayList<>(); for (NodeAndClient nodeAndClient : filteredNodes) { @@ -1534,16 +1555,16 @@ private synchronized Iterable getInstances(Class clazz, Predicate T getInstance(Class clazz, final String node) { + public T getInstance(Class clazz, final String node) { return getInstance(clazz, nc -> node == null || node.equals(nc.name)); } - public synchronized T getDataNodeInstance(Class clazz) { - return getInstance(clazz, new DataNodePredicate()); + public T getDataNodeInstance(Class clazz) { + return getInstance(clazz, DATA_NODE_PREDICATE); } - public synchronized T getMasterNodeInstance(Class clazz) { - return getInstance(clazz, new MasterNodePredicate()); + public T getMasterNodeInstance(Class clazz) { + return getInstance(clazz, MASTER_NODE_PREDICATE); } private synchronized T getInstance(Class clazz, Predicate predicate) { @@ -1555,17 +1576,17 @@ private synchronized T getInstance(Class clazz, Predicate /** * Returns a reference to a random nodes instances of the given class >T< */ - public synchronized T getInstance(Class clazz) { + public T getInstance(Class clazz) { return getInstance(clazz, nc -> true); } - private synchronized T getInstanceFromNode(Class clazz, Node node) { + private static T getInstanceFromNode(Class clazz, Node node) { return node.injector().getInstance(clazz); } @Override - public synchronized int size() { - return this.nodes.size(); + public int size() { + return nodes.size(); } @Override @@ -1582,7 +1603,7 @@ public InetSocketAddress[] httpAddresses() { */ public synchronized boolean stopRandomDataNode() throws IOException { ensureOpen(); - NodeAndClient nodeAndClient = getRandomNodeAndClient(new DataNodePredicate()); + NodeAndClient nodeAndClient = getRandomNodeAndClient(DATA_NODE_PREDICATE); if (nodeAndClient != null) { logger.info("Closing random node [{}] ", nodeAndClient.name); stopNodesAndClient(nodeAndClient); @@ -1611,9 +1632,10 @@ public synchronized void stopCurrentMasterNode() throws IOException { ensureOpen(); assert size() > 0; String masterNodeName = getMasterName(); - assert nodes.containsKey(masterNodeName); + final NodeAndClient masterNode = nodes.get(masterNodeName); + assert masterNode != null; logger.info("Closing master node [{}] ", masterNodeName); - stopNodesAndClient(nodes.get(masterNodeName)); + stopNodesAndClient(masterNode); } /** @@ -1667,14 +1689,15 @@ private void rebuildUnicastHostFiles(List newNodes) { // cannot be a synchronized method since it's called on other threads from within synchronized startAndPublishNodesAndClients() synchronized (discoveryFileMutex) { try { - Stream unicastHosts = Stream.concat(nodes.values().stream(), newNodes.stream()); + final Collection currentNodes = nodes.values(); + Stream unicastHosts = Stream.concat(currentNodes.stream(), newNodes.stream()); List discoveryFileContents = unicastHosts.map( - nac -> nac.node.injector().getInstance(TransportService.class) - ).filter(Objects::nonNull) + nac -> nac.node.injector().getInstance(TransportService.class) + 
).filter(Objects::nonNull) .map(TransportService::getLocalNode).filter(Objects::nonNull).filter(DiscoveryNode::isMasterNode) .map(n -> n.getAddress().toString()) .distinct().collect(Collectors.toList()); - Set configPaths = Stream.concat(nodes.values().stream(), newNodes.stream()) + Set configPaths = Stream.concat(currentNodes.stream(), newNodes.stream()) .map(nac -> nac.node.getEnvironment().configFile()).collect(Collectors.toSet()); logger.debug("configuring discovery with {} at {}", discoveryFileContents, configPaths); for (final Path configPath : configPaths) { @@ -1687,7 +1710,7 @@ private void rebuildUnicastHostFiles(List newNodes) { } } - private synchronized void stopNodesAndClient(NodeAndClient nodeAndClient) throws IOException { + private void stopNodesAndClient(NodeAndClient nodeAndClient) throws IOException { stopNodesAndClients(Collections.singleton(nodeAndClient)); } @@ -1696,7 +1719,7 @@ private synchronized void stopNodesAndClients(Collection nodeAndC for (NodeAndClient nodeAndClient: nodeAndClients) { removeDisruptionSchemeFromNode(nodeAndClient); - NodeAndClient previous = nodes.remove(nodeAndClient.name); + final NodeAndClient previous = removeNode(nodeAndClient); assert previous == nodeAndClient; nodeAndClient.close(); } @@ -1714,16 +1737,9 @@ public void restartRandomDataNode() throws Exception { /** * Restarts a random data node in the cluster and calls the callback during restart. */ - public void restartRandomDataNode(RestartCallback callback) throws Exception { - restartRandomNode(new DataNodePredicate(), callback); - } - - /** - * Restarts a random node in the cluster and calls the callback during restart. - */ - private synchronized void restartRandomNode(Predicate predicate, RestartCallback callback) throws Exception { + public synchronized void restartRandomDataNode(RestartCallback callback) throws Exception { ensureOpen(); - NodeAndClient nodeAndClient = getRandomNodeAndClient(predicate); + NodeAndClient nodeAndClient = getRandomNodeAndClient(InternalTestCluster.DATA_NODE_PREDICATE); if (nodeAndClient != null) { restartNode(nodeAndClient, callback); } @@ -1761,6 +1777,7 @@ public synchronized void rollingRestart(RestartCallback callback) throws Excepti } private void restartNode(NodeAndClient nodeAndClient, RestartCallback callback) throws Exception { + assert Thread.holdsLock(this); logger.info("Restarting node [{}] ", nodeAndClient.name); if (activeDisruptionScheme != null) { @@ -1780,8 +1797,9 @@ private void restartNode(NodeAndClient nodeAndClient, RestartCallback callback) nodeAndClient.startNode(); success = true; } finally { - if (success == false) - nodes.remove(nodeAndClient.name); + if (success == false) { + removeNode(nodeAndClient); + } } if (activeDisruptionScheme != null) { @@ -1801,7 +1819,16 @@ private void restartNode(NodeAndClient nodeAndClient, RestartCallback callback) } } + private NodeAndClient removeNode(NodeAndClient nodeAndClient) { + assert Thread.holdsLock(this); + final NavigableMap newNodes = new TreeMap<>(nodes); + final NodeAndClient previous = newNodes.remove(nodeAndClient.name); + nodes = Collections.unmodifiableNavigableMap(newNodes); + return previous; + } + private Set excludeMasters(Collection nodeAndClients) { + assert Thread.holdsLock(this); final Set excludedNodeIds = new HashSet<>(); if (autoManageMinMasterNodes && nodeAndClients.size() > 0) { @@ -1820,7 +1847,7 @@ private Set excludeMasters(Collection nodeAndClients) { logger.info("adding voting config exclusions {} prior to restart/shutdown", excludedNodeIds); try { 
client().execute(AddVotingConfigExclusionsAction.INSTANCE, - new AddVotingConfigExclusionsRequest(excludedNodeIds.toArray(new String[0]))).get(); + new AddVotingConfigExclusionsRequest(excludedNodeIds.toArray(Strings.EMPTY_ARRAY))).get(); } catch (InterruptedException | ExecutionException e) { throw new AssertionError("unexpected", e); } @@ -1834,6 +1861,7 @@ private Set excludeMasters(Collection nodeAndClients) { } private void removeExclusions(Set excludedNodeIds) { + assert Thread.holdsLock(this); if (excludedNodeIds.isEmpty() == false) { logger.info("removing voting config exclusions for {} after restart/shutdown", excludedNodeIds); try { @@ -1852,7 +1880,7 @@ public synchronized void fullRestart(RestartCallback callback) throws Exception int numNodesRestarted = 0; final Settings[] newNodeSettings = new Settings[nextNodeId.get()]; Map, List> nodesByRoles = new HashMap<>(); - Set[] rolesOrderedByOriginalStartupOrder = new Set[nextNodeId.get()]; + Set[] rolesOrderedByOriginalStartupOrder = new Set[nextNodeId.get()]; final int minMasterNodes = autoManageMinMasterNodes ? getMinMasterNodes(getMasterNodesCount()) : -1; for (NodeAndClient nodeAndClient : nodes.values()) { callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient()); @@ -1867,7 +1895,7 @@ public synchronized void fullRestart(RestartCallback callback) throws Exception nodesByRoles.computeIfAbsent(discoveryNode.getRoles(), k -> new ArrayList<>()).add(nodeAndClient); } - assert nodesByRoles.values().stream().collect(Collectors.summingInt(List::size)) == nodes.size(); + assert nodesByRoles.values().stream().mapToInt(List::size).sum() == nodes.size(); // randomize start up order, but making sure that: // 1) A data folder that was assigned to a data node will stay so @@ -1885,7 +1913,7 @@ public synchronized void fullRestart(RestartCallback callback) throws Exception final List nodesByRole = nodesByRoles.get(roles); startUpOrder.add(nodesByRole.remove(0)); } - assert nodesByRoles.values().stream().collect(Collectors.summingInt(List::size)) == 0; + assert nodesByRoles.values().stream().mapToInt(List::size).sum() == 0; for (NodeAndClient nodeAndClient : startUpOrder) { logger.info("creating node [{}] ", nodeAndClient.name); @@ -1922,17 +1950,14 @@ public String getMasterName(@Nullable String viaNode) { } } - synchronized Set allDataNodesButN(int numNodes) { - return nRandomDataNodes(numDataNodes() - numNodes); - } - - private synchronized Set nRandomDataNodes(int numNodes) { + synchronized Set allDataNodesButN(int count) { + final int numNodes = numDataNodes() - count; assert size() >= numNodes; Map dataNodes = nodes .entrySet() .stream() - .filter(new EntryNodePredicate(new DataNodePredicate())) + .filter(entry -> DATA_NODE_PREDICATE.test(entry.getValue())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); final HashSet set = new HashSet<>(); final Iterator iterator = dataNodes.keySet().iterator(); @@ -1969,7 +1994,8 @@ public synchronized Set nodesInclude(String index) { * If {@link #bootstrapMasterNodeIndex} is -1 (default), this method does nothing. */ private List bootstrapMasterNodeWithSpecifiedIndex(List allNodesSettings) { - if (getBootstrapMasterNodeIndex() == -1) { // fast-path + assert Thread.holdsLock(this); + if (bootstrapMasterNodeIndex == -1) { // fast-path return allNodesSettings; } @@ -2013,36 +2039,36 @@ private List bootstrapMasterNodeWithSpecifiedIndex(List allN /** * Starts a node with default settings and returns its name. 
*/ - public synchronized String startNode() { + public String startNode() { return startNode(Settings.EMPTY); } /** * Starts a node with the given settings builder and returns its name. */ - public synchronized String startNode(Settings.Builder settings) { + public String startNode(Settings.Builder settings) { return startNode(settings.build()); } /** * Starts a node with the given settings and returns its name. */ - public synchronized String startNode(Settings settings) { + public String startNode(Settings settings) { return startNodes(settings).get(0); } /** * Starts multiple nodes with default settings and returns their names */ - public synchronized List startNodes(int numOfNodes) { + public List startNodes(int numOfNodes) { return startNodes(numOfNodes, Settings.EMPTY); } /** * Starts multiple nodes with the given settings and returns their names */ - public synchronized List startNodes(int numOfNodes, Settings settings) { - return startNodes(Collections.nCopies(numOfNodes, settings).stream().toArray(Settings[]::new)); + public List startNodes(int numOfNodes, Settings settings) { + return startNodes(Collections.nCopies(numOfNodes, settings).toArray(new Settings[0])); } /** @@ -2100,11 +2126,11 @@ public synchronized List startNodes(Settings... extraSettings) { return nodes.stream().map(NodeAndClient::getName).collect(Collectors.toList()); } - public synchronized List startMasterOnlyNodes(int numNodes) { + public List startMasterOnlyNodes(int numNodes) { return startMasterOnlyNodes(numNodes, Settings.EMPTY); } - public synchronized List startMasterOnlyNodes(int numNodes, Settings settings) { + public List startMasterOnlyNodes(int numNodes, Settings settings) { Settings settings1 = Settings.builder() .put(settings) .put(Node.NODE_MASTER_SETTING.getKey(), true) @@ -2113,17 +2139,11 @@ public synchronized List startMasterOnlyNodes(int numNodes, Settings set return startNodes(numNodes, settings1); } - public synchronized List startDataOnlyNodes(int numNodes) { - return startDataOnlyNodes(numNodes, Settings.EMPTY); - } - - public synchronized List startDataOnlyNodes(int numNodes, Settings settings) { - Settings settings1 = Settings.builder() - .put(settings) - .put(Node.NODE_MASTER_SETTING.getKey(), false) - .put(Node.NODE_DATA_SETTING.getKey(), true) - .build(); - return startNodes(numNodes, settings1); + public List startDataOnlyNodes(int numNodes) { + return startNodes( + numNodes, + Settings.builder().put(Settings.EMPTY).put(Node.NODE_MASTER_SETTING.getKey(), false) + .put(Node.NODE_DATA_SETTING.getKey(), true).build()); } /** @@ -2131,7 +2151,7 @@ public synchronized List startDataOnlyNodes(int numNodes, Settings setti * * @param eligibleMasterNodeCount the number of master eligible nodes to use as basis for the min master node setting */ - private int updateMinMasterNodes(int eligibleMasterNodeCount) { + private void updateMinMasterNodes(int eligibleMasterNodeCount) { assert autoManageMinMasterNodes; final int minMasterNodes = getMinMasterNodes(eligibleMasterNodeCount); if (getMasterNodesCount() > 0) { @@ -2146,23 +2166,22 @@ private int updateMinMasterNodes(int eligibleMasterNodeCount) { minMasterNodes, getMasterNodesCount()); } } - return minMasterNodes; } /** calculates a min master nodes value based on the given number of master nodes */ - private int getMinMasterNodes(int eligibleMasterNodes) { + private static int getMinMasterNodes(int eligibleMasterNodes) { return eligibleMasterNodes / 2 + 1; } private int getMasterNodesCount() { - return (int)nodes.values().stream().filter(n 
-> Node.NODE_MASTER_SETTING.get(n.node().settings())).count(); + return (int) nodes.values().stream().filter(n -> Node.NODE_MASTER_SETTING.get(n.node().settings())).count(); } - public synchronized String startMasterOnlyNode() { + public String startMasterOnlyNode() { return startMasterOnlyNode(Settings.EMPTY); } - public synchronized String startMasterOnlyNode(Settings settings) { + public String startMasterOnlyNode(Settings settings) { Settings settings1 = Settings.builder() .put(settings) .put(Node.NODE_MASTER_SETTING.getKey(), true) @@ -2171,10 +2190,11 @@ public synchronized String startMasterOnlyNode(Settings settings) { return startNode(settings1); } - public synchronized String startDataOnlyNode() { + public String startDataOnlyNode() { return startDataOnlyNode(Settings.EMPTY); } - public synchronized String startDataOnlyNode(Settings settings) { + + public String startDataOnlyNode(Settings settings) { Settings settings1 = Settings.builder() .put(settings) .put(Node.NODE_MASTER_SETTING.getKey(), false) @@ -2185,7 +2205,9 @@ public synchronized String startDataOnlyNode(Settings settings) { private synchronized void publishNode(NodeAndClient nodeAndClient) { assert !nodeAndClient.node().isClosed(); - nodes.put(nodeAndClient.name, nodeAndClient); + final NavigableMap newNodes = new TreeMap<>(nodes); + newNodes.put(nodeAndClient.name, nodeAndClient); + nodes = Collections.unmodifiableNavigableMap(newNodes); applyDisruptionSchemeToNode(nodeAndClient); } @@ -2200,10 +2222,10 @@ public int numDataNodes() { @Override public int numDataAndMasterNodes() { - return dataAndMasterNodes().size(); + return filterNodes(nodes, DATA_NODE_PREDICATE.or(MASTER_NODE_PREDICATE)).size(); } - public synchronized int numMasterNodes() { + public int numMasterNodes() { return filterNodes(nodes, NodeAndClient::isMasterEligible).size(); } @@ -2218,7 +2240,8 @@ public void clearDisruptionScheme() { clearDisruptionScheme(true); } - public void clearDisruptionScheme(boolean ensureHealthyCluster) { + // synchronized to prevent concurrently modifying the cluster. + public synchronized void clearDisruptionScheme(boolean ensureHealthyCluster) { if (activeDisruptionScheme != null) { TimeValue expectedHealingTime = activeDisruptionScheme.expectedTimeToHeal(); logger.info("Clearing active scheme {}, expected healing time {}", activeDisruptionScheme, expectedHealingTime); @@ -2245,15 +2268,11 @@ private void removeDisruptionSchemeFromNode(NodeAndClient nodeAndClient) { } } - private synchronized Collection dataNodeAndClients() { - return filterNodes(nodes, new DataNodePredicate()); - } - - private synchronized Collection dataAndMasterNodes() { - return filterNodes(nodes, new DataNodePredicate().or(new MasterNodePredicate())); + private Collection dataNodeAndClients() { + return filterNodes(nodes, DATA_NODE_PREDICATE); } - private synchronized Collection filterNodes(Map map, + private static Collection filterNodes(Map map, Predicate predicate) { return map .values() @@ -2262,51 +2281,16 @@ private synchronized Collection filterNodes(Map { - @Override - public boolean test(NodeAndClient nodeAndClient) { - return DiscoveryNode.isDataNode(nodeAndClient.node.settings()); - } - } - - private static final class MasterNodePredicate implements Predicate { - @Override - public boolean test(NodeAndClient nodeAndClient) { - return DiscoveryNode.isMasterNode(nodeAndClient.node.settings()); - } - } - private static final class NodeNamePredicate implements Predicate { - private final HashSet nodeNames; - - NodeNamePredicate(String... 
nodeNames) { - this.nodeNames = Sets.newHashSet(nodeNames); - } + private final String nodeName; - @Override - public boolean test(NodeAndClient nodeAndClient) { - return nodeNames.contains(nodeAndClient.getName()); + NodeNamePredicate(String nodeName) { + this.nodeName = nodeName; } - } - private static final class NoDataNoMasterNodePredicate implements Predicate { @Override public boolean test(NodeAndClient nodeAndClient) { - return DiscoveryNode.isMasterNode(nodeAndClient.node.settings()) == false && - DiscoveryNode.isDataNode(nodeAndClient.node.settings()) == false; - } - } - - private static final class EntryNodePredicate implements Predicate> { - private final Predicate delegateNodePredicate; - - EntryNodePredicate(Predicate delegateNodePredicate) { - this.delegateNodePredicate = delegateNodePredicate; - } - - @Override - public boolean test(Map.Entry entry) { - return delegateNodePredicate.test(entry.getValue()); + return nodeName.equals(nodeAndClient.getName()); } } @@ -2338,8 +2322,7 @@ synchronized String routingKeyForShard(Index index, int shard, Random random) { } @Override - public synchronized Iterable getClients() { - ensureOpen(); + public Iterable getClients() { return () -> { ensureOpen(); final Iterator iterator = nodes.values().iterator(); @@ -2430,9 +2413,18 @@ public void ensureEstimatedStats() { final CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class, nodeAndClient.node); CircuitBreaker fdBreaker = breakerService.getBreaker(CircuitBreaker.FIELDDATA); assertThat("Fielddata breaker not reset to 0 on node: " + name, fdBreaker.getUsed(), equalTo(0L)); - CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); - assertThat("Accounting breaker not reset to 0 on node: " + name + ", are there still Lucene indices around?", - acctBreaker.getUsed(), equalTo(0L)); + + // Mute this assertion until we have a new Lucene snapshot with https://issues.apache.org/jira/browse/LUCENE-8809. 
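Several accessors above (getInstances, getInstance, size, getClients and the startNode variants) can drop synchronized because publishNode and removeNode now replace the nodes map wholesale with an unmodifiable copy, so unlocked readers always observe a complete snapshot. A rough sketch of that copy-on-write pattern, with a hypothetical holder class standing in for the real NodeAndClient map:

import java.util.Collections;
import java.util.NavigableMap;
import java.util.TreeMap;

// Hypothetical sketch of the copy-on-write map pattern: writers build a new map under the lock
// and publish it as an unmodifiable snapshot; readers iterate without synchronization.
class CopyOnWriteNodes {
    private volatile NavigableMap<String, Object> nodes = Collections.unmodifiableNavigableMap(new TreeMap<>());

    synchronized void add(String name, Object node) {
        final NavigableMap<String, Object> copy = new TreeMap<>(nodes);
        copy.put(name, node);
        nodes = Collections.unmodifiableNavigableMap(copy);
    }

    synchronized Object remove(String name) {
        final NavigableMap<String, Object> copy = new TreeMap<>(nodes);
        final Object previous = copy.remove(name);
        nodes = Collections.unmodifiableNavigableMap(copy);
        return previous;
    }

    int size() { // safe without the lock: reads a fully built, immutable snapshot
        return nodes.size();
    }
}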
+ // try { + // assertBusy(() -> { + // CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); + // assertThat("Accounting breaker not reset to 0 on node: " + name + ", are there still Lucene indices around?", + // acctBreaker.getUsed(), equalTo(0L)); + // }); + // } catch (Exception e) { + // throw new AssertionError("Exception during check for accounting breaker reset to 0", e); + // } + // Anything that uses transport or HTTP can increase the // request breaker (because they use bigarrays), because of // that the breaker can sometimes be incremented from ping @@ -2447,7 +2439,7 @@ public void ensureEstimatedStats() { assertThat("Request breaker not reset to 0 on node: " + name, reqBreaker.getUsed(), equalTo(0L)); }); } catch (Exception e) { - fail("Exception during check for request breaker reset to 0: " + e); + throw new AssertionError("Exception during check for request breaker reset to 0", e); } NodeService nodeService = getInstanceFromNode(NodeService.class, nodeAndClient.node); @@ -2465,7 +2457,7 @@ public void ensureEstimatedStats() { } @Override - public void assertAfterTest() throws IOException { + public synchronized void assertAfterTest() throws IOException { super.assertAfterTest(); assertRequestsFinished(); for (NodeAndClient nodeAndClient : nodes.values()) { @@ -2473,7 +2465,7 @@ public void assertAfterTest() throws IOException { Set shardIds = env.lockedShards(); for (ShardId id : shardIds) { try { - env.shardLock(id, TimeUnit.SECONDS.toMillis(5)).close(); + env.shardLock(id, "InternalTestCluster assert after test", TimeUnit.SECONDS.toMillis(5)).close(); } catch (ShardLockObtainFailedException ex) { fail("Shard " + id + " is still locked after 5 sec waiting"); } @@ -2482,6 +2474,7 @@ public void assertAfterTest() throws IOException { } private void assertRequestsFinished() { + assert Thread.holdsLock(this); if (size() > 0) { for (NodeAndClient nodeAndClient : nodes.values()) { CircuitBreaker inFlightRequestsBreaker = getInstance(CircuitBreakerService.class, nodeAndClient.name) diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java index ccb010e2a915e..22a12e74c3516 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java @@ -24,8 +24,8 @@ import org.apache.logging.log4j.core.filter.RegexFilter; import org.elasticsearch.common.regex.Regex; -import java.util.ArrayList; import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.regex.Pattern; import static org.hamcrest.CoreMatchers.equalTo; @@ -42,7 +42,12 @@ public class MockLogAppender extends AbstractAppender { public MockLogAppender() throws IllegalAccessException { super("mock", RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null); - expectations = new ArrayList<>(); + /* + * We use a copy-on-write array list since log messages could be appended while we are setting up expectations. When that occurs, + * we would run into a concurrent modification exception from the iteration over the expectations in #append, concurrent with a + * modification from #addExpectation. 
+ */ + expectations = new CopyOnWriteArrayList<>(); } public void addExpectation(LoggingExpectation expectation) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java index 84c480b8d510b..20c4601c19953 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java @@ -43,7 +43,7 @@ public class VersionUtils { * rules here match up with the rules in gradle then this should * produce sensible results. * @return a tuple containing versions with backwards compatibility - * guarantees in v1 and versions without the guranteees in v2 + * guarantees in v1 and versions without the guarantees in v2 */ static Tuple, List> resolveReleasedVersions(Version current, Class versionClass) { // group versions into major version @@ -52,7 +52,7 @@ static Tuple, List> resolveReleasedVersions(Version curre // this breaks b/c 5.x is still in version list but master doesn't care about it! //assert majorVersions.size() == 2; // TODO: remove oldVersions, we should only ever have 2 majors in Version - List oldVersions = majorVersions.getOrDefault((int)current.major - 2, Collections.emptyList()); + List> oldVersions = splitByMinor(majorVersions.getOrDefault((int)current.major - 2, Collections.emptyList())); List> previousMajor = splitByMinor(majorVersions.get((int)current.major - 1)); List> currentMajor = splitByMinor(majorVersions.get((int)current.major)); @@ -67,7 +67,11 @@ static Tuple, List> resolveReleasedVersions(Version curre // on a stable or release branch, ie N.x stableVersions = currentMajor; // remove the next maintenance bugfix - moveLastToUnreleased(previousMajor, unreleasedVersions); + final Version prevMajorLastMinor = moveLastToUnreleased(previousMajor, unreleasedVersions); + if (prevMajorLastMinor.revision == 0 && previousMajor.isEmpty() == false) { + // The latest minor in the previous major is a ".0" release, so there must be an unreleased bugfix for the minor before that + moveLastToUnreleased(previousMajor, unreleasedVersions); + } } // remove next minor @@ -78,12 +82,21 @@ static Tuple, List> resolveReleasedVersions(Version curre moveLastToUnreleased(stableVersions, unreleasedVersions); } // remove the next bugfix - moveLastToUnreleased(stableVersions, unreleasedVersions); + if (stableVersions.isEmpty() == false) { + moveLastToUnreleased(stableVersions, unreleasedVersions); + } } - List releasedVersions = Stream.concat(oldVersions.stream(), - Stream.concat(previousMajor.stream(), currentMajor.stream()).flatMap(List::stream)) - .collect(Collectors.toList()); + // If none of the previous major was released, then the last minor and bugfix of the old version was not released either. 
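The MockLogAppender change above swaps ArrayList for CopyOnWriteArrayList because append() may iterate the expectations while a test thread is still calling addExpectation(). A minimal, hypothetical demonstration of why snapshot iteration avoids the ConcurrentModificationException an ArrayList would raise:

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

// Hypothetical, minimal demonstration: the enhanced-for loop iterates a snapshot of the
// copy-on-write list, so adding during iteration is safe; the same loop over an ArrayList
// would throw ConcurrentModificationException.
public class CopyOnWriteDemo {
    public static void main(String[] args) {
        List<String> expectations = new CopyOnWriteArrayList<>();
        expectations.add("expect-1");
        for (String expectation : expectations) {
            expectations.add("added-while-iterating"); // loop still sees only "expect-1"
            System.out.println(expectation);
        }
        System.out.println("final size: " + expectations.size()); // prints 2
    }
}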
+ if (previousMajor.isEmpty()) { + assert currentMajor.isEmpty() : currentMajor; + // minor of the old version is being staged + moveLastToUnreleased(oldVersions, unreleasedVersions); + // bugix of the old version is also being staged + moveLastToUnreleased(oldVersions, unreleasedVersions); + } + List releasedVersions = Stream.of(oldVersions, previousMajor, currentMajor) + .flatMap(List::stream).flatMap(List::stream).collect(Collectors.toList()); Collections.sort(unreleasedVersions); // we add unreleased out of order, so need to sort here return new Tuple<>(Collections.unmodifiableList(releasedVersions), Collections.unmodifiableList(unreleasedVersions)); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java index 24cea25274ff3..c523aa15e58ac 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/DisruptableMockTransport.java @@ -116,8 +116,10 @@ protected void onSendRequest(long requestId, String action, TransportRequest req destinationTransport.execute(action, new Runnable() { @Override public void run() { - switch (getConnectionStatus(destinationTransport.getLocalNode())) { + final ConnectionStatus connectionStatus = getConnectionStatus(destinationTransport.getLocalNode()); + switch (connectionStatus) { case BLACK_HOLE: + case BLACK_HOLE_REQUESTS_ONLY: onBlackholedDuringSend(requestId, action, destinationTransport); break; @@ -128,6 +130,9 @@ public void run() { case CONNECTED: onConnectedDuringSend(requestId, action, request, destinationTransport); break; + + default: + throw new AssertionError("unexpected status: " + connectionStatus); } } @@ -197,11 +202,20 @@ public void sendResponse(final TransportResponse response) { execute(action, new Runnable() { @Override public void run() { - if (destinationTransport.getConnectionStatus(getLocalNode()) != ConnectionStatus.CONNECTED) { - logger.trace("dropping response to {}: channel is not CONNECTED", - requestDescription); - } else { - handleResponse(requestId, response); + final ConnectionStatus connectionStatus = destinationTransport.getConnectionStatus(getLocalNode()); + switch (connectionStatus) { + case CONNECTED: + case BLACK_HOLE_REQUESTS_ONLY: + handleResponse(requestId, response); + break; + + case BLACK_HOLE: + case DISCONNECTED: + logger.trace("dropping response to {}: channel is {}", requestDescription, connectionStatus); + break; + + default: + throw new AssertionError("unexpected status: " + connectionStatus); } } @@ -217,11 +231,20 @@ public void sendResponse(Exception exception) { execute(action, new Runnable() { @Override public void run() { - if (destinationTransport.getConnectionStatus(getLocalNode()) != ConnectionStatus.CONNECTED) { - logger.trace("dropping response to {}: channel is not CONNECTED", - requestDescription); - } else { - handleRemoteError(requestId, exception); + final ConnectionStatus connectionStatus = destinationTransport.getConnectionStatus(getLocalNode()); + switch (connectionStatus) { + case CONNECTED: + case BLACK_HOLE_REQUESTS_ONLY: + handleRemoteError(requestId, exception); + break; + + case BLACK_HOLE: + case DISCONNECTED: + logger.trace("dropping exception response to {}: channel is {}", requestDescription, connectionStatus); + break; + + default: + throw new AssertionError("unexpected status: " + connectionStatus); } } @@ -251,9 
+274,29 @@ public String toString() { } } + /** + * Response type from {@link DisruptableMockTransport#getConnectionStatus(DiscoveryNode)} indicating whether, and how, messages should + * be disrupted on this transport. + */ public enum ConnectionStatus { + /** + * No disruption: deliver messages normally. + */ CONNECTED, - DISCONNECTED, // network requests to or from this node throw a ConnectTransportException - BLACK_HOLE // network traffic to or from the corresponding node is silently discarded + + /** + * Simulate disconnection: inbound and outbound messages throw a {@link ConnectTransportException}. + */ + DISCONNECTED, + + /** + * Simulate a blackhole partition: inbound and outbound messages are silently discarded. + */ + BLACK_HOLE, + + /** + * Simulate an asymmetric partition: outbound messages are silently discarded, but inbound messages are delivered normally. + */ + BLACK_HOLE_REQUESTS_ONLY } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java index 58e126b4bed4a..b1a4c42cbfd8e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -19,7 +19,6 @@ package org.elasticsearch.test.junit.listeners; import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; @@ -38,6 +37,7 @@ import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_ITERATIONS; import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_PREFIX; +import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_TESTCLASS; import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_TESTMETHOD; import static org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.REST_TESTS_BLACKLIST; import static org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase.REST_TESTS_SUITE; @@ -77,8 +77,14 @@ public void testFailure(Failure failure) throws Exception { final String gradlew = Constants.WINDOWS ? "gradlew" : "./gradlew"; final StringBuilder b = new StringBuilder("REPRODUCE WITH: " + gradlew + " "); String task = System.getProperty("tests.task"); - // TODO: enforce (intellij still runs the runner?) or use default "test" but that won't work for integ + + // append Gradle test runner test filter string b.append(task); + b.append(" --tests \""); + b.append(failure.getDescription().getClassName()); + b.append("."); + b.append(failure.getDescription().getMethodName()); + b.append("\""); GradleMessageBuilder gradleMessageBuilder = new GradleMessageBuilder(b); gradleMessageBuilder.appendAllOpts(failure.getDescription()); @@ -106,11 +112,6 @@ public GradleMessageBuilder(StringBuilder b) { public ReproduceErrorMessageBuilder appendAllOpts(Description description) { super.appendAllOpts(description); - if (description.getMethodName() != null) { - //prints out the raw method description instead of methodName(description) which filters out the parameters - super.appendOpt(SYSPROP_TESTMETHOD(), "\"" + description.getMethodName() + "\""); - } - return appendESProperties(); } @@ -128,6 +129,11 @@ public ReproduceErrorMessageBuilder appendOpt(String sysPropName, String value) if (sysPropName.equals(SYSPROP_ITERATIONS())) { // we don't want the iters to be in there! 
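The new BLACK_HOLE_REQUESTS_ONLY status documented above models an asymmetric partition: outbound requests towards the disrupted link are silently dropped while responses travelling back are still delivered. A rough, hypothetical sketch of how a send path and a response path might interpret the four statuses (this is not the DisruptableMockTransport code itself, and the type names below are made up):

// Hypothetical sketch, not the DisruptableMockTransport implementation: how the four statuses
// could be interpreted on the request path versus the response path.
enum LinkStatus { CONNECTED, DISCONNECTED, BLACK_HOLE, BLACK_HOLE_REQUESTS_ONLY }

final class LinkBehaviour {
    static String onRequest(LinkStatus status) {
        switch (status) {
            case CONNECTED:
                return "deliver request";
            case DISCONNECTED:
                return "fail request with ConnectTransportException";
            case BLACK_HOLE:
            case BLACK_HOLE_REQUESTS_ONLY:
                return "silently drop request"; // requests never reach the destination
            default:
                throw new AssertionError("unexpected status: " + status);
        }
    }

    static String onResponse(LinkStatus status) {
        switch (status) {
            case CONNECTED:
            case BLACK_HOLE_REQUESTS_ONLY:
                return "deliver response"; // responses still flow back across the asymmetric partition
            case DISCONNECTED:
            case BLACK_HOLE:
                return "silently drop response";
            default:
                throw new AssertionError("unexpected status: " + status);
        }
    }
}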
return this; } + if (sysPropName.equals(SYSPROP_TESTCLASS())) { + //don't print out the test class, we print it ourselves in appendAllOpts + //without filtering out the parameters (needed for REST tests) + return this; + } if (sysPropName.equals(SYSPROP_TESTMETHOD())) { //don't print out the test method, we print it ourselves in appendAllOpts //without filtering out the parameters (needed for REST tests) @@ -143,7 +149,7 @@ public ReproduceErrorMessageBuilder appendOpt(String sysPropName, String value) return this; } - public ReproduceErrorMessageBuilder appendESProperties() { + private ReproduceErrorMessageBuilder appendESProperties() { appendProperties("tests.es.logger.level"); if (inVerifyPhase()) { // these properties only make sense for integration tests diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 177cdaad941f4..a82f6c82f44e4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -52,6 +52,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.AfterClass; @@ -72,6 +73,7 @@ import java.security.cert.CertificateException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -256,13 +258,13 @@ public static RequestOptions expectVersionSpecificWarnings(Consumer consumer.current(warnings)); } - + /** - * Creates RequestOptions designed to ignore [types removal] warnings but nothing else + * Creates RequestOptions designed to ignore [types removal] warnings but nothing else * @deprecated this method is only required while we deprecate types and can be removed in 8.0 */ @Deprecated - public static RequestOptions allowTypeRemovalWarnings() { + public static RequestOptions allowTypesRemovalWarnings() { Builder builder = RequestOptions.DEFAULT.toBuilder(); builder.setWarningsHandler(new WarningsHandler() { @Override @@ -277,7 +279,7 @@ public boolean warningsShouldFailRequest(List warnings) { } }); return builder.build(); - } + } /** * Construct an HttpHost from the given host and port @@ -292,6 +294,10 @@ protected HttpHost buildHttpHost(String host, int port) { @After public final void cleanUpCluster() throws Exception { if (preserveClusterUponCompletion() == false) { + if (nodeVersions.stream().noneMatch(version -> version.before(Version.V_6_2_0))) { + // wait_for_no_initializing_shards added in 6.2 + ensureNoInitializingShards(); + } wipeCluster(); waitForClusterStateUpdatesToFinish(); logIfThereAreRunningTasks(); @@ -458,6 +464,17 @@ protected boolean preserveILMPoliciesUponCompletion() { } private void wipeCluster() throws Exception { + + // Cleanup rollup before deleting indices. 
A rollup job might have bulks in-flight, + // so we need to fully shut them down first otherwise a job might stall waiting + // for a bulk to finish against a non-existing index (and then fail tests) + if (hasXPack && false == preserveRollupJobsUponCompletion()) { + wipeRollupJobs(); + waitForPendingRollupTasks(); + } + + final Map>> inProgressSnapshots = wipeSnapshots(); + if (preserveIndicesUponCompletion() == false) { // wipe indices try { @@ -498,29 +515,26 @@ private void wipeCluster() throws Exception { } } - wipeSnapshots(); - // wipe cluster settings if (preserveClusterSettings() == false) { wipeClusterSettings(); } - if (hasXPack && false == preserveRollupJobsUponCompletion()) { - wipeRollupJobs(); - waitForPendingRollupTasks(); - } - if (hasXPack && false == preserveILMPoliciesUponCompletion()) { deleteAllPolicies(); } + + assertTrue("Found in progress snapshots [" + inProgressSnapshots + "].", inProgressSnapshots.isEmpty()); } /** * Wipe fs snapshots we created one by one and all repositories so that the next test can create the repositories fresh and they'll * start empty. There isn't an API to delete all snapshots. There is an API to delete all snapshot repositories but that leaves all of * the snapshots intact in the repository. + * @return Map of repository name to list of snapshots found in unfinished state */ - private void wipeSnapshots() throws IOException { + private Map>> wipeSnapshots() throws IOException { + final Map>> inProgressSnapshots = new HashMap<>(); for (Map.Entry repo : entityAsMap(adminClient.performRequest(new Request("GET", "/_snapshot/_all"))).entrySet()) { String repoName = repo.getKey(); Map repoSpec = (Map) repo.getValue(); @@ -533,6 +547,9 @@ private void wipeSnapshots() throws IOException { for (Object snapshot : snapshots) { Map snapshotInfo = (Map) snapshot; String name = (String) snapshotInfo.get("snapshot"); + if (SnapshotState.valueOf((String) snapshotInfo.get("state")).completed() == false) { + inProgressSnapshots.computeIfAbsent(repoName, key -> new ArrayList<>()).add(snapshotInfo); + } logger.debug("wiping snapshot [{}/{}]", repoName, name); adminClient().performRequest(new Request("DELETE", "/_snapshot/" + repoName + "/" + name)); } @@ -542,6 +559,7 @@ private void wipeSnapshots() throws IOException { adminClient().performRequest(new Request("DELETE", "_snapshot/" + repoName)); } } + return inProgressSnapshots; } /** @@ -790,7 +808,20 @@ protected static void ensureGreen(String index) throws IOException { request.addParameter("wait_for_no_relocating_shards", "true"); request.addParameter("timeout", "70s"); request.addParameter("level", "shards"); - client().performRequest(request); + try { + client().performRequest(request); + } catch (ResponseException e) { + if (e.getResponse().getStatusLine().getStatusCode() == HttpStatus.SC_REQUEST_TIMEOUT) { + try { + final Response clusterStateResponse = client().performRequest(new Request("GET", "/_cluster/state")); + fail("timed out waiting for green state for index [" + index + "] " + + "cluster state [" + EntityUtils.toString(clusterStateResponse.getEntity()) + "]"); + } catch (Exception inner) { + e.addSuppressed(inner); + } + } + throw e; + } } /** @@ -802,7 +833,7 @@ protected static void ensureNoInitializingShards() throws IOException { request.addParameter("wait_for_no_initializing_shards", "true"); request.addParameter("timeout", "70s"); request.addParameter("level", "shards"); - client().performRequest(request); + adminClient().performRequest(request); } protected static void 
createIndex(String name, Settings settings) throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index ab155889ac687..cd48890d065df 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -24,6 +24,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Node; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; @@ -291,10 +292,11 @@ private static void validateSpec(ClientYamlSuiteRestSpec restSpec) { } } - private static Tuple readVersionsFromCatNodes(RestClient restClient) throws IOException { + private Tuple readVersionsFromCatNodes(RestClient restClient) throws IOException { // we simply go to the _cat/nodes API and parse all versions in the cluster - Request request = new Request("GET", "/_cat/nodes"); + final Request request = new Request("GET", "/_cat/nodes"); request.addParameter("h", "version,master"); + request.setOptions(getCatNodesVersionMasterRequestOptions()); Response response = restClient.performRequest(request); ClientYamlTestResponse restTestResponse = new ClientYamlTestResponse(response); String nodesCatResponse = restTestResponse.getBodyAsString(); @@ -319,6 +321,10 @@ private static Tuple readVersionsFromCatNodes(RestClient restC return new Tuple<>(version, masterVersion); } + protected RequestOptions getCatNodesVersionMasterRequestOptions() { + return RequestOptions.DEFAULT; + } + public void test() throws IOException { //skip test if it matches one of the blacklist globs for (BlacklistedPathPatternMatcher blacklistedPathMatcher : blacklistPathMatchers) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java index 070343bf68785..0d0f85545774f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java @@ -21,7 +21,6 @@ import com.carrotsearch.randomizedtesting.SeedUtils; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.store.BaseDirectoryWrapper; @@ -55,6 +54,7 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.Random; +import java.util.Set; public class MockFSDirectoryService extends FsDirectoryService { @@ -179,6 +179,12 @@ public synchronized void crash() throws IOException { super.crash(); } } + + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } } final class CloseableDirectory implements Closeable { diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java index a6dbd1561936e..e39f5d03cba07 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java @@ -80,7 +80,7 @@ public class MockTransport implements Transport, LifecycleComponent { public TransportService createTransportService(Settings settings, ThreadPool threadPool, TransportInterceptor interceptor, Function localNodeFactory, @Nullable ClusterSettings clusterSettings, Set taskHeaders) { - StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ConnectionManager(settings, this, threadPool), + StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ConnectionManager(settings, this), settings, this, threadPool); connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> nodeConnected(discoveryNode)); connectionManager.setDefaultGetConnectionBehavior((cm, discoveryNode) -> createConnection(discoveryNode)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index e3d7e72a0bb97..81e10b1f99c9a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -81,7 +81,7 @@ * Matching requests to rules is based on the delegate address associated with the * discovery node of the request, namely by DiscoveryNode.getAddress(). * This address is usually the publish address of the node but can also be a different one - * (for example, @see org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing, which constructs + * (for example, @see org.elasticsearch.discovery.HandshakingTransportAddressConnector, which constructs * fake DiscoveryNode instances where the publish address is one of the bound addresses). 
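The getCatNodesVersionMasterRequestOptions() hook added to ESClientYamlSuiteTestCase above gives secured suites a way to attach credentials to the internal /_cat/nodes version probe. A minimal sketch of building such options with the low-level client's RequestOptions, using a placeholder credential that is purely illustrative:

import org.elasticsearch.client.RequestOptions;

// Hypothetical helper, not part of the change: builds RequestOptions carrying a placeholder
// Authorization header; a secured suite could return something like this from its
// getCatNodesVersionMasterRequestOptions() override so the version probe authenticates.
final class CatNodesRequestOptions {
    static RequestOptions withBasicAuth(String base64Credentials) {
        RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
        builder.addHeader("Authorization", "Basic " + base64Credentials); // placeholder credential
        return builder.build();
    }
}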
*/ public final class MockTransportService extends TransportService { @@ -157,7 +157,7 @@ private MockTransportService(Settings settings, StubbableTransport transport, Th Function localNodeFactory, @Nullable ClusterSettings clusterSettings, Set taskHeaders) { super(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, - new StubbableConnectionManager(new ConnectionManager(settings, transport, threadPool), settings, transport, threadPool)); + new StubbableConnectionManager(new ConnectionManager(settings, transport), settings, transport, threadPool)); this.original = transport.getDelegate(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java index 41ac87f0af576..108e1bf5e24b5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java @@ -42,7 +42,7 @@ public class StubbableConnectionManager extends ConnectionManager { private volatile NodeConnectedBehavior defaultNodeConnectedBehavior = ConnectionManager::nodeConnected; public StubbableConnectionManager(ConnectionManager delegate, Settings settings, Transport transport, ThreadPool threadPool) { - super(settings, transport, threadPool); + super(settings, transport); this.delegate = delegate; this.getConnectionBehaviors = new ConcurrentHashMap<>(); this.nodeConnectedBehaviors = new ConcurrentHashMap<>(); diff --git a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java index 508949b561fb4..6d1d6839e1cc5 100644 --- a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -63,6 +63,14 @@ public final boolean isRunningAgainstOldCluster() { private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); + /** + * @return true if test is running against an old cluster before that last major, in this case + * when System.getProperty("tests.is_old_cluster" == true) and oldClusterVersion is before {@link Version#V_7_0_0} + */ + protected final boolean isRunningAgainstAncientCluster() { + return isRunningAgainstOldCluster() && oldClusterVersion.before(Version.V_7_0_0); + } + public final Version getOldClusterVersion() { return oldClusterVersion; } diff --git a/test/framework/src/test/java/org/elasticsearch/test/disruption/DisruptableMockTransportTests.java b/test/framework/src/test/java/org/elasticsearch/test/disruption/DisruptableMockTransportTests.java index 14aa79e87956e..4060b7f5cd843 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/disruption/DisruptableMockTransportTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/disruption/DisruptableMockTransportTests.java @@ -56,29 +56,32 @@ public class DisruptableMockTransportTests extends ESTestCase { - DiscoveryNode node1; - DiscoveryNode node2; + private DiscoveryNode node1; + private DiscoveryNode node2; - DisruptableMockTransport transport1; - DisruptableMockTransport transport2; + private TransportService service1; + private TransportService service2; - TransportService service1; - TransportService 
service2; + private DeterministicTaskQueue deterministicTaskQueue; - DeterministicTaskQueue deterministicTaskQueue; + private Set> disconnectedLinks; + private Set> blackholedLinks; + private Set> blackholedRequestLinks; - Set> disconnectedLinks; - Set> blackholedLinks; - - ConnectionStatus getConnectionStatus(DiscoveryNode sender, DiscoveryNode destination) { + private ConnectionStatus getConnectionStatus(DiscoveryNode sender, DiscoveryNode destination) { Tuple link = Tuple.tuple(sender, destination); if (disconnectedLinks.contains(link)) { assert blackholedLinks.contains(link) == false; + assert blackholedRequestLinks.contains(link) == false; return ConnectionStatus.DISCONNECTED; } if (blackholedLinks.contains(link)) { + assert blackholedRequestLinks.contains(link) == false; return ConnectionStatus.BLACK_HOLE; } + if (blackholedRequestLinks.contains(link)) { + return ConnectionStatus.BLACK_HOLE_REQUESTS_ONLY; + } return ConnectionStatus.CONNECTED; } @@ -89,13 +92,14 @@ public void initTransports() { disconnectedLinks = new HashSet<>(); blackholedLinks = new HashSet<>(); + blackholedRequestLinks = new HashSet<>(); List transports = new ArrayList<>(); deterministicTaskQueue = new DeterministicTaskQueue( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "dummy").build(), random()); - transport1 = new DisruptableMockTransport(node1, logger) { + final DisruptableMockTransport transport1 = new DisruptableMockTransport(node1, logger) { @Override protected ConnectionStatus getConnectionStatus(DiscoveryNode destination) { return DisruptableMockTransportTests.this.getConnectionStatus(getLocalNode(), destination); @@ -112,7 +116,7 @@ protected void execute(Runnable runnable) { } }; - transport2 = new DisruptableMockTransport(node2, logger) { + final DisruptableMockTransport transport2 = new DisruptableMockTransport(node2, logger) { @Override protected ConnectionStatus getConnectionStatus(DiscoveryNode destination) { return DisruptableMockTransportTests.this.getConnectionStatus(getLocalNode(), destination); @@ -144,7 +148,6 @@ protected void execute(Runnable runnable) { service2.connectToNode(node1); } - private TransportRequestHandler requestHandlerShouldNotBeCalled() { return (request, channel, task) -> { throw new AssertionError("should not be called"); @@ -293,15 +296,21 @@ public void testUnavailableOnRequest() { deterministicTaskQueue.runAllRunnableTasks(); } + public void testUnavailableOnRequestOnly() { + registerRequestHandler(service1, requestHandlerShouldNotBeCalled()); + registerRequestHandler(service2, requestHandlerShouldNotBeCalled()); + blackholedRequestLinks.add(Tuple.tuple(node1, node2)); + send(service1, node2, responseHandlerShouldNotBeCalled()); + deterministicTaskQueue.runAllRunnableTasks(); + } + public void testDisconnectedOnSuccessfulResponse() throws IOException { registerRequestHandler(service1, requestHandlerShouldNotBeCalled()); AtomicReference responseHandlerChannel = new AtomicReference<>(); registerRequestHandler(service2, requestHandlerCaptures(responseHandlerChannel::set)); - AtomicReference responseHandlerException = new AtomicReference<>(); send(service1, node2, responseHandlerShouldNotBeCalled()); deterministicTaskQueue.runAllRunnableTasks(); - assertNull(responseHandlerException.get()); assertNotNull(responseHandlerChannel.get()); disconnectedLinks.add(Tuple.tuple(node2, node1)); @@ -314,10 +323,8 @@ public void testDisconnectedOnExceptionalResponse() throws IOException { AtomicReference responseHandlerChannel = new AtomicReference<>(); 
registerRequestHandler(service2, requestHandlerCaptures(responseHandlerChannel::set)); - AtomicReference responseHandlerException = new AtomicReference<>(); send(service1, node2, responseHandlerShouldNotBeCalled()); deterministicTaskQueue.runAllRunnableTasks(); - assertNull(responseHandlerException.get()); assertNotNull(responseHandlerChannel.get()); disconnectedLinks.add(Tuple.tuple(node2, node1)); @@ -330,10 +337,8 @@ public void testUnavailableOnSuccessfulResponse() throws IOException { AtomicReference responseHandlerChannel = new AtomicReference<>(); registerRequestHandler(service2, requestHandlerCaptures(responseHandlerChannel::set)); - AtomicReference responseHandlerException = new AtomicReference<>(); send(service1, node2, responseHandlerShouldNotBeCalled()); deterministicTaskQueue.runAllRunnableTasks(); - assertNull(responseHandlerException.get()); assertNotNull(responseHandlerChannel.get()); blackholedLinks.add(Tuple.tuple(node2, node1)); @@ -346,10 +351,8 @@ public void testUnavailableOnExceptionalResponse() throws IOException { AtomicReference responseHandlerChannel = new AtomicReference<>(); registerRequestHandler(service2, requestHandlerCaptures(responseHandlerChannel::set)); - AtomicReference responseHandlerException = new AtomicReference<>(); send(service1, node2, responseHandlerShouldNotBeCalled()); deterministicTaskQueue.runAllRunnableTasks(); - assertNull(responseHandlerException.get()); assertNotNull(responseHandlerChannel.get()); blackholedLinks.add(Tuple.tuple(node2, node1)); @@ -357,4 +360,43 @@ public void testUnavailableOnExceptionalResponse() throws IOException { deterministicTaskQueue.runAllRunnableTasks(); } + public void testUnavailableOnRequestOnlyReceivesSuccessfulResponse() throws IOException { + registerRequestHandler(service1, requestHandlerShouldNotBeCalled()); + AtomicReference responseHandlerChannel = new AtomicReference<>(); + registerRequestHandler(service2, requestHandlerCaptures(responseHandlerChannel::set)); + + AtomicBoolean responseHandlerCalled = new AtomicBoolean(); + send(service1, node2, responseHandlerShouldBeCalledNormally(() -> responseHandlerCalled.set(true))); + + deterministicTaskQueue.runAllTasks(); + assertNotNull(responseHandlerChannel.get()); + assertFalse(responseHandlerCalled.get()); + + blackholedRequestLinks.add(Tuple.tuple(node1, node2)); + blackholedRequestLinks.add(Tuple.tuple(node2, node1)); + responseHandlerChannel.get().sendResponse(TransportResponse.Empty.INSTANCE); + + deterministicTaskQueue.runAllRunnableTasks(); + assertTrue(responseHandlerCalled.get()); + } + + public void testUnavailableOnRequestOnlyReceivesExceptionalResponse() throws IOException { + registerRequestHandler(service1, requestHandlerShouldNotBeCalled()); + AtomicReference responseHandlerChannel = new AtomicReference<>(); + registerRequestHandler(service2, requestHandlerCaptures(responseHandlerChannel::set)); + + AtomicBoolean responseHandlerCalled = new AtomicBoolean(); + send(service1, node2, responseHandlerShouldBeCalledExceptionally(e -> responseHandlerCalled.set(true))); + + deterministicTaskQueue.runAllTasks(); + assertNotNull(responseHandlerChannel.get()); + assertFalse(responseHandlerCalled.get()); + + blackholedRequestLinks.add(Tuple.tuple(node1, node2)); + blackholedRequestLinks.add(Tuple.tuple(node2, node1)); + responseHandlerChannel.get().sendResponse(new Exception()); + + deterministicTaskQueue.runAllRunnableTasks(); + assertTrue(responseHandlerCalled.get()); + } } diff --git 
a/test/framework/src/test/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java b/test/framework/src/test/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java index a75b260fa3b7c..a2f6b3ed654e6 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java +++ b/test/framework/src/test/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java @@ -19,7 +19,9 @@ package org.elasticsearch.test.disruption; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; +import org.elasticsearch.cluster.NodeConnectionsService; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; @@ -35,52 +37,67 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; -@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/36205") +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) public class NetworkDisruptionIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class); } - public void testNetworkPartitionWithNodeShutdown() throws IOException { - internalCluster().ensureAtLeastNumDataNodes(2); - String[] nodeNames = internalCluster().getNodeNames(); - NetworkDisruption networkDisruption = - new NetworkDisruption(new TwoPartitions(nodeNames[0], nodeNames[1]), new NetworkDisruption.NetworkUnresponsive()); - internalCluster().setDisruptionScheme(networkDisruption); - networkDisruption.startDisrupting(); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeNames[0])); - internalCluster().clearDisruptionScheme(); - } + private static final Settings DISRUPTION_TUNED_SETTINGS = Settings.builder() + .put(NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.getKey(), "2s") + .build(); - public void testNetworkPartitionRemovalRestoresConnections() throws IOException { - Set nodes = new HashSet<>(); - nodes.addAll(Arrays.asList(internalCluster().getNodeNames())); - nodes.remove(internalCluster().getMasterName()); - if (nodes.size() <= 2) { - internalCluster().ensureAtLeastNumDataNodes(3 - nodes.size()); - nodes.addAll(Arrays.asList(internalCluster().getNodeNames())); - nodes.remove(internalCluster().getMasterName()); - } - Set side1 = new HashSet<>(randomSubsetOf(randomIntBetween(1, nodes.size() - 1), nodes)); + /** + * Creates 3 to 5 mixed-node cluster and splits it into 2 parts. + * The first part is guaranteed to have at least the majority of the nodes, + * so that master could be elected on this side. 
+ */ + private Tuple, Set> prepareDisruptedCluster() { + int numOfNodes = randomIntBetween(3, 5); + internalCluster().setBootstrapMasterNodeIndex(numOfNodes - 1); + Set nodes = new HashSet<>(internalCluster().startNodes(numOfNodes, DISRUPTION_TUNED_SETTINGS)); + ensureGreen(); + assertThat(nodes.size(), greaterThanOrEqualTo(3)); + int majority = nodes.size() / 2 + 1; + Set side1 = new HashSet<>(randomSubsetOf(randomIntBetween(majority, nodes.size() - 1), nodes)); + assertThat(side1.size(), greaterThanOrEqualTo(majority)); Set side2 = new HashSet<>(nodes); side2.removeAll(side1); assertThat(side2.size(), greaterThanOrEqualTo(1)); NetworkDisruption networkDisruption = new NetworkDisruption(new TwoPartitions(side1, side2), - new NetworkDisruption.NetworkDisconnect()); + new NetworkDisruption.NetworkDisconnect()); internalCluster().setDisruptionScheme(networkDisruption); networkDisruption.startDisrupting(); - // sends some requests + + return Tuple.tuple(side1, side2); + } + + public void testClearDisruptionSchemeWhenNodeIsDown() throws IOException { + Tuple, Set> sides = prepareDisruptedCluster(); + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(randomFrom(sides.v2()))); + internalCluster().clearDisruptionScheme(); + } + + public void testNetworkPartitionRemovalRestoresConnections() throws Exception { + Tuple, Set> sides = prepareDisruptedCluster(); + Set side1 = sides.v1(); + Set side2 = sides.v2(); + + // sends some requests to the majority side part client(randomFrom(side1)).admin().cluster().prepareNodesInfo().get(); - client(randomFrom(side2)).admin().cluster().prepareNodesInfo().get(); internalCluster().clearDisruptionScheme(); - // check all connections are restore + // check all connections are restored for (String nodeA : side1) { for (String nodeB : side2) { TransportService serviceA = internalCluster().getInstance(TransportService.class, nodeA); TransportService serviceB = internalCluster().getInstance(TransportService.class, nodeB); - assertTrue(nodeA + " is not connected to " + nodeB, serviceA.nodeConnected(serviceB.getLocalNode())); - assertTrue(nodeB + " is not connected to " + nodeA, serviceB.nodeConnected(serviceA.getLocalNode())); + // TODO assertBusy should not be here, see https://github.com/elastic/elasticsearch/issues/38348 + assertBusy(() -> { + assertTrue(nodeA + " is not connected to " + nodeB, serviceA.nodeConnected(serviceB.getLocalNode())); + assertTrue(nodeB + " is not connected to " + nodeA, serviceB.nodeConnected(serviceA.getLocalNode())); + }); } } } diff --git a/x-pack/build.gradle b/x-pack/build.gradle index a0db6d9a78b4c..9c90bbbbfc269 100644 --- a/x-pack/build.gradle +++ b/x-pack/build.gradle @@ -38,6 +38,5 @@ subprojects { ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-monitoring:${version}": xpackModule('monitoring')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-security:${version}": xpackModule('security')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-sql:${version}": xpackModule('sql')] - ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-upgrade:${version}": xpackModule('upgrade')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-watcher:${version}": xpackModule('watcher')] } diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 518628e9fd0fb..5e56414afed24 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -92,12 +92,13 @@ buildRestTests.docs = fileTree(projectDir) { exclude 'build' // These file simply doesn't pass 
yet. We should figure out how to fix them. exclude 'en/watcher/reference/actions.asciidoc' + exclude 'en/rest-api/security/ssl.asciidoc' } Map setups = buildRestTests.setups setups['my_inactive_watch'] = ''' - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" active: false body: > @@ -216,7 +217,7 @@ setups['library'] = ''' ''' setups['sample_job'] = ''' - do: - xpack.ml.put_job: + ml.put_job: job_id: "sample_job" body: > { @@ -270,7 +271,7 @@ setups['farequote_data'] = setups['farequote_index'] + ''' ''' setups['farequote_job'] = setups['farequote_data'] + ''' - do: - xpack.ml.put_job: + ml.put_job: job_id: "farequote" body: > { @@ -290,7 +291,7 @@ setups['farequote_job'] = setups['farequote_data'] + ''' ''' setups['farequote_datafeed'] = setups['farequote_job'] + ''' - do: - xpack.ml.put_datafeed: + ml.put_datafeed: datafeed_id: "datafeed-farequote" body: > { @@ -300,7 +301,7 @@ setups['farequote_datafeed'] = setups['farequote_job'] + ''' ''' setups['ml_filter_safe_domains'] = ''' - do: - xpack.ml.put_filter: + ml.put_filter: filter_id: "safe_domains" body: > { @@ -364,7 +365,7 @@ setups['server_metrics_data'] = setups['server_metrics_index'] + ''' ''' setups['server_metrics_job'] = setups['server_metrics_data'] + ''' - do: - xpack.ml.put_job: + ml.put_job: job_id: "total-requests" body: > { @@ -386,7 +387,7 @@ setups['server_metrics_job'] = setups['server_metrics_data'] + ''' ''' setups['server_metrics_datafeed'] = setups['server_metrics_job'] + ''' - do: - xpack.ml.put_datafeed: + ml.put_datafeed: datafeed_id: "datafeed-total-requests" body: > { @@ -396,22 +397,22 @@ setups['server_metrics_datafeed'] = setups['server_metrics_job'] + ''' ''' setups['server_metrics_openjob'] = setups['server_metrics_datafeed'] + ''' - do: - xpack.ml.open_job: + ml.open_job: job_id: "total-requests" ''' setups['server_metrics_startdf'] = setups['server_metrics_openjob'] + ''' - do: - xpack.ml.start_datafeed: + ml.start_datafeed: datafeed_id: "datafeed-total-requests" ''' setups['calendar_outages'] = ''' - do: - xpack.ml.put_calendar: + ml.put_calendar: calendar_id: "planned-outages" ''' setups['calendar_outages_addevent'] = setups['calendar_outages'] + ''' - do: - xpack.ml.post_calendar_events: + ml.post_calendar_events: calendar_id: "planned-outages" body: > { "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "planned-outages" } @@ -420,12 +421,12 @@ setups['calendar_outages_addevent'] = setups['calendar_outages'] + ''' ''' setups['calendar_outages_openjob'] = setups['server_metrics_openjob'] + ''' - do: - xpack.ml.put_calendar: + ml.put_calendar: calendar_id: "planned-outages" ''' setups['calendar_outages_addjob'] = setups['server_metrics_openjob'] + ''' - do: - xpack.ml.put_calendar: + ml.put_calendar: calendar_id: "planned-outages" body: > { @@ -434,7 +435,7 @@ setups['calendar_outages_addjob'] = setups['server_metrics_openjob'] + ''' ''' setups['calendar_outages_addevent'] = setups['calendar_outages_addjob'] + ''' - do: - xpack.ml.post_calendar_events: + ml.post_calendar_events: calendar_id: "planned-outages" body: > { "events" : [ @@ -473,7 +474,7 @@ setups['sensor_rollup_job'] = ''' node: type: keyword - do: - xpack.rollup.put_job: + rollup.put_job: id: "sensor" body: > { @@ -541,7 +542,7 @@ setups['sensor_started_rollup_job'] = ''' {"timestamp": 1516297294000, "temperature": 202, "voltage": 4.0, "node": "c"} - do: - xpack.rollup.put_job: + rollup.put_job: id: "sensor" body: > { @@ -571,7 +572,7 @@ 
setups['sensor_started_rollup_job'] = ''' ] } - do: - xpack.rollup.start_job: + rollup.start_job: id: "sensor" ''' diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index c59c44312ae60..7e14a6a0ee9b7 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -81,6 +81,7 @@ include::security/authenticate.asciidoc[] include::security/change-password.asciidoc[] include::security/clear-cache.asciidoc[] include::security/clear-roles-cache.asciidoc[] +include::security/create-api-keys.asciidoc[] include::security/put-app-privileges.asciidoc[] include::security/create-role-mappings.asciidoc[] include::security/create-roles.asciidoc[] @@ -91,14 +92,13 @@ include::security/delete-roles.asciidoc[] include::security/delete-users.asciidoc[] include::security/disable-users.asciidoc[] include::security/enable-users.asciidoc[] +include::security/get-api-keys.asciidoc[] include::security/get-app-privileges.asciidoc[] include::security/get-role-mappings.asciidoc[] include::security/get-roles.asciidoc[] include::security/get-tokens.asciidoc[] include::security/get-users.asciidoc[] include::security/has-privileges.asciidoc[] +include::security/invalidate-api-keys.asciidoc[] include::security/invalidate-tokens.asciidoc[] include::security/ssl.asciidoc[] -include::security/create-api-keys.asciidoc[] -include::security/invalidate-api-keys.asciidoc[] -include::security/get-api-keys.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc index 741a9d79feaf0..72ebc27a710c4 100644 --- a/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc @@ -1,6 +1,9 @@ [role="xpack"] [[security-api-create-api-key]] -=== Create API Key API +=== Create API key API +++++ +Create API keys +++++ Creates an API key for access without requiring basic authentication. @@ -17,14 +20,15 @@ you can explicitly enable the `xpack.security.authc.api_key.enabled` setting. Wh you are running in production mode, a bootstrap check prevents you from enabling the API key service unless you also enable TLS on the HTTP interface. -A successful create API key API call returns a JSON structure that contains -the unique id, the name to identify API key, the API key and the expiration if -applicable for the API key in milliseconds. +A successful create API key API call returns a JSON structure that contains the +API key, its unique id, and its name. If applicable, it also returns expiration +information for the API key in milliseconds. -NOTE: By default API keys never expire. You can specify expiration at the time of -creation for the API keys. +NOTE: By default, API keys never expire. You can specify expiration information +when you create the API keys. -See <> for configuration settings related to API key service. +See <> for configuration settings related to API key +service. ==== Request Body @@ -33,15 +37,16 @@ The following parameters can be specified in the body of a POST or PUT request: `name`:: (string) Specifies the name for this API key. -`role_descriptors`:: -(array-of-role-descriptor) Optional array of role descriptor for this API key. The role descriptor -must be a subset of permissions of the authenticated user. The structure of role -descriptor is same as the request for create role API. For more details on role -see <>. 
-If the role descriptors are not provided then permissions of the authenticated user are applied. +`role_descriptors` (required):: +(array-of-role-descriptor) An array of role descriptors for this API key. This +parameter is required but can be an empty array, which applies the permissions +of the authenticated user. If you supply role descriptors, they must be a subset +of the authenticated user's permissions. The structure of role descriptor is the +same as the request for create role API. For more details, see +<>. `expiration`:: -(string) Optional expiration time for the API key. By default API keys never expire. +(string) Optional expiration time for the API key. By default, API keys never expire. ==== Examples diff --git a/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc index ab2ef770cb124..0e5865c3cd9ae 100644 --- a/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [[security-api-get-api-key]] -=== Get API Key information API +=== Get API key information API ++++ Get API key information ++++ @@ -22,63 +22,98 @@ The following parameters can be specified in the query parameters of a GET reque pertain to retrieving api keys: `id` (optional):: -(string) An API key id. This parameter cannot be used with any of `name`, `realm_name` or - `username` are used. +(string) An API key id. This parameter cannot be used with any of `name`, +`realm_name` or `username` are used. `name` (optional):: -(string) An API key name. This parameter cannot be used with any of `id`, `realm_name` or - `username` are used. +(string) An API key name. This parameter cannot be used with any of `id`, +`realm_name` or `username` are used. `realm_name` (optional):: -(string) The name of an authentication realm. This parameter cannot be used with either `id` or `name`. +(string) The name of an authentication realm. This parameter cannot be used with +either `id` or `name`. `username` (optional):: -(string) The username of a user. This parameter cannot be used with either `id` or `name`. +(string) The username of a user. This parameter cannot be used with either `id` +or `name`. NOTE: While all parameters are optional, at least one of them is required. ==== Examples -The following example to retrieve the API key identified by specified `id`: +If you create an API key as follows: + +[source, js] +------------------------------------------------------------ +POST /_security/api_key +{ + "name": "my-api-key", + "role_descriptors": {} +} +------------------------------------------------------------ +// CONSOLE +// TEST + +A successful call returns a JSON structure that provides +API key information. 
For example: [source,js] -------------------------------------------------- -GET /_security/api_key?id=dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ== +{ + "id":"VuaCfGcBCdbkQm-e5aOx", + "name":"my-api-key", + "api_key":"ui2lp2axTNmsyakw9tvNnw" +} -------------------------------------------------- -// NOTCONSOLE +// TESTRESPONSE[s/VuaCfGcBCdbkQm-e5aOx/$body.id/] +// TESTRESPONSE[s/ui2lp2axTNmsyakw9tvNnw/$body.api_key/] -whereas the following example to retrieve the API key identified by specified `name`: +You can use the following example to retrieve the API key by ID: [source,js] -------------------------------------------------- -GET /_security/api_key?name=hadoop_myuser_key +GET /_security/api_key?id=VuaCfGcBCdbkQm-e5aOx -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST[s/VuaCfGcBCdbkQm-e5aOx/$body.id/] +// TEST[continued] + +You can use the following example to retrieve the API key by name: + +[source,js] +-------------------------------------------------- +GET /_security/api_key?name=my-api-key +-------------------------------------------------- +// CONSOLE +// TEST[continued] The following example retrieves all API keys for the `native1` realm: [source,js] -------------------------------------------------- -GET /_xpack/api_key?realm_name=native1 +GET /_security/api_key?realm_name=native1 -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST[continued] The following example retrieves all API keys for the user `myuser` in all realms: [source,js] -------------------------------------------------- -GET /_xpack/api_key?username=myuser +GET /_security/api_key?username=myuser -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST[continued] Finally, the following example retrieves all API keys for the user `myuser` in the `native1` realm immediately: [source,js] -------------------------------------------------- -GET /_xpack/api_key?username=myuser&realm_name=native1 +GET /_security/api_key?username=myuser&realm_name=native1 -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST[continued] A successful call returns a JSON structure that contains the information of one or more API keys that were retrieved. @@ -112,7 +147,8 @@ A successful call returns a JSON structure that contains the information of one <2> Id for the API key <3> Name of the API key <4> Creation time for the API key in milliseconds -<5> optional expiration time for the API key in milliseconds -<6> invalidation status for the API key, `true` if the key has been invalidated else `false` -<7> principal for which this API key was created -<8> realm name of the principal for which this API key was created +<5> Optional expiration time for the API key in milliseconds +<6> Invalidation status for the API key. If the key has been invalidated, it has +a value of `true`. Otherwise, it is `false`. +<7> Principal for which this API key was created +<8> Realm name of the principal for which this API key was created diff --git a/x-pack/docs/en/rest-api/security/has-privileges.asciidoc b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc index 92f1081bc2b85..984a8e36f0a93 100644 --- a/x-pack/docs/en/rest-api/security/has-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc @@ -29,11 +29,13 @@ privilege is assigned to the user. `index`:: `names`::: (list) A list of indices. 
-`allow_restricted_indices`::: (boolean) If `names` contains internal restricted -that also have to be covered by the has-privilege check, then this has to be -set to `true`. By default this is `false` because restricted indices should -generaly not be "visible" to APIs. For most use cases it is safe to ignore -this parameter. +`allow_restricted_indices`::: (boolean) This needs to be set to `true` (default +is `false`) if using wildcards or regexps for patterns that cover restricted +indices. Implicitly, restricted indices do not match index patterns because +restricted indices usually have limited privileges and including them in +pattern tests would render most such tests `false`. If restricted indices are +explicitly included in the `names` list, privileges will be checked against +them regardless of the value of `allow_restricted_indices`. `privileges`::: (list) A list of the privileges that you want to check for the specified indices. diff --git a/x-pack/docs/en/rest-api/security/invalidate-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/invalidate-api-keys.asciidoc index 4809e267ebd80..8e496fb58664f 100644 --- a/x-pack/docs/en/rest-api/security/invalidate-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/invalidate-api-keys.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [[security-api-invalidate-api-key]] -=== Invalidate API Key API +=== Invalidate API key API ++++ Invalidate API key ++++ @@ -13,8 +13,8 @@ Invalidates one or more API keys. ==== Description -The API keys created by <> can be invalidated -using this API. +The API keys created by <> can be +invalidated using this API. ==== Request Body @@ -22,79 +22,116 @@ The following parameters can be specified in the body of a DELETE request and pertain to invalidating api keys: `id` (optional):: -(string) An API key id. This parameter cannot be used with any of `name`, `realm_name` or - `username` are used. +(string) An API key id. This parameter cannot be used with any of `name`, +`realm_name` or `username` are used. `name` (optional):: -(string) An API key name. This parameter cannot be used with any of `id`, `realm_name` or - `username` are used. +(string) An API key name. This parameter cannot be used with any of `id`, +`realm_name` or `username` are used. `realm_name` (optional):: -(string) The name of an authentication realm. This parameter cannot be used with either `api_key_id` or `api_key_name`. +(string) The name of an authentication realm. This parameter cannot be used with +either `api_key_id` or `api_key_name`. `username` (optional):: -(string) The username of a user. This parameter cannot be used with either `api_key_id` or `api_key_name`. +(string) The username of a user. This parameter cannot be used with either +`api_key_id` or `api_key_name`. NOTE: While all parameters are optional, at least one of them is required. ==== Examples +If you create an API key as follows: + +[source, js] +------------------------------------------------------------ +POST /_security/api_key +{ + "name": "my-api-key", + "role_descriptors": {} +} +------------------------------------------------------------ +// CONSOLE +// TEST + +A successful call returns a JSON structure that provides +API key information. 
For example: + +[source,js] +-------------------------------------------------- +{ + "id":"VuaCfGcBCdbkQm-e5aOx", + "name":"my-api-key", + "api_key":"ui2lp2axTNmsyakw9tvNnw" +} +-------------------------------------------------- +// TESTRESPONSE[s/VuaCfGcBCdbkQm-e5aOx/$body.id/] +// TESTRESPONSE[s/ui2lp2axTNmsyakw9tvNnw/$body.api_key/] + The following example invalidates the API key identified by specified `id` immediately: [source,js] -------------------------------------------------- DELETE /_security/api_key { - "id" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" + "id" : "VuaCfGcBCdbkQm-e5aOx" } -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST[s/VuaCfGcBCdbkQm-e5aOx/$body.id/] +// TEST[continued] -whereas the following example invalidates the API key identified by specified `name` immediately: +The following example invalidates the API key identified by specified `name` immediately: [source,js] -------------------------------------------------- DELETE /_security/api_key { - "name" : "hadoop_myuser_key" + "name" : "my-api-key" } -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST -The following example invalidates all API keys for the `native1` realm immediately: +The following example invalidates all API keys for the `native1` realm +immediately: [source,js] -------------------------------------------------- -DELETE /_xpack/api_key +DELETE /_security/api_key { "realm_name" : "native1" } -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST -The following example invalidates all API keys for the user `myuser` in all realms immediately: +The following example invalidates all API keys for the user `myuser` in all +realms immediately: [source,js] -------------------------------------------------- -DELETE /_xpack/api_key +DELETE /_security/api_key { "username" : "myuser" } -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST Finally, the following example invalidates all API keys for the user `myuser` in the `native1` realm immediately: [source,js] -------------------------------------------------- -DELETE /_xpack/api_key +DELETE /_security/api_key { "username" : "myuser", "realm_name" : "native1" } -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST A successful call returns a JSON structure that contains the ids of the API keys that were invalidated, the ids of the API keys that had already been invalidated, and potentially a list of errors encountered while invalidating diff --git a/x-pack/docs/en/rest-api/security/invalidate-tokens.asciidoc b/x-pack/docs/en/rest-api/security/invalidate-tokens.asciidoc index 4056bb81bed63..13465dce6e862 100644 --- a/x-pack/docs/en/rest-api/security/invalidate-tokens.asciidoc +++ b/x-pack/docs/en/rest-api/security/invalidate-tokens.asciidoc @@ -22,7 +22,8 @@ can no longer be used. That time period is defined by the The refresh tokens returned by the <> are only valid for 24 hours. They can also be used exactly once. -If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. +If you want to invalidate one or more access or refresh tokens immediately, use +this invalidate token API. ==== Request Body @@ -31,26 +32,56 @@ The following parameters can be specified in the body of a DELETE request and pertain to invalidating tokens: `token` (optional):: -(string) An access token. 
This parameter cannot be used any of `refresh_token`, `realm_name` or - `username` are used. +(string) An access token. This parameter cannot be used any of `refresh_token`, +`realm_name` or `username` are used. `refresh_token` (optional):: -(string) A refresh token. This parameter cannot be used any of `refresh_token`, `realm_name` or - `username` are used. +(string) A refresh token. This parameter cannot be used any of `refresh_token`, +`realm_name` or `username` are used. `realm_name` (optional):: -(string) The name of an authentication realm. This parameter cannot be used with either `refresh_token` or `token`. +(string) The name of an authentication realm. This parameter cannot be used with +either `refresh_token` or `token`. `username` (optional):: -(string) The username of a user. This parameter cannot be used with either `refresh_token` or `token` +(string) The username of a user. This parameter cannot be used with either +`refresh_token` or `token` -NOTE: While all parameters are optional, at least one of them is required. More specifically, either one of `token` -or `refresh_token` parameters is required. If none of these two are specified, then `realm_name` and/or `username` -need to be specified. +NOTE: While all parameters are optional, at least one of them is required. More +specifically, either one of `token` or `refresh_token` parameters is required. +If none of these two are specified, then `realm_name` and/or `username` need to +be specified. ==== Examples -The following example invalidates the specified token immediately: +For example, if you create a token using the `client_credentials` grant type as +follows: + +[source,js] +-------------------------------------------------- +POST /_security/oauth2/token +{ + "grant_type" : "client_credentials" +} +-------------------------------------------------- +// CONSOLE +// TEST + +The get token API returns the following information about the access token: + +[source,js] +-------------------------------------------------- +{ + "access_token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", + "type" : "Bearer", + "expires_in" : 1200 +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] + +This access token can now be immediately invalidated, as shown in the following +example: [source,js] -------------------------------------------------- @@ -59,57 +90,97 @@ DELETE /_security/oauth2/token "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" } -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] +// TEST[continued] + +If you used the `password` grant type to obtain a token for a user, the response +might also contain a refresh token. 
For example: + +[source,js] +-------------------------------------------------- +POST /_security/oauth2/token +{ + "grant_type" : "password", + "username" : "test_admin", + "password" : "x-pack-test-password" +} +-------------------------------------------------- +// CONSOLE +// TEST + +The get token API returns the following information: + +[source,js] +-------------------------------------------------- +{ + "access_token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", + "type" : "Bearer", + "expires_in" : 1200, + "refresh_token": "vLBPvmAB6KvwvJZr27cS" +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] +// TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] -whereas the following example invalidates the specified refresh token immediately: +The refresh token can now also be immediately invalidated as shown +in the following example: [source,js] -------------------------------------------------- DELETE /_security/oauth2/token { - "refresh_token" : "movUJjPGRRC0PQ7+NW0eag" + "refresh_token" : "vLBPvmAB6KvwvJZr27cS" } -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] +// TEST[continued] -The following example invalidates all access tokens and refresh tokens for the `saml1` realm immediately: +The following example invalidates all access tokens and refresh tokens for the +`saml1` realm immediately: [source,js] -------------------------------------------------- -DELETE /_xpack/security/oauth2/token +DELETE /_security/oauth2/token { "realm_name" : "saml1" } -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST -The following example invalidates all access tokens and refresh tokens for the user `myuser` in all realms immediately: +The following example invalidates all access tokens and refresh tokens for the +user `myuser` in all realms immediately: [source,js] -------------------------------------------------- -DELETE /_xpack/security/oauth2/token +DELETE /_security/oauth2/token { "username" : "myuser" } -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST -Finally, the following example invalidates all access tokens and refresh tokens for the user `myuser` in - the `saml1` realm immediately: +Finally, the following example invalidates all access tokens and refresh tokens +for the user `myuser` in the `saml1` realm immediately: [source,js] -------------------------------------------------- -DELETE /_xpack/security/oauth2/token +DELETE /_security/oauth2/token { "username" : "myuser", "realm_name" : "saml1" } -------------------------------------------------- -// NOTCONSOLE +// CONSOLE +// TEST -A successful call returns a JSON structure that contains the number of tokens that were invalidated, the number -of tokens that had already been invalidated, and potentially a list of errors encountered while invalidating -specific tokens. +A successful call returns a JSON structure that contains the number of tokens +that were invalidated, the number of tokens that had already been invalidated, +and potentially a list of errors encountered while invalidating specific tokens. 
[source,js] -------------------------------------------------- diff --git a/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc index 54581d4c72195..1b41d89db0bf1 100644 --- a/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc +++ b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc @@ -49,7 +49,7 @@ The value specified in the field rule can be one of the following types: | Simple String | Exactly matches the provided value. | "esadmin" | Wildcard String | Matches the provided value using a wildcard. | "*,dc=example,dc=com" | Regular Expression | Matches the provided value using a - {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp]. | "/.\*-admin[0-9]*/" + {ref}/regexp-syntax.html[Lucene regexp]. | "/.\*-admin[0-9]*/" | Number | Matches an equivalent numerical value. | 7 | Null | Matches a null or missing value. | null | Array | Tests each element in the array in diff --git a/x-pack/docs/en/rest-api/security/ssl.asciidoc b/x-pack/docs/en/rest-api/security/ssl.asciidoc index d3480ac8bc006..932bbe6014d7a 100644 --- a/x-pack/docs/en/rest-api/security/ssl.asciidoc +++ b/x-pack/docs/en/rest-api/security/ssl.asciidoc @@ -78,12 +78,13 @@ node of {es}: [source,js] -------------------------------------------------- -GET /_xpack/certificates +GET /_ssl/certificates -------------------------------------------------- // CONSOLE -// TEST[skip:todo] +// TEST The API returns the following results: + [source,js] ---- [ @@ -116,4 +117,4 @@ The API returns the following results: } ] ---- -// NOTCONSOLE \ No newline at end of file +// NOTCONSOLE diff --git a/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc index 9e97e8d754a0e..1496a6dc7b315 100644 --- a/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc @@ -5,7 +5,7 @@ Ack watch ++++ -{stack-ov}/actions.html#actions-ack-throttle[Acknowledging a watch] enables you +<> enables you to manually throttle execution of the watch's actions. An action's _acknowledgement state_ is stored in the `status.actions..ack.state` structure. diff --git a/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc index 74a98a00fa423..5d8fd20cc443e 100644 --- a/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc @@ -5,8 +5,7 @@ Activate watch ++++ -A watch can be either -{stack-ov}/how-watcher-works.html#watch-active-state[active or inactive]. This +A watch can be either <>. This API enables you to activate a currently inactive watch. [float] diff --git a/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc index 59625c1391119..cbb3f1c52e3fc 100644 --- a/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc @@ -5,8 +5,7 @@ Deactivate watch ++++ -A watch can be either -{stack-ov}/how-watcher-works.html#watch-active-state[active or inactive]. This +A watch can be either <>. This API enables you to deactivate a currently active watch. 
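As a rough sketch (assuming a hypothetical watch id of `my_watch`, and the `_watcher` request paths used elsewhere in this change), deactivating and later re-activating a watch looks like this:

[source,js]
--------------------------------------------------
// Stop the watch's actions from executing
PUT _watcher/watch/my_watch/_deactivate

// Resume execution later
PUT _watcher/watch/my_watch/_activate
--------------------------------------------------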
[float] diff --git a/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc index 8c7f747969373..6958709d638d1 100644 --- a/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc @@ -59,14 +59,14 @@ This API supports the following fields: that will be used during the watch execution | `ignore_condition` | no | false | When set to `true`, the watch execution uses the - {stack-ov}/condition-always.html[Always Condition]. + <>. This can also be specified as an HTTP parameter. | `alternative_input` | no | null | When present, the watch uses this object as a payload instead of executing its own input. | `action_modes` | no | null | Determines how to handle the watch actions as part of the - watch execution. See <> + watch execution. See <> for more information. | `record_execution` | no | false | When set to `true`, the watch record representing the watch @@ -75,8 +75,7 @@ This API supports the following fields: watch is updated, possibly throttling subsequent executions. This can also be specified as an HTTP parameter. -| `watch` | no | null | When present, this - {stack-ov}/how-watcher-works.html#watch-definition[watch] is used +| `watch` | no | null | When present, this <> is used instead of the one specified in the request. This watch is not persisted to the index and record_execution cannot be set. |====== @@ -94,12 +93,12 @@ are five possible modes an action can be associated with: | `simulate` | The action execution is simulated. Each action type define its own simulation operation mode. For example, the - {stack-ov}/actions-email.html[email] action creates + <> creates the email that would have been sent but does not actually send it. In this mode, the action might be throttled if the current state of the watch indicates it should be. -| `force_simulate` | Similar to the the `simulate` mode, except the action is +| `force_simulate` | Similar to the `simulate` mode, except the action is not be throttled even if the current state of the watch indicates it should be. diff --git a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc index 89b79b5680056..84c5ef47380e9 100644 --- a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc @@ -5,7 +5,7 @@ Put watch ++++ -The PUT watch API either registers a new watch in {watcher} or update an +The PUT watch API either registers a new watch in {watcher} or updates an existing one. [float] @@ -21,14 +21,14 @@ the `.watches` index and its trigger is immediately registered with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler is the trigger engine. -IMPORTANT: Putting a watch must be done via this API only. Do not put a watch - directly to the `.watches` index using the Elasticsearch Index API. - If {es} {security-features} are enabled, make sure no `write` - privileges are granted to anyone over the `.watches` index. +IMPORTANT: You must use {kib} or this API to create a watch. Do not put a watch + directly to the `.watches` index using the Elasticsearch index API. + If {es} {security-features} are enabled, do not give users `write` + privileges on the `.watches` index. When adding a watch you can also define its initial -{xpack-ref}/how-watcher-works.html#watch-active-state[active state]. You do that -by setting the `active` parameter. +<>. You do that by setting the `active` +parameter. 
[float] ==== Path Parameters @@ -52,30 +52,41 @@ A watch has the following fields: |====== | Name | Description -| `trigger` | The {xpack-ref}/trigger.html[trigger] that defines when +| `trigger` | The <> that defines when the watch should run. -| `input` | The {xpack-ref}/input.html[input] that defines the input +| `input` | The <> that defines the input that loads the data for the watch. -| `condition` | The {xpack-ref}/condition.html[condition] that defines if +| `condition` | The <> that defines if the actions should be run. -| `actions` | The list of {xpack-ref}/actions.html[actions] that will be +| `actions` | The list of <> that will be run if the condition matches | `metadata` | Metadata json that will be copied into the history entries. | `throttle_period` | The minimum time between actions being run, the default for this is 5 seconds. This default can be changed in the - config file with the setting `xpack.watcher.throttle.period.default_period`. + config file with the setting + `xpack.watcher.throttle.period.default_period`. If both + this value and the `throttle_period_in_millis` parameter + are specified, {watcher} uses the last parameter + included in the request. + +| `throttle_period_in_millis` | Minimum time in milliseconds between actions + being run. Defaults to `5000`. If both this + value and the `throttle_period` parameter are + specified, {watcher} uses the last parameter + included in the request. + |====== [float] ==== Authorization You must have `manage_watcher` cluster privileges to use this API. For more -information, see {xpack-ref}/security-privileges.html[Security Privileges]. +information, see {stack-ov}/security-privileges.html[Security Privileges]. [float] ==== Security Integration @@ -148,7 +159,7 @@ PUT _watcher/watch/my-watch // CONSOLE When you add a watch you can also define its initial -{xpack-ref}/how-watcher-works.html#watch-active-state[active state]. You do that +<>. You do that by setting the `active` parameter. The following command adds a watch and sets it to be inactive by default: diff --git a/x-pack/docs/en/security/auditing/event-types.asciidoc b/x-pack/docs/en/security/auditing/event-types.asciidoc index 19947e40b5553..dfa0c72b5e2d9 100644 --- a/x-pack/docs/en/security/auditing/event-types.asciidoc +++ b/x-pack/docs/en/security/auditing/event-types.asciidoc @@ -18,7 +18,7 @@ The following is a list of the events that can be generated: realm type. | `access_denied` | | | Logged when an authenticated user attempts to execute an action they do not have the necessary - <> to perform. + <> to perform. | `access_granted` | | | Logged when an authenticated user attempts to execute an action they have the necessary privilege to perform. When the `system_access_granted` event is included, all system @@ -28,7 +28,7 @@ The following is a list of the events that can be generated: another user that they have the necessary privileges to do. | `run_as_denied` | | | Logged when an authenticated user attempts to <> another user action they do not have the necessary - <> to do so. + <> to do so. | `tampered_request` | | | Logged when the {security-features} detect that the request has been tampered with. 
Typically relates to `search/scroll` requests when the scroll ID is believed to have been diff --git a/x-pack/docs/en/security/auditing/index.asciidoc b/x-pack/docs/en/security/auditing/index.asciidoc index ba79779629a44..8e708efaffca9 100644 --- a/x-pack/docs/en/security/auditing/index.asciidoc +++ b/x-pack/docs/en/security/auditing/index.asciidoc @@ -1,12 +1,7 @@ - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/overview.asciidoc include::overview.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/event-types.asciidoc include::event-types.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/output-logfile.asciidoc include::output-logfile.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing/auditing-search-queries.asciidoc include::auditing-search-queries.asciidoc[] diff --git a/x-pack/docs/en/security/auditing/output-logfile.asciidoc b/x-pack/docs/en/security/auditing/output-logfile.asciidoc index f5b1dbad79ae9..422d987fe343f 100644 --- a/x-pack/docs/en/security/auditing/output-logfile.asciidoc +++ b/x-pack/docs/en/security/auditing/output-logfile.asciidoc @@ -132,7 +132,7 @@ Please take time to review these policies whenever your system architecture chan A policy is a named set of filter rules. Each filter rule applies to a single event attribute, one of the `users`, `realms`, `roles` or `indices` attributes. The filter rule defines -a list of {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp], *any* of which has to match the value of the audit +a list of {ref}/regexp-syntax.html[Lucene regexp], *any* of which has to match the value of the audit event attribute for the rule to match. A policy matches an event if *all* the rules comprising it match the event. An audit event is ignored, therefore not printed, if it matches *any* policy. All other diff --git a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc new file mode 100644 index 0000000000000..0f75cb3f7a445 --- /dev/null +++ b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc @@ -0,0 +1,80 @@ +[role="xpack"] +[[active-directory-realm]] +=== Active Directory user authentication + +You can configure {stack} {security-features} to communicate with Active +Directory to authenticate users. To integrate with Active Directory, you +configure an `active_directory` realm and map Active Directory users and groups +to roles in the <>. + +See {ref}/configuring-ad-realm.html[Configuring an active directory realm]. + +The {security-features} use LDAP to communicate with Active Directory, so +`active_directory` realms are similar to <>. Like +LDAP directories, Active Directory stores users and groups hierarchically. The +directory's hierarchy is built from containers such as the _organizational unit_ +(`ou`), _organization_ (`o`), and _domain controller_ (`dc`). + +The path to an entry is a _Distinguished Name_ (DN) that uniquely identifies a +user or group. User and group names typically have attributes such as a +_common name_ (`cn`) or _unique ID_ (`uid`). A DN is specified as a string, for +example `"cn=admin,dc=example,dc=com"` (white spaces are ignored). + +The {security-features} supports only Active Directory security groups. You +cannot map distribution groups to roles. 
+ +NOTE: When you use Active Directory for authentication, the username entered by + the user is expected to match the `sAMAccountName` or `userPrincipalName`, + not the common name. + +The Active Directory realm authenticates users using an LDAP bind request. After +authenticating the user, the realm then searches to find the user's entry in +Active Directory. Once the user has been found, the Active Directory realm then +retrieves the user's group memberships from the `tokenGroups` attribute on the +user's entry in Active Directory. + +[[ad-load-balancing]] +==== Load balancing and failover +The `load_balance.type` setting can be used at the realm level to configure how +the {security-features} should interact with multiple Active Directory servers. +Two modes of operation are supported: failover and load balancing. + +See +{ref}/security-settings.html#load-balancing[Load balancing and failover settings]. + +[[ad-settings]] +==== Active Directory realm settings + +See +{ref}/security-settings.html#ref-ad-settings[Active Directory realm settings]. + +[[mapping-roles-ad]] +==== Mapping Active Directory users and groups to roles + +See {ref}/configuring-ad-realm.html[Configuring an Active Directory realm]. + +[[ad-user-metadata]] +==== User metadata in Active Directory realms +When a user is authenticated via an Active Directory realm, the following +properties are populated in the user's _metadata_: + +|======================= +| Field | Description +| `ldap_dn` | The distinguished name of the user. +| `ldap_groups` | The distinguished name of each of the groups that were + resolved for the user (regardless of whether those + groups were mapped to a role). +|======================= + +This metadata is returned in the +{ref}/security-api-authenticate.html[authenticate API] and can be used with +<> in roles. + +Additional metadata can be extracted from the Active Directory server by configuring +the `metadata` setting on the Active Directory realm. + +[[active-directory-ssl]] +==== Setting up SSL between Elasticsearch and Active Directory + +See +{ref}/configuring-tls.html#tls-active-directory[Encrypting communications between {es} and Active Directory]. diff --git a/x-pack/docs/en/security/authentication/built-in-users.asciidoc b/x-pack/docs/en/security/authentication/built-in-users.asciidoc new file mode 100644 index 0000000000000..ec9ea3af72365 --- /dev/null +++ b/x-pack/docs/en/security/authentication/built-in-users.asciidoc @@ -0,0 +1,204 @@ +[role="xpack"] +[[built-in-users]] +=== Built-in users + +The {stack-security-features} provide built-in user credentials to help you get +up and running. These users have a fixed set of privileges and cannot be +authenticated until their passwords have been set. The `elastic` user can be +used to <>. + +`elastic`:: A built-in _superuser_. See <>. +`kibana`:: The user Kibana uses to connect and communicate with Elasticsearch. +`logstash_system`:: The user Logstash uses when storing monitoring information in Elasticsearch. +`beats_system`:: The user the Beats use when storing monitoring information in Elasticsearch. +`apm_system`:: The user the APM server uses when storing monitoring information in {es}. +`remote_monitoring_user`:: The user {metricbeat} uses when collecting and +storing monitoring information in {es}. It has the `remote_monitoring_agent` and +`remote_monitoring_collector` built-in roles. 
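Once a built-in user's password has been set, a quick way to verify the credentials is the authenticate API referenced in this change (a minimal sketch; send the request with that user's username and password via basic authentication):

[source,js]
--------------------------------------------------
GET /_security/_authenticate
--------------------------------------------------

The response reports the authenticated username and the roles assigned to it.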
+ + +[float] +[[built-in-user-explanation]] +==== How the built-in users work +These built-in users are stored in a special `.security` index, which is managed +by {es}. If a built-in user is disabled or its password +changes, the change is automatically reflected on each node in the cluster. If +your `.security` index is deleted or restored from a snapshot, however, any +changes you have applied are lost. + +Although they share the same API, the built-in users are separate and distinct +from users managed by the <>. Disabling the native +realm will not have any effect on the built-in users. The built-in users can +be disabled individually, using the +{ref}/security-api-disable-user.html[disable users API]. + +[float] +[[bootstrap-elastic-passwords]] +==== The Elastic bootstrap password + +When you install {es}, if the `elastic` user does not already have a password, +it uses a default bootstrap password. The bootstrap password is a transient +password that enables you to run the tools that set all the built-in user passwords. + +By default, the bootstrap password is derived from a randomized `keystore.seed` +setting, which is added to the keystore during installation. You do not need +to know or change this bootstrap password. If you have defined a +`bootstrap.password` setting in the keystore, however, that value is used instead. +For more information about interacting with the keystore, see +{ref}/secure-settings.html[Secure Settings]. + +NOTE: After you <>, +in particular for the `elastic` user, there is no further use for the bootstrap +password. + +[float] +[[set-built-in-user-passwords]] +==== Setting built-in user passwords + +You must set the passwords for all built-in users. + +The +elasticsearch-setup-passwords+ tool is the simplest method to set the +built-in users' passwords for the first time. It uses the `elastic` user's +bootstrap password to run user management API requests. For example, you can run +the command in an "interactive" mode, which prompts you to enter new passwords +for the `elastic`, `kibana`, `logstash_system`, `beats_system`, `apm_system`, +and `remote_monitoring_user` users: + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-setup-passwords interactive +-------------------------------------------------- + +For more information about the command options, see +{ref}/setup-passwords.html[elasticsearch-setup-passwords]. + +IMPORTANT: After you set a password for the `elastic` user, the bootstrap +password is no longer valid; you cannot run the `elasticsearch-setup-passwords` +command a second time. + +Alternatively, you can set the initial passwords for the built-in users by using +the *Management > Users* page in {kib} or the +{ref}/security-api-change-password.html[Change Password API]. These methods are +more complex. You must supply the `elastic` user and its bootstrap password to +log into {kib} or run the API. This requirement means that you cannot use the +default bootstrap password that is derived from the `keystore.seed` setting. +Instead, you must explicitly set a `bootstrap.password` setting in the keystore +before you start {es}. 
For example, the following command prompts you to enter a +new bootstrap password: + +[source,shell] +---------------------------------------------------- +bin/elasticsearch-keystore add "bootstrap.password" +---------------------------------------------------- + +You can then start {es} and {kib} and use the `elastic` user and bootstrap +password to log into {kib} and change the passwords. Alternatively, you can +submit Change Password API requests for each built-in user. These methods are +better suited for changing your passwords after the initial setup is complete, +since at that point the bootstrap password is no longer required. + +[[add-built-in-user-passwords]] + +[float] +[[add-built-in-user-kibana]] +==== Adding built-in user passwords to {kib} + +After the `kibana` user password is set, you need to update the {kib} server +with the new password by setting `elasticsearch.password` in the `kibana.yml` +configuration file: + +[source,yaml] +----------------------------------------------- +elasticsearch.password: kibanapassword +----------------------------------------------- + +See {kibana-ref}/using-kibana-with-security.html[Configuring security in {kib}]. + +[float] +[[add-built-in-user-logstash]] +==== Adding built-in user passwords to {ls} + +The `logstash_system` user is used internally within Logstash when +monitoring is enabled for Logstash. + +To enable this feature in Logstash, you need to update the Logstash +configuration with the new password by setting `xpack.monitoring.elasticsearch.password` in +the `logstash.yml` configuration file: + +[source,yaml] +---------------------------------------------------------- +xpack.monitoring.elasticsearch.password: logstashpassword +---------------------------------------------------------- + +If you have upgraded from an older version of Elasticsearch, +the `logstash_system` user may have defaulted to _disabled_ for security reasons. +Once the password has been changed, you can enable the user via the following API call: + +[source,js] +--------------------------------------------------------------------- +PUT _security/user/logstash_system/_enable +--------------------------------------------------------------------- +// CONSOLE + +See {logstash-ref}/ls-security.html#ls-monitoring-user[Configuring credentials for {ls} monitoring]. + +[float] +[[add-built-in-user-beats]] +==== Adding built-in user passwords to Beats + +The `beats_system` user is used internally within Beats when monitoring is +enabled for Beats. + +To enable this feature in Beats, you need to update the configuration for each +of your beats to reference the correct username and password. For example: + +[source,yaml] +---------------------------------------------------------- +xpack.monitoring.elasticsearch.username: beats_system +xpack.monitoring.elasticsearch.password: beatspassword +---------------------------------------------------------- + +For example, see {metricbeat-ref}/monitoring.html[Monitoring {metricbeat}]. + +The `remote_monitoring_user` is used when {metricbeat} collects and stores +monitoring data for the {stack}. See <>. + +If you have upgraded from an older version of {es}, then you may not have set a +password for the `beats_system` or `remote_monitoring_user` users. If this is +the case, then you should use the *Management > Users* page in {kib} or the +{ref}/security-api-change-password.html[Change Password API] to set a password +for these users. 
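For example, a minimal sketch of such a change password API call (the password value shown here is only a placeholder):

[source,js]
--------------------------------------------------
POST /_security/user/remote_monitoring_user/_password
{
  "password" : "a-long-placeholder-password"
}
--------------------------------------------------

The same request shape works for `beats_system` and the other built-in users.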
+ +[float] +[[add-built-in-user-apm]] +==== Adding built-in user passwords to APM + +The `apm_system` user is used internally within APM when monitoring is enabled. + +To enable this feature in APM, you need to update the +{apm-server-ref}/configuring-howto-apm-server.html[APM configuration file] to +reference the correct username and password. For example: + +[source,yaml] +---------------------------------------------------------- +xpack.monitoring.elasticsearch.username: apm_system +xpack.monitoring.elasticsearch.password: apmserverpassword +---------------------------------------------------------- + +See {apm-server-ref}/monitoring.html[Monitoring APM Server]. + +If you have upgraded from an older version of {es}, then you may not have set a +password for the `apm_system` user. If this is the case, +then you should use the *Management > Users* page in {kib} or the +{ref}/security-api-change-password.html[Change Password API] to set a password +for these users. + +[float] +[[disabling-default-password]] +==== Disabling default password functionality +[IMPORTANT] +============================================================================= +This setting is deprecated. The elastic user no longer has a default password. +The password must be set before the user can be used. +See <>. +============================================================================= diff --git a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc index c023a8492b166..c7f793d92f32a 100644 --- a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc @@ -132,7 +132,7 @@ See <>. . Map LDAP groups to roles. + -- -The `ldap` realm enables you to map LDAP users to to roles via their LDAP +The `ldap` realm enables you to map LDAP users to roles via their LDAP groups, or other metadata. This role mapping can be configured via the {ref}/security-api-put-role-mapping.html[add role mapping API] or by using a file stored on each node. When a user authenticates with LDAP, the privileges diff --git a/x-pack/docs/en/security/authentication/file-realm.asciidoc b/x-pack/docs/en/security/authentication/file-realm.asciidoc new file mode 100644 index 0000000000000..9261c33eb1f43 --- /dev/null +++ b/x-pack/docs/en/security/authentication/file-realm.asciidoc @@ -0,0 +1,27 @@ +[role="xpack"] +[[file-realm]] +=== File-based user authentication + +You can manage and authenticate users with the built-in `file` realm. +With the `file` realm, users are defined in local files on each node in the cluster. + +IMPORTANT: As the administrator of the cluster, it is your responsibility to +ensure the same users are defined on every node in the cluster. The {stack} +{security-features} do not deliver any mechanism to guarantee this. + +The `file` realm is primarily supported to serve as a fallback/recovery realm. It +is mostly useful in situations where all users locked themselves out of the system +(no one remembers their username/password). In this type of scenarios, the `file` +realm is your only way out - you can define a new `admin` user in the `file` realm +and use it to log in and reset the credentials of all other users. + +IMPORTANT: When you configure realms in `elasticsearch.yml`, only the realms you +specify are used for authentication. To use the `file` realm as a fallback, you +must include it in the realm chain. 
+ +To define users, the {security-features} provide the +{ref}/users-command.html[users] command-line tool. This tool enables you to add +and remove users, assign user roles, and manage user passwords. + +For more information, see +{ref}/configuring-file-realm.html[Configuring a file realm]. diff --git a/x-pack/docs/en/security/authentication/index.asciidoc b/x-pack/docs/en/security/authentication/index.asciidoc new file mode 100644 index 0000000000000..0723f5ee30b37 --- /dev/null +++ b/x-pack/docs/en/security/authentication/index.asciidoc @@ -0,0 +1,18 @@ + +include::overview.asciidoc[] +include::built-in-users.asciidoc[] +include::internal-users.asciidoc[] +include::token-authentication-services.asciidoc[] +include::realms.asciidoc[] +include::realm-chains.asciidoc[] +include::active-directory-realm.asciidoc[] +include::file-realm.asciidoc[] +include::ldap-realm.asciidoc[] +include::native-realm.asciidoc[] +include::pki-realm.asciidoc[] +include::saml-realm.asciidoc[] +include::kerberos-realm.asciidoc[] +include::custom-realm.asciidoc[] +include::anonymous-access.asciidoc[] +include::user-cache.asciidoc[] +include::saml-guide.asciidoc[] diff --git a/x-pack/docs/en/security/authentication/internal-users.asciidoc b/x-pack/docs/en/security/authentication/internal-users.asciidoc new file mode 100644 index 0000000000000..7ae7dc5bc1723 --- /dev/null +++ b/x-pack/docs/en/security/authentication/internal-users.asciidoc @@ -0,0 +1,14 @@ +[role="xpack"] +[[internal-users]] +=== Internal users + +The {stack-security-features} use three _internal_ users (`_system`, `_xpack`, +and `_xpack_security`), which are responsible for the operations that take place +inside an {es} cluster. + +These users are only used by requests that originate from within the cluster. +For this reason, they cannot be used to authenticate against the API and there +is no password to manage or reset. + +From time-to-time you may find a reference to one of these users inside your +logs, including <>. diff --git a/x-pack/docs/en/security/authentication/kerberos-realm.asciidoc b/x-pack/docs/en/security/authentication/kerberos-realm.asciidoc new file mode 100644 index 0000000000000..a8d6a74cb6092 --- /dev/null +++ b/x-pack/docs/en/security/authentication/kerberos-realm.asciidoc @@ -0,0 +1,63 @@ +[role="xpack"] +[[kerberos-realm]] +=== Kerberos authentication + +You can configure the {stack} {security-features} to support Kerberos V5 +authentication, an industry standard protocol to authenticate users in {es}. + +NOTE: You cannot use the Kerberos realm to authenticate users in {kib} +and on the transport network layer. + +To authenticate users with Kerberos, you need to +{ref}/configuring-kerberos-realm.html[configure a Kerberos realm] and +<>. +For more information on realm settings, see +{ref}/security-settings.html#ref-kerberos-settings[Kerberos realm settings]. + +[[kerberos-terms]] +==== Key concepts + +There are a few terms and concepts that you'll encounter when you're setting up +Kerberos realms: + +_kdc_:: +Key Distribution Center. A service that issues Kerberos tickets. + +_principal_:: +A Kerberos principal is a unique identity to which Kerberos can assign +tickets. It can be used to identify a user or a service provided by a +server. ++ +-- +Kerberos V5 principal names are of format `primary/instance@REALM`, where +`primary` is a user name. + +`instance` is an optional string that qualifies the primary and is separated +by a slash(`/`) from the primary. 
For a user, usually it is not used; for
+service hosts, it is the fully qualified domain name of the host.
+
+`REALM` is the Kerberos realm. Usually it is the domain name in upper case.
+An example of a typical user principal is `user@ES.DOMAIN.LOCAL`. An example of
+a typical service principal is `HTTP/es.domain.local@ES.DOMAIN.LOCAL`.
+--
+
+_realm_::
+Realms define the administrative boundary within which the authentication server
+has authority to authenticate users and services.
+
+_keytab_::
+A file that stores pairs of principals and encryption keys.
+
+IMPORTANT: Anyone with read permissions to this file can use the
+credentials in the network to access other services, so it is important
+to protect it with proper file permissions.
+
+_krb5.conf_::
+A file that contains Kerberos configuration information such as the default realm
+name, the location of key distribution centers (KDCs), realm information,
+mappings from domain names to Kerberos realms, and default configurations for
+realm session key encryption types.
+
+_ticket granting ticket (TGT)_::
+A TGT is an authentication ticket generated by the Kerberos authentication
+server. It contains an encrypted authenticator.
\ No newline at end of file
diff --git a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
new file mode 100644
index 0000000000000..a022e71e7fbc3
--- /dev/null
+++ b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
@@ -0,0 +1,88 @@
+[role="xpack"]
+[[ldap-realm]]
+=== LDAP user authentication
+
+You can configure the {stack} {security-features} to communicate with a
+Lightweight Directory Access Protocol (LDAP) server to authenticate users. To
+integrate with LDAP, you configure an `ldap` realm and map LDAP groups to user
+roles in the <>.
+
+LDAP stores users and groups hierarchically, similar to the way folders are
+grouped in a file system. An LDAP directory's hierarchy is built from containers
+such as the _organizational unit_ (`ou`), _organization_ (`o`), and
+_domain controller_ (`dc`).
+
+The path to an entry is a _Distinguished Name_ (DN) that uniquely identifies a
+user or group. User and group names typically have attributes such as a
+_common name_ (`cn`) or _unique ID_ (`uid`). A DN is specified as a string,
+for example `"cn=admin,dc=example,dc=com"` (white spaces are ignored).
+
+The `ldap` realm supports two modes of operation: a user search mode
+and a mode with specific templates for user DNs.
+
+[[ldap-user-search]]
+==== User search mode and user DN templates mode
+
+See {ref}/configuring-ldap-realm.html[Configuring an LDAP Realm].
+
+[[ldap-load-balancing]]
+==== Load balancing and failover
+The `load_balance.type` setting can be used at the realm level to configure how
+the {security-features} should interact with multiple LDAP servers. The
+{security-features} support both failover and load balancing modes of operation.
+
+See
+{ref}/security-settings.html#load-balancing[Load balancing and failover settings].
+
+[[ldap-settings]]
+==== LDAP realm settings
+
+See {ref}/security-settings.html#ref-ldap-settings[LDAP realm settings].
+
+[[mapping-roles-ldap]]
+==== Mapping LDAP groups to roles
+
+An integral part of a realm authentication process is to resolve the roles
+associated with the authenticated user. Roles define the privileges a user has
+in the cluster.
+
+Since with the `ldap` realm the users are managed externally in the LDAP server,
+the expectation is that their roles are managed there as well.
In fact, LDAP +supports the notion of groups, which often represent user roles for different +systems in the organization. + +The `ldap` realm enables you to map LDAP users to roles via their LDAP +groups, or other metadata. This role mapping can be configured via the +{ref}/security-api-put-role-mapping.html[add role mapping API] or by using a +file stored on each node. When a user authenticates with LDAP, the privileges +for that user are the union of all privileges defined by the roles to which +the user is mapped. For more information, see +{ref}/configuring-ldap-realm.html[Configuring an LDAP realm]. + +[[ldap-user-metadata]] +==== User metadata in LDAP realms +When a user is authenticated via an LDAP realm, the following properties are +populated in the user's _metadata_: + +|======================= +| Field | Description +| `ldap_dn` | The distinguished name of the user. +| `ldap_groups` | The distinguished name of each of the groups that were + resolved for the user (regardless of whether those + groups were mapped to a role). +|======================= + +This metadata is returned in the +{ref}/security-api-authenticate.html[authenticate API], and can be used with +<> in roles. + +Additional fields can be included in the user's metadata by configuring +the `metadata` setting on the LDAP realm. This metadata is available for use +with the <> or in +<>. + +[[ldap-ssl]] +==== Setting up SSL between Elasticsearch and LDAP + +See +{ref}/configuring-tls.html#tls-ldap[Encrypting communications between {es} and LDAP]. diff --git a/x-pack/docs/en/security/authentication/native-realm.asciidoc b/x-pack/docs/en/security/authentication/native-realm.asciidoc new file mode 100644 index 0000000000000..7905064b5cc66 --- /dev/null +++ b/x-pack/docs/en/security/authentication/native-realm.asciidoc @@ -0,0 +1,32 @@ +[role="xpack"] +[[native-realm]] +=== Native user authentication + +The easiest way to manage and authenticate users is with the internal `native` +realm. You can use the REST APIs or Kibana to add and remove users, assign user +roles, and manage user passwords. + +[[native-realm-configuration]] +[float] +==== Configuring a native realm + +See {ref}/configuring-native-realm.html[Configuring a native realm]. + +[[native-settings]] +==== Native realm settings + +See {ref}/security-settings.html#ref-native-settings[Native realm settings]. + +[[managing-native-users]] +==== Managing native users + +The {stack} {security-features} enable you to easily manage users in {kib} on the +*Management / Security / Users* page. + +Alternatively, you can manage users through the `user` API. For more +information and examples, see +{ref}/security-api.html#security-user-apis[user management APIs]. + +[[migrating-from-file]] +NOTE: To migrate file-based users to the `native` realm, use the +{ref}/migrate-tool.html[migrate tool]. diff --git a/x-pack/docs/en/security/authentication/overview.asciidoc b/x-pack/docs/en/security/authentication/overview.asciidoc new file mode 100644 index 0000000000000..46f8b65b22ff1 --- /dev/null +++ b/x-pack/docs/en/security/authentication/overview.asciidoc @@ -0,0 +1,31 @@ +[role="xpack"] +[[setting-up-authentication]] +== User authentication + +Authentication identifies an individual. To gain access to restricted resources, +a user must prove their identity, via passwords, credentials, or some other +means (typically referred to as authentication tokens). 
+ +The {stack} authenticates users by identifying the users behind the requests +that hit the cluster and verifying that they are who they claim to be. The +authentication process is handled by one or more authentication services called +<>. + +You can use the native support for managing and authenticating users, or +integrate with external user management systems such as LDAP and Active +Directory. + +The {stack-security-features} provide built-in realms such as `native`,`ldap`, +`active_directory`, `pki`, `file`, and `saml`. If none of the built-in realms +meet your needs, you can also build your own custom realm and plug it into the +{stack}. + +When {security-features} are enabled, depending on the realms you've configured, +you must attach your user credentials to the requests sent to {es}. For example, +when using realms that support usernames and passwords you can simply attach +{wikipedia}/Basic_access_authentication[basic auth] header to the requests. + +The {security-features} provide two services: the token service and the api key +service. You can use these services to exchange the current authentication for +a token or key. This token or key can then be used as credentials for authenticating +new requests. These services are enabled by default when TLS/SSL is enabled for HTTP. diff --git a/x-pack/docs/en/security/authentication/pki-realm.asciidoc b/x-pack/docs/en/security/authentication/pki-realm.asciidoc new file mode 100644 index 0000000000000..41976c0425a06 --- /dev/null +++ b/x-pack/docs/en/security/authentication/pki-realm.asciidoc @@ -0,0 +1,21 @@ +[role="xpack"] +[[pki-realm]] +=== PKI user authentication + +You can configure {stack} {security-features} to use Public Key Infrastructure +(PKI) certificates to authenticate users in {es}. This requires clients to +present X.509 certificates. + +NOTE: You cannot use PKI certificates to authenticate users in {kib}. + +To use PKI in {es}, you configure a PKI realm, enable client authentication on +the desired network layers (transport or http), and map the Distinguished Names +(DNs) from the user certificates to roles in the +<>. + +See {ref}/configuring-pki-realm.html[Configuring a PKI realm]. + +[[pki-settings]] +==== PKI realm settings + +See {ref}/security-settings.html#ref-pki-settings[PKI realm settings]. diff --git a/x-pack/docs/en/security/authentication/realm-chains.asciidoc b/x-pack/docs/en/security/authentication/realm-chains.asciidoc new file mode 100644 index 0000000000000..46e105d0e47d6 --- /dev/null +++ b/x-pack/docs/en/security/authentication/realm-chains.asciidoc @@ -0,0 +1,99 @@ +[role="xpack"] +[[realm-chains]] +=== Realm chains + +<> live within a _realm chain_. It is essentially a prioritized list of +configured realms (typically of various types). +Realms are consulted in ascending order (that is to say, the realm with the lowest `order` value is consulted first). +You should make sure each +configured realm has a distinct `order` setting. In the event that two or more +realms have the same `order`, they will be processed in `name` order. +During the authentication process, {stack} {security-features} will consult and +try to authenticate the request one realm at a time. +Once one of the realms successfully authenticates the request, the authentication +is considered to be successful and the authenticated user will be associated +with the request (which will then proceed to the authorization phase). If a realm +cannot authenticate the request, the next in line realm in the chain will be +consulted. 
If none of the realms in the chain can authenticate the request, the
+authentication is considered to be unsuccessful and an authentication error
+is returned (as HTTP status code `401`).
+
+NOTE: Some systems (e.g. Active Directory) have a temporary lock-out period after
+      several successive failed login attempts. If the same username exists in
+      multiple realms, unintentional account lockouts are possible. For more
+      information, please see <>.
+
+The default realm chain contains the `native` and `file` realms. To explicitly
+configure a realm chain, you specify the chain in `elasticsearch.yml`. When you
+configure a realm chain, only the realms you specify are used for authentication.
+To use the `native` and `file` realms, you must include them in the chain.
+
+The following snippet configures a realm chain that includes the `file` and
+`native` realms, as well as two LDAP realms and an Active Directory realm.
+
+[source,yaml]
+----------------------------------------
+xpack.security.authc:
+  realms:
+
+    file:
+      type: file
+      order: 0
+
+    native:
+      type: native
+      order: 1
+
+    ldap1:
+      type: ldap
+      order: 2
+      enabled: false
+      url: 'url_to_ldap1'
+      ...
+
+    ldap2:
+      type: ldap
+      order: 3
+      url: 'url_to_ldap2'
+      ...
+
+    ad1:
+      type: active_directory
+      order: 4
+      url: 'url_to_ad'
+----------------------------------------
+
+As can be seen above, each realm has a unique name that identifies it and each
+realm type dictates its own set of required and optional settings. That said,
+there are
+{ref}/security-settings.html#ref-realm-settings[settings that are common to all realms].
+
+[[authorization_realms]]
+==== Delegating authorization to another realm
+
+Some realms have the ability to perform _authentication_ internally, but delegate the
+lookup and assignment of roles (that is, _authorization_) to another realm.
+
+For example, you may wish to use a PKI realm to authenticate your users with
+TLS client certificates, but then look up that user in an LDAP realm and use
+their LDAP group assignments to determine their roles in Elasticsearch.
+
+Any realm that supports retrieving users (without needing their credentials)
+can be used as an _authorization realm_ (that is, its name may appear as one of
+the values in the list of `authorization_realms`). See <> for
+further explanation on which realms support this.
+
+For realms that support this feature, it can be enabled by configuring the
+`authorization_realms` setting on the authenticating realm. Check the list of
+{ref}/security-settings.html#realm-settings[supported settings] for each realm to see if it supports the `authorization_realms` setting.
+
+If delegated authorization is enabled for a realm, it authenticates the user in
+its standard manner (including relevant caching), then looks for that user in the
+configured list of authorization realms. It tries each realm in the order they
+are specified in the `authorization_realms` setting. The user is retrieved by
+principal - the user must have identical usernames in the _authentication_ and
+_authorization realms_. If the user cannot be found in any of the authorization
+realms, authentication fails.
+
+NOTE: Delegated authorization requires a
+https://www.elastic.co/subscriptions[Platinum or Trial license].
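+
+For example, a configuration along the following lines authenticates users with
+a `pki` realm and delegates role lookup to an `ldap` realm. This is only a
+sketch: the realm names and URL are illustrative, and the `ldap` realm would
+still need its full configuration.
+
+[source,yaml]
+----------------------------------------
+xpack.security.authc:
+  realms:
+
+    ldap1:
+      type: ldap
+      order: 1
+      url: 'url_to_ldap1'
+
+    pki1:
+      type: pki
+      order: 2
+      authorization_realms: ldap1
+----------------------------------------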
diff --git a/x-pack/docs/en/security/authentication/realms.asciidoc b/x-pack/docs/en/security/authentication/realms.asciidoc new file mode 100644 index 0000000000000..b5465274b925c --- /dev/null +++ b/x-pack/docs/en/security/authentication/realms.asciidoc @@ -0,0 +1,67 @@ +[role="xpack"] +[[realms]] +=== Realms + +The {stack-security-features} authenticate users by using realms and one or more +<>. + +A _realm_ is used to resolve and authenticate users based on authentication +tokens. The {security-features} provide the following built-in realms: + +_native_:: +An internal realm where users are stored in a dedicated {es} index. +This realm supports an authentication token in the form of username and password, +and is available by default when no realms are explicitly configured. The users +are managed via the {ref}/security-api.html#security-user-apis[user management APIs]. +See <>. + +_ldap_:: +A realm that uses an external LDAP server to authenticate the +users. This realm supports an authentication token in the form of username and +password, and requires explicit configuration in order to be used. See +<>. + +_active_directory_:: +A realm that uses an external Active Directory Server to authenticate the +users. With this realm, users are authenticated by usernames and passwords. +See <>. + +_pki_:: +A realm that authenticates users using Public Key Infrastructure (PKI). This +realm works in conjunction with SSL/TLS and identifies the users through the +Distinguished Name (DN) of the client's X.509 certificates. See <>. + +_file_:: +An internal realm where users are defined in files stored on each node in the +{es} cluster. This realm supports an authentication token in the form +of username and password and is always available. See <>. + +_saml_:: +A realm that facilitates authentication using the SAML 2.0 Web SSO protocol. +This realm is designed to support authentication through {kib} and is not +intended for use in the REST API. See <>. + +_kerberos_:: +A realm that authenticates a user using Kerberos authentication. Users are +authenticated on the basis of Kerberos tickets. See <>. + +The {security-features} also support custom realms. If you need to integrate +with another authentication system, you can build a custom realm plugin. For +more information, see <>. + +==== Internal and external realms + +Realm types can roughly be classified in two categories: + +Internal:: Realms that are internal to Elasticsearch and don't require any +communication with external parties. They are fully managed by the {stack} +{security-features}. There can only be a maximum of one configured realm per +internal realm type. The {security-features} provides two internal realm +types: `native` and `file`. + +External:: Realms that require interaction with parties/components external to +{es}, typically, with enterprise grade identity management systems. Unlike +internal realms, there can be as many external realms as one would like - each +with its own unique name and configuration. The {security-features} provide the +following external realm types: `ldap`, `active_directory`, `saml`, `kerberos`, +and `pki`. 
diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index 9201a5520f76a..2f869373ac27e 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -862,6 +862,7 @@ It is possible to have one or more {kib} instances that use SAML, while other instances use basic authentication against another realm type (e.g. <> or <>). +[[saml-troubleshooting]] === Troubleshooting SAML Realm Configuration The SAML 2.0 specification offers a lot of options and flexibility for the implementers diff --git a/x-pack/docs/en/security/authentication/saml-realm.asciidoc b/x-pack/docs/en/security/authentication/saml-realm.asciidoc new file mode 100644 index 0000000000000..44fc0ff5b8b32 --- /dev/null +++ b/x-pack/docs/en/security/authentication/saml-realm.asciidoc @@ -0,0 +1,41 @@ +[role="xpack"] +[[saml-realm]] +=== SAML authentication +The {stack} {security-features} support user authentication using SAML +single sign-on (SSO). The {security-features} provide this support using the Web +Browser SSO profile of the SAML 2.0 protocol. + +This protocol is specifically designed to support authentication via an +interactive web browser, so it does not operate as a standard authentication +realm. Instead, there are {kib} and {es} {security-features} that work +together to enable interactive SAML sessions. + +This means that the SAML realm is not suitable for use by standard REST clients. +If you configure a SAML realm for use in {kib}, you should also configure +another realm, such as the <> in your authentication +chain. + +In order to simplify the process of configuring SAML authentication within the +Elastic Stack, there is a step-by-step guide to +<>. + +The remainder of this document will describe {es} specific configuration options +for SAML realms. + +[[saml-settings]] +==== SAML realm settings + +See {ref}/security-settings.html#ref-saml-settings[SAML realm settings]. + +==== SAML realm signing settings + +See {ref}/security-settings.html#ref-saml-signing-settings[SAML realm signing settings]. + +==== SAML realm encryption settings + +See {ref}/security-settings.html#ref-saml-encryption-settings[SAML realm encryption settings]. + +==== SAML realm SSL settings + +See {ref}/security-settings.html#ref-saml-ssl-settings[SAML realm SSL settings]. + diff --git a/x-pack/docs/en/security/authentication/token-authentication-services.asciidoc b/x-pack/docs/en/security/authentication/token-authentication-services.asciidoc new file mode 100644 index 0000000000000..04e8238a89ed3 --- /dev/null +++ b/x-pack/docs/en/security/authentication/token-authentication-services.asciidoc @@ -0,0 +1,58 @@ +[role="xpack"] +[[token-authentication-services]] +=== Token-based authentication services + +The {stack-security-features} authenticate users by using realms and one or more token-based +authentication services. The token-based authentication services are used for +authentication and for the management of tokens. These tokens can be used as +credentials attached to requests that are sent to {es}. When {es} receives a request +that must be authenticated, it consults first the token-based authentication +services then the realm chain. 
+ +The {security-features} provide the following built-in token-based authentication +services, which are listed in the order they are consulted: + +_token-service_:: +The token service uses the {ref}/security-api-get-token.html[get token API] to +generate access tokens and refresh tokens based on the OAuth2 specification. +The access token is a short-lived token. By default, it expires after 20 minutes +but it can be configured to last a maximum of 1 hour. It can be refreshed by +using a refresh token, which has a lifetime of 24 hours. The access token is a +bearer token. You can use it by sending a request with an `Authorization` +header with a value that has the prefix "Bearer " followed by the value of the +access token. For example: ++ +-- +[source,shell] +-------------------------------------------------- +curl -H "Authorization: Bearer dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" http://localhost:9200/_cluster/health +-------------------------------------------------- +// NOTCONSOLE +-- + +_api-key-service_:: +The API key service uses the +{ref}/security-api-create-api-key.html[create API key API] to generate API keys. +By default, the API keys do not expire. When you make a request to create API +keys, you can specify an expiration and permissions for the API key. The +permissions are limited by the authenticated user's permissions. You can use the +API key by sending a request with an `Authorization` header with a value that +has the prefix "ApiKey " followed by the credentials. The credentials are the +base64 encoding of the API key ID and the API key joined by a colon. For example: ++ +-- +[source,shell] +-------------------------------------------------- +curl -H "Authorization: ApiKey VnVhQ2ZHY0JDZGJrUW0tZTVhT3g6dWkybHAyYXhUTm1zeWFrdzl0dk5udw==" http://localhost:9200/_cluster/health +-------------------------------------------------- +// NOTCONSOLE +-- + +Depending on your use case, you may want to decide on the lifetime of the tokens +generated by these services. You can then use this information to decide which +service to use to generate and manage the tokens. Non-expiring API keys may seem +like the easy option but you must consider the security implications that come +with non-expiring keys. Both the _token-service_ and _api-key-service_ permit +you to invalidate the tokens. See +{ref}/security-api-invalidate-token.html[invalidate token API] and +{ref}/security-api-invalidate-api-key.html[invalidate API key API]. diff --git a/x-pack/docs/en/security/authorization/built-in-roles.asciidoc b/x-pack/docs/en/security/authorization/built-in-roles.asciidoc new file mode 100644 index 0000000000000..daf115df8b930 --- /dev/null +++ b/x-pack/docs/en/security/authorization/built-in-roles.asciidoc @@ -0,0 +1,149 @@ +[role="xpack"] +[[built-in-roles]] +=== Built-in roles + +The {stack-security-features} apply a default role to all users, including +<>. The default role enables users to access +the authenticate endpoint, change their own passwords, and get information about +themselves. + +There is also a set of built-in roles you can explicitly assign to users. These +roles have a fixed set of privileges and cannot be updated. + +[[built-in-roles-apm-system]] `apm_system` :: +Grants access necessary for the APM system user to send system-level data +(such as monitoring) to {es}. 
+ +[[built-in-roles-apm-user]] `apm_user` :: +Grants the privileges required for APM users (such as `read` and +`view_index_metadata` privileges on the `apm-*` and `.ml-anomalies*` indices). + +[[built-in-roles-beats-admin]] `beats_admin` :: +Grants access to the `.management-beats` index, which contains configuration +information for the Beats. + +[[built-in-roles-beats-system]] `beats_system` :: +Grants access necessary for the Beats system user to send system-level data +(such as monitoring) to {es}. ++ +-- +[NOTE] +=============================== +* This role should not be assigned to users as the granted permissions may +change between releases. +* This role does not provide access to the beats indices and is not +suitable for writing beats output to {es}. +=============================== + +-- + +[[built-in-roles-ingest-user]] `ingest_admin` :: +Grants access to manage *all* index templates and *all* ingest pipeline configurations. ++ +NOTE: This role does *not* provide the ability to create indices; those privileges +must be defined in a separate role. + +[[built-in-roles-kibana-dashboard]] `kibana_dashboard_only_user` :: +Grants access to the {kib} Dashboard and read-only permissions to Kibana. +This role does not have access to editing tools in {kib}. For more +information, see +{kibana-ref}/xpack-dashboard-only-mode.html[{kib} Dashboard Only Mode]. + +[[built-in-roles-kibana-system]] `kibana_system` :: +Grants access necessary for the {kib} system user to read from and write to the +{kib} indices, manage index templates and tokens, and check the availability of +the {es} cluster. This role grants read access to the `.monitoring-*` indices +and read and write access to the `.reporting-*` indices. For more information, +see {kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]. ++ +NOTE: This role should not be assigned to users as the granted permissions may +change between releases. + +[[built-in-roles-kibana-user]] `kibana_user`:: +Grants access to all features in {kib}. For more information on Kibana authorization, +see {kibana-ref}/xpack-security-authorization.html[Kibana Authorization]. + +[[built-in-roles-logstash-admin]] `logstash_admin` :: +Grants access to the `.logstash*` indices for managing configurations. + +[[built-in-roles-logstash-system]] `logstash_system` :: +Grants access necessary for the Logstash system user to send system-level data +(such as monitoring) to {es}. For more information, see +{logstash-ref}/ls-security.html[Configuring Security in Logstash]. ++ +-- +[NOTE] +=============================== +* This role should not be assigned to users as the granted permissions may +change between releases. +* This role does not provide access to the logstash indices and is not +suitable for use within a Logstash pipeline. +=============================== +-- + +[[built-in-roles-ml-admin]] `machine_learning_admin`:: +Grants `manage_ml` cluster privileges, read access to `.ml-anomalies*`, +`.ml-notifications*`, `.ml-state*`, `.ml-meta*` indices and write access to +`.ml-annotations*` indices. + +[[built-in-roles-ml-user]] `machine_learning_user`:: +Grants the minimum privileges required to view {ml} configuration, +status, and work with results. This role grants `monitor_ml` cluster privileges, +read access to the `.ml-notifications` and `.ml-anomalies*` indices +(which store {ml} results), and write access to `.ml-annotations*` indices. 
+
+[[built-in-roles-monitoring-user]] `monitoring_user`::
+Grants the minimum privileges required for any user of {monitoring} other than those
+required to use {kib}. This role grants access to the monitoring indices and grants
+privileges necessary for reading basic cluster information. Monitoring users should
+also be assigned the `kibana_user` role.
+
+[[built-in-roles-remote-monitoring-agent]] `remote_monitoring_agent`::
+Grants the minimum privileges required to write data into the monitoring indices
+(`.monitoring-*`). This role also has the privileges necessary to create
+{metricbeat} indices (`metricbeat-*`) and write data into them.
+
+[[built-in-roles-remote-monitoring-collector]] `remote_monitoring_collector`::
+Grants the minimum privileges required to collect monitoring data for the {stack}.
+
+[[built-in-roles-reporting-user]] `reporting_user`::
+Grants the specific privileges required for users of {reporting} other than those
+required to use {kib}. This role grants access to the reporting indices; each
+user has access to only their own reports. Reporting users should also be
+assigned the `kibana_user` role and a role that grants them access to the data
+that will be used to generate reports.
+
+[[built-in-roles-snapshot-user]] `snapshot_user`::
+Grants the necessary privileges to create snapshots of **all** the indices and
+to view their metadata. This role enables users to view the configuration of
+existing snapshot repositories and snapshot details. It does not grant authority
+to remove or add repositories or to restore snapshots. It also does not enable
+users to change index settings or to read or update index data.
+
+[[built-in-roles-superuser]] `superuser`::
+Grants full access to the cluster, including all indices and data. A user with
+the `superuser` role can also manage users and roles and
+<> any other user in the system. Due to the
+permissive nature of this role, take extra care when assigning it to a user.
+
+[[built-in-roles-transport-client]] `transport_client`::
+Grants the privileges required to access the cluster through the Java Transport
+Client. The Java Transport Client fetches information about the nodes in the
+cluster using the _Node Liveness API_ and the _Cluster State API_ (when
+sniffing is enabled). Assign your users this role if they use the
+Transport Client.
++
+NOTE: Using the Transport Client effectively means the users are granted access
+to the cluster state. This means users can view the metadata of all indices,
+index templates, mappings, nodes, and basically everything about the cluster.
+However, this role does not grant permission to view the data in all indices.
+
+[[built-in-roles-watcher-admin]] `watcher_admin`::
++
+Grants read access to the `.watches` index, read access to the watch history and
+the triggered watches index, and allows all watcher actions to be executed.
+
+[[built-in-roles-watcher-user]] `watcher_user`::
++
+Grants read access to the `.watches` index, the get watch action, and the watcher
+stats.
diff --git a/x-pack/docs/en/security/authorization/custom-authorization.asciidoc b/x-pack/docs/en/security/authorization/custom-authorization.asciidoc
index 735fb26cc58a3..7e4dccf9b8094 100644
--- a/x-pack/docs/en/security/authorization/custom-authorization.asciidoc
+++ b/x-pack/docs/en/security/authorization/custom-authorization.asciidoc
@@ -75,7 +75,7 @@ implementation.
Sample code that illustrates the structure and implementation of a custom authorization engine is provided in the -https://github.com/elastic/elasticsearch/tree/master/plugin/examples/security-example-authorization-engine[elasticsearch] +https://github.com/elastic/elasticsearch/tree/master/plugins/examples/security-authorization-engine[elasticsearch] repository on GitHub. You can use this code as a starting point for creating your own authorization engine. diff --git a/x-pack/docs/en/security/authorization/document-level-security.asciidoc b/x-pack/docs/en/security/authorization/document-level-security.asciidoc new file mode 100644 index 0000000000000..bf48c5b226e5f --- /dev/null +++ b/x-pack/docs/en/security/authorization/document-level-security.asciidoc @@ -0,0 +1,60 @@ +[role="xpack"] +[[document-level-security]] +=== Document level security + +Document level security restricts the documents that users have read access to. +In particular, it restricts which documents can be accessed from document-based +read APIs. + +To enable document level security, you use a query to specify the documents that +each role can access. The document query is associated with a particular index +or index pattern and operates in conjunction with the privileges specified for +the indices. + +The following role definition grants read access only to documents that +belong to the `click` category within all the `events-*` indices: + +[source,js] +-------------------------------------------------- +POST /_security/role/click_role +{ + "indices": [ + { + "names": [ "events-*" ], + "privileges": [ "read" ], + "query": "{\"match\": {\"category\": \"click\"}}" + } + ] +} +-------------------------------------------------- +// CONSOLE + +NOTE: Omitting the `query` entry entirely disables document level security for + the respective indices permission entry. + +The specified `query` expects the same format as if it was defined in the +search request and supports the full {es} {ref}/query-dsl.html[Query DSL]. + +For example, the following role grants read access only to the documents whose +`department_id` equals `12`: + +[source,js] +-------------------------------------------------- +POST /_security/role/dept_role +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "query" : { + "term" : { "department_id" : 12 } + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +NOTE: `query` also accepts queries written as string values. + +For more information, see <>. \ No newline at end of file diff --git a/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc b/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc index 119a090232c2f..ee8731e17af3b 100644 --- a/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc +++ b/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc @@ -25,6 +25,9 @@ grant wider access than intended. Each user has a single set of field level and document level permissions per index. See <>. ===================================================================== +NOTE: Document- and field-level security disables the +{ref}/shard-request-cache.html[shard request cache]. 
+ [[multiple-roles-dls-fls]] ==== Multiple roles with document and field level security diff --git a/x-pack/docs/en/security/authorization/field-level-security.asciidoc b/x-pack/docs/en/security/authorization/field-level-security.asciidoc new file mode 100644 index 0000000000000..b8c873e2e88e4 --- /dev/null +++ b/x-pack/docs/en/security/authorization/field-level-security.asciidoc @@ -0,0 +1,230 @@ +[role="xpack"] +[[field-level-security]] +=== Field level security + +Field level security restricts the fields that users have read access to. +In particular, it restricts which fields can be accessed from document-based +read APIs. + +To enable field level security, specify the fields that each role can access +as part of the indices permissions in a role definition. Field level security is +thus bound to a well-defined set of indices (and potentially a set of +<>). + +The following role definition grants read access only to the `category`, +`@timestamp`, and `message` fields in all the `events-*` indices. + +[source,js] +-------------------------------------------------- +POST /_security/role/test_role1 +{ + "indices": [ + { + "names": [ "events-*" ], + "privileges": [ "read" ], + "field_security" : { + "grant" : [ "category", "@timestamp", "message" ] + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +Access to the following meta fields is always allowed: `_id`, +`_type`, `_parent`, `_routing`, `_timestamp`, `_ttl`, `_size` and `_index`. If +you specify an empty list of fields, only these meta fields are accessible. + +NOTE: Omitting the fields entry entirely disables field level security. + +You can also specify field expressions. For example, the following +example grants read access to all fields that start with an `event_` prefix: + +[source,js] +-------------------------------------------------- +POST /_security/role/test_role2 +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant" : [ "event_*" ] + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +Use the dot notations to refer to nested fields in more complex documents. For +example, assuming the following document: + +[source,js] +-------------------------------------------------- +{ + "customer": { + "handle": "Jim", + "email": "jim@mycompany.com", + "phone": "555-555-5555" + } +} +-------------------------------------------------- +// NOTCONSOLE + +The following role definition enables only read access to the customer `handle` +field: + +[source,js] +-------------------------------------------------- +POST /_security/role/test_role3 +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant" : [ "customer.handle" ] + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +This is where wildcard support shines. 
For example, use `customer.*` to enable +only read access to the `customer` data: + +[source,js] +-------------------------------------------------- +POST /_security/role/test_role4 +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant" : [ "customer.*" ] + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +You can deny permission to access fields with the following syntax: + +[source,js] +-------------------------------------------------- +POST /_security/role/test_role5 +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant" : [ "*"], + "except": [ "customer.handle" ] + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +The following rules apply: + +* The absence of `field_security` in a role is equivalent to * access. +* If permission has been granted explicitly to some fields, you can specify +denied fields. The denied fields must be a subset of the fields to which +permissions were granted. +* Defining denied and granted fields implies access to all granted fields except +those which match the pattern in the denied fields. + +For example: + +[source,js] +-------------------------------------------------- +POST /_security/role/test_role6 +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "except": [ "customer.handle" ], + "grant" : [ "customer.*" ] + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +In the above example, users can read all fields with the prefix "customer." +except for "customer.handle". + +An empty array for `grant` (for example, `"grant" : []`) means that access has +not been granted to any fields. + +When a user has several roles that specify field level permissions, the +resulting field level permissions per index are the union of the individual role +permissions. For example, if these two roles are merged: + +[source,js] +-------------------------------------------------- +POST /_security/role/test_role7 +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant": [ "a.*" ], + "except" : [ "a.b*" ] + } + } + ] +} + +POST /_security/role/test_role8 +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant": [ "a.b*" ], + "except" : [ "a.b.c*" ] + } + } + ] +} +-------------------------------------------------- +// CONSOLE + +The resulting permission is equal to: + +[source,js] +-------------------------------------------------- +{ + // role 1 + role 2 + ... + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant": [ "a.*" ], + "except" : [ "a.b.c*" ] + } + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +NOTE: Field-level security should not be set on {ref}/alias.html[`alias`] fields. To secure a +concrete field, its field name must be used directly. + +For more information, see <>. 
\ No newline at end of file diff --git a/x-pack/docs/en/security/authorization/images/authorization.png b/x-pack/docs/en/security/authorization/images/authorization.png new file mode 100644 index 0000000000000..1d692f2e3a9e2 Binary files /dev/null and b/x-pack/docs/en/security/authorization/images/authorization.png differ diff --git a/x-pack/docs/en/security/authorization/index.asciidoc b/x-pack/docs/en/security/authorization/index.asciidoc new file mode 100644 index 0000000000000..81c99960065e0 --- /dev/null +++ b/x-pack/docs/en/security/authorization/index.asciidoc @@ -0,0 +1,12 @@ + +include::overview.asciidoc[] +include::built-in-roles.asciidoc[] +include::managing-roles.asciidoc[] +include::privileges.asciidoc[] +include::document-level-security.asciidoc[] +include::field-level-security.asciidoc[] +include::alias-privileges.asciidoc[] +include::mapping-roles.asciidoc[] +include::field-and-document-access-control.asciidoc[] +include::run-as-privilege.asciidoc[] +include::custom-authorization.asciidoc[] diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc index 04fb12e19d75b..ab58fcc817cad 100644 --- a/x-pack/docs/en/security/authorization/managing-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc @@ -186,7 +186,7 @@ see <>. === Role management UI You can manage users and roles easily in {kib}. To -manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*. +manage roles, log in to {kib} and go to *Management / Security / Roles*. [float] [[roles-management-api]] diff --git a/x-pack/docs/en/security/authorization/overview.asciidoc b/x-pack/docs/en/security/authorization/overview.asciidoc new file mode 100644 index 0000000000000..feb2014e30ee3 --- /dev/null +++ b/x-pack/docs/en/security/authorization/overview.asciidoc @@ -0,0 +1,75 @@ +[role="xpack"] +[[authorization]] +== User authorization + +The {stack-security-features} add _authorization_, which is the process of determining whether the user behind an incoming request is allowed to execute +the request. + +This process takes place after the user is successfully identified and +<>. + +[[roles]] +[float] +=== Role-based access control + +The {security-features} provide a role-based access control (RBAC) mechanism, +which enables you to authorize users by assigning privileges to roles and +assigning roles to users or groups. + +image::security/authorization/images/authorization.png[This image illustrates role-based access control] + +The authorization process revolves around the following constructs: + +_Secured Resource_:: +A resource to which access is restricted. Indices, aliases, documents, fields, +users, and the {es} cluster itself are all examples of secured objects. + +_Privilege_:: +A named group of one or more actions that a user may execute against a +secured resource. Each secured resource has its own sets of available privileges. +For example, `read` is an index privilege that represents all actions that enable +reading the indexed/stored data. For a complete list of available privileges +see <>. + +_Permissions_:: +A set of one or more privileges against a secured resource. 
Permissions can +easily be described in words, here are few examples: + * `read` privilege on the `products` index + * `manage` privilege on the cluster + * `run_as` privilege on `john` user + * `read` privilege on documents that match query X + * `read` privilege on `credit_card` field + +_Role_:: +A named set of permissions + +_User_:: +The authenticated user. + +_Group_:: +One or more groups to which a user belongs. Groups are not supported in some +realms, such as native, file, or PKI realms. + +A role has a unique name and identifies a set of permissions that translate to +privileges on resources. You can associate a user or group with an arbitrary +number of roles. When you map roles to groups, the roles of a user in that group +are the combination of the roles assigned to that group and the roles assigned +to that user. Likewise, the total set of permissions that a user has is defined +by the union of the permissions in all its roles. + +The method for assigning roles to users varies depending on which realms you use +to authenticate users. For more information, see <>. + +[[attributes]] +[float] +=== Attribute-based access control + +The {security-features} also provide an attribute-based access control (ABAC) +mechanism, which enables you to use attributes to restrict access to documents +in search queries and aggregations. For example, you can assign attributes to +users and documents, then implement an access policy in a role definition. Users +with that role can read a specific document only if they have all the required +attributes. + +For more information, see +https://www.elastic.co/blog/attribute-based-access-control-with-xpack[Document-level attribute-based access control with X-Pack 6.1]. diff --git a/x-pack/docs/en/security/authorization/privileges.asciidoc b/x-pack/docs/en/security/authorization/privileges.asciidoc new file mode 100644 index 0000000000000..b4d8a64e19149 --- /dev/null +++ b/x-pack/docs/en/security/authorization/privileges.asciidoc @@ -0,0 +1,208 @@ +[role="xpack"] +[[security-privileges]] +=== Security privileges + +This section lists the privileges that you can assign to a role. + +[[privileges-list-cluster]] +==== Cluster privileges + +[horizontal] +`all`:: +All cluster administration operations, like snapshotting, node shutdown/restart, +settings update, rerouting, or managing users and roles. + +`create_snapshot`:: +Privileges to create snapshots for existing repositories. Can also list and view +details on existing repositories and snapshots. + +`manage`:: +Builds on `monitor` and adds cluster operations that change values in the cluster. +This includes snapshotting, updating settings, and rerouting. It also includes +obtaining snapshot and restore status. This privilege does not include the +ability to manage security. + +`manage_ccr`:: +All {ccr} operations related to managing follower indices and auto-follow +patterns. It also includes the authority to grant the privileges necessary to +manage follower indices and auto-follow patterns. This privilege is necessary +only on clusters that contain follower indices. + +`manage_ilm`:: +All {Ilm} operations related to managing policies + +`manage_index_templates`:: +All operations on index templates. + +`manage_ingest_pipelines`:: +All operations on ingest node pipelines. + +`manage_ml`:: +All {ml} operations, such as creating and deleting {dfeeds}, jobs, and model +snapshots. 
++
+--
+NOTE: {dfeeds-cap} that were created prior to version 6.2 or created when
+{security-features} were disabled run as a system user with elevated privileges,
+including permission to read all indices. Newer {dfeeds} run with the security
+roles of the user who created or updated them.
+
+--
+
+`manage_pipeline`::
+All operations on ingest pipelines.
+
+`manage_rollup`::
+All rollup operations, including creating, starting, stopping, and deleting
+rollup jobs.
+
+`manage_saml`::
+Enables the use of internal {es} APIs to initiate and manage SAML authentication
+on behalf of other users.
+
+`manage_security`::
+All security-related operations such as CRUD operations on users and roles and
+cache clearing.
+
+`manage_token`::
+All security-related operations on tokens that are generated by the {es} Token
+Service.
+
+`manage_watcher`::
+All watcher operations, such as putting watches, executing, activating, or acknowledging.
++
+--
+NOTE: Watches that were created prior to version 6.1 or created when the
+{security-features} were disabled run as a system user with elevated privileges,
+including permission to read and write all indices. Newer watches run with the
+security roles of the user who created or updated them.
+
+--
+
+`monitor`::
+All cluster read-only operations, like cluster health and state, hot threads,
+node info, node and cluster stats, and pending cluster tasks.
+
+`monitor_ml`::
+All read only {ml} operations, such as getting information about {dfeeds}, jobs,
+model snapshots, or results.
+
+`monitor_rollup`::
+All read only rollup operations, such as viewing the list of historical and
+currently running rollup jobs and their capabilities.
+
+`monitor_watcher`::
+All read only watcher operations, such as getting a watch and watcher stats.
+
+`read_ccr`::
+All read only {ccr} operations, such as getting information about indices and
+metadata for leader indices in the cluster. It also includes the authority to
+check whether users have the appropriate privileges to follow leader indices.
+This privilege is necessary only on clusters that contain leader indices.
+
+`read_ilm`::
+All read only {Ilm} operations, such as getting policies and checking the
+status of {Ilm}.
+
+`transport_client`::
+All privileges necessary for a transport client to connect. Required by the remote
+cluster to enable <>.
+
+[[privileges-list-indices]]
+==== Indices privileges
+
+[horizontal]
+`all`::
+Any action on an index.
+
+`create`::
+Privilege to index documents. Also grants access to the update mapping
+action.
++
+--
+NOTE: This privilege does not restrict the index operation to the creation
+of documents but instead restricts API use to the index API. The index API allows a user
+to overwrite a previously indexed document.
+
+--
+
+`create_index`::
+Privilege to create an index. A create index request may contain aliases to be
+added to the index once created. In that case the request requires the `manage`
+privilege as well, on both the index and the alias names.
+
+`delete`::
+Privilege to delete documents.
+
+`delete_index`::
+Privilege to delete an index.
+
+`index`::
+Privilege to index and update documents. Also grants access to the update
+mapping action.
+
+`manage`::
+All `monitor` privileges plus index administration (aliases, analyze, cache clear,
+close, delete, exists, flush, mapping, open, force merge, refresh, settings,
+search shards, templates, validate).
+ +`manage_follow_index`:: +All actions that are required to manage the lifecycle of a follower index, which +includes creating a follower index, closing it, and converting it to a regular +index. This privilege is necessary only on clusters that contain follower indices. + +`manage_ilm`:: +All {Ilm} operations relating to managing the execution of policies of an index +This includes operations like retrying policies, and removing a policy +from an index. + +`manage_leader_index`:: +All actions that are required to manage the lifecycle of a leader index, which +includes {ref}/ccr-post-forget-follower.html[forgetting a follower]. This +privilege is necessary only on clusters that contain leader indices. + +`monitor`:: +All actions that are required for monitoring (recovery, segments info, index +stats and status). + +`read`:: +Read only access to actions (count, explain, get, mget, get indexed scripts, +more like this, multi percolate/search/termvector, percolate, scroll, +clear_scroll, search, suggest, tv). + +`read_cross_cluster`:: +Read only access to the search action from a <>. + +`view_index_metadata`:: +Read-only access to index metadata (aliases, aliases exists, get index, exists, field mappings, +mappings, search shards, type exists, validate, warmers, settings, ilm). This +privilege is primarily available for use by {kib} users. + +`write`:: +Privilege to perform all write operations to documents, which includes the +permission to index, update, and delete documents as well as performing bulk +operations. Also grants access to the update mapping action. + + +==== Run as privilege + +The `run_as` permission enables an authenticated user to submit requests on +behalf of another user. The value can be a user name or a comma-separated list +of user names. (You can also specify users as an array of strings or a YAML +sequence.) For more information, see +<>. + +[[application-privileges]] +==== Application privileges + +Application privileges are managed within {es} and can be retrieved with the +{ref}/security-api-has-privileges.html[has privileges API] and the +{ref}/security-api-get-privileges.html[get application privileges API]. They do +not, however, grant access to any actions or resources within {es}. Their +purpose is to enable applications to represent and store their own privilege +models within {es} roles. + +To create application privileges, use the +{ref}/security-api-put-privileges.html[add application privileges API]. You can +then associate these application privileges with roles, as described in +<>. diff --git a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc deleted file mode 100644 index 1a52a9dab7a87..0000000000000 --- a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc +++ /dev/null @@ -1,51 +0,0 @@ -[role="xpack"] -[[ccs-clients-integrations]] -== Cross cluster search, clients, and integrations - -When using {ref}/modules-cross-cluster-search.html[Cross Cluster Search] -you need to take extra steps to secure communications with the connected -clusters. - -* <> - -You will need to update the configuration for several clients to work with a -secured cluster: - -* <> -* <> - - -The {es} {security-features} enable you to secure your {es} cluster. But -{es} itself is only one product within the {stack}. 
It is often the case that -other products in the stack are connected to the cluster and therefore need to -be secured as well, or at least communicate with the cluster in a secured way: - -* <> -* {auditbeat-ref}/securing-beats.html[Auditbeat] -* {filebeat-ref}/securing-beats.html[Filebeat] -* {heartbeat-ref}/securing-beats.html[Heartbeat] -* {kibana-ref}/using-kibana-with-security.html[{kib}] -* {logstash-ref}/ls-security.html[Logstash] -* {metricbeat-ref}/securing-beats.html[Metricbeat] -* <> -* {packetbeat-ref}/securing-beats.html[Packetbeat] -* {kibana-ref}/secure-reporting.html[Reporting] -* {winlogbeat-ref}/securing-beats.html[Winlogbeat] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc -include::ccs-clients-integrations/cross-cluster.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc -include::ccs-clients-integrations/java.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc -include::ccs-clients-integrations/http.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/hadoop.asciidoc -include::ccs-clients-integrations/hadoop.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/beats.asciidoc -include::ccs-clients-integrations/beats.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc -include::ccs-clients-integrations/monitoring.asciidoc[] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster-kibana.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster-kibana.asciidoc new file mode 100644 index 0000000000000..95e5d188f0084 --- /dev/null +++ b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster-kibana.asciidoc @@ -0,0 +1,39 @@ +[[cross-cluster-kibana]] +==== {ccs-cap} and {kib} + +When {kib} is used to search across multiple clusters, a two-step authorization +process determines whether or not the user can access indices on a remote +cluster: + +* First, the local cluster determines if the user is authorized to access remote +clusters. (The local cluster is the cluster {kib} is connected to.) +* If they are, the remote cluster then determines if the user has access +to the specified indices. + +To grant {kib} users access to remote clusters, assign them a local role +with read privileges to indices on the remote clusters. You specify remote +cluster indices as `:`. + +To enable users to actually read the remote indices, you must create a matching +role on the remote clusters that grants the `read_cross_cluster` privilege +and access to the appropriate indices. + +For example, if {kib} is connected to the cluster where you're actively +indexing {ls} data (your _local cluster_) and you're periodically +offloading older time-based indices to an archive cluster +(your _remote cluster_) and you want to enable {kib} users to search both +clusters: + +. On the local cluster, create a `logstash_reader` role that grants +`read` and `view_index_metadata` privileges on the local `logstash-*` indices. 
++ +NOTE: If you configure the local cluster as another remote in {es}, the +`logstash_reader` role on your local cluster also needs to grant the +`read_cross_cluster` privilege. + +. Assign your {kib} users the `kibana_user` role and your `logstash_reader` +role. + +. On the remote cluster, create a `logstash_reader` role that grants the +`read_cross_cluster` privilege and `read` and `view_index_metadata` privileges +for the `logstash-*` indices. diff --git a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc index b72afcb9b011c..3f470fffcbdf4 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc @@ -156,5 +156,4 @@ GET two:logs-2017.04/_search <1> // TEST[skip:todo] //TBD: Is there a missing description of the <1> callout above? -:edit_url: https://github.com/elastic/kibana/edit/{branch}/docs/security/cross-cluster-kibana.asciidoc -include::{kib-repo-dir}/security/cross-cluster-kibana.asciidoc[] +include::cross-cluster-kibana.asciidoc[] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc index ca22ceeebbe22..aef7d093aefcd 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc @@ -54,7 +54,7 @@ specific clients, refer to https://github.com/elasticsearch/elasticsearch-ruby/tree/master/elasticsearch-transport#authentication[Ruby], http://elasticsearch-py.readthedocs.org/en/master/#ssl-and-authentication[Python], https://metacpan.org/pod/Search::Elasticsearch::Cxn::HTTPTiny#CONFIGURATION[Perl], -http://www.elastic.co/guide/en/elasticsearch/client/php-api/current/_security.html[PHP], +http://www.elastic.co/guide/en/elasticsearch/client/php-api/current/security.html[PHP], http://nest.azurewebsites.net/elasticsearch-net/security.html[.NET], http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[JavaScript] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/index.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/index.asciidoc new file mode 100644 index 0000000000000..566731edbbb56 --- /dev/null +++ b/x-pack/docs/en/security/ccs-clients-integrations/index.asciidoc @@ -0,0 +1,40 @@ +[role="xpack"] +[[ccs-clients-integrations]] +== Cross cluster search, clients, and integrations + +When using <> +you need to take extra steps to secure communications with the connected +clusters. + +* <> + +You will need to update the configuration for several clients to work with a +secured cluster: + +* <> +* <> + + +The {es} {security-features} enable you to secure your {es} cluster. But +{es} itself is only one product within the {stack}. 
It is often the case that +other products in the stack are connected to the cluster and therefore need to +be secured as well, or at least communicate with the cluster in a secured way: + +* <> +* {auditbeat-ref}/securing-beats.html[Auditbeat] +* {filebeat-ref}/securing-beats.html[Filebeat] +* {heartbeat-ref}/securing-beats.html[Heartbeat] +* {kibana-ref}/using-kibana-with-security.html[{kib}] +* {logstash-ref}/ls-security.html[Logstash] +* {metricbeat-ref}/securing-beats.html[Metricbeat] +* <> +* {packetbeat-ref}/securing-beats.html[Packetbeat] +* {kibana-ref}/secure-reporting.html[Reporting] +* {winlogbeat-ref}/securing-beats.html[Winlogbeat] + +include::cross-cluster.asciidoc[] +include::java.asciidoc[] +include::http.asciidoc[] +include::hadoop.asciidoc[] +include::beats.asciidoc[] +include::monitoring.asciidoc[] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc index 37c7e38f651bd..a46767629a003 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc @@ -1,7 +1,7 @@ [[secure-monitoring]] === Monitoring and security -The <> consist of two components: +The {stack} {monitor-features} consist of two components: an agent that you install on on each {es} and Logstash node, and a Monitoring UI in {kib}. The monitoring agent collects and indexes metrics from the nodes and you visualize the data through the Monitoring dashboards in {kib}. The agent @@ -17,7 +17,7 @@ with the monitoring cluster. For more information, see: -* {ref}/configuring-monitoring.html[Configuring monitoring in {es}] +* <> * {kibana-ref}/monitoring-xpack-kibana.html[Configuring monitoring in {kib}] * {logstash-ref}/configuring-logstash.html[Configuring monitoring for Logstash nodes] diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index e1dab76293c5f..1b16efb91ff07 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -8,41 +8,32 @@ The {es} {security-features} enable you to easily secure a cluster. You can password-protect your data as well as implement more advanced security measures such as encrypting communications, role-based access control, IP filtering, and -auditing. For more information, see -{stack-ov}/elasticsearch-security.html[Securing the {stack}]. +auditing. -To use {es} {security-features}: - -. Verify that you are using a license that includes the {security-features}. +. Verify that you are using a license that includes the specific +{security-features} you want. + -- -If you want to try all of the platinum features, you can start a 30-day trial. -At the end of the trial period, you can purchase a subscription to keep using -the full functionality. For more information, see -https://www.elastic.co/subscriptions and -{stack-ov}/license-management.html[License Management]. +For more information, see https://www.elastic.co/subscriptions and +{stack-ov}/license-management.html[License management]. -- . Verify that the `xpack.security.enabled` setting is `true` on each node in -your cluster. If you are using a trial license, the default value is `false`. -For more information, see {ref}/security-settings.html[Security Settings in {es}]. +your cluster. If you are using basic or trial licenses, the default value is `false`. +For more information, see <>. . 
If you plan to run {es} in a Federal Information Processing Standard (FIPS) 140-2 enabled JVM, see <>. -. Configure Transport Layer Security (TLS/SSL) for internode-communication. +. <>. + -- NOTE: This requirement applies to clusters with more than one node and to clusters with a single node that listens on an external interface. Single-node clusters that use a loopback interface do not have this requirement. For more -information, see -{stack-ov}/encrypting-communications.html[Encrypting Communications]. +information, see <>. -- -.. <>. - -.. <>. . If it is not already running, start {es}. @@ -50,13 +41,12 @@ information, see + -- The {es} {security-features} provide -{stack-ov}/built-in-users.html[built-in users] to +<> to help you get up and running. The +elasticsearch-setup-passwords+ command is the simplest method to set the built-in users' passwords for the first time. For example, you can run the command in an "interactive" mode, which prompts you -to enter new passwords for the `elastic`, `kibana`, `beats_system`, -`logstash_system`, and `apm_system` users: +to enter new passwords for the built-in users: [source,shell] -------------------------------------------------- @@ -73,22 +63,29 @@ user API. -- -. Choose which types of realms you want to use to authenticate users. -** <>. -** <>. -** <>. -** <>. -** <>. -** <>. -** <>. +. Choose which types of realms you want to use to authenticate users. ++ +-- +TIP: The types of authentication realms that you can enable varies according to +your subscription. For more information, see https://www.elastic.co/subscriptions. + +-- +** <> +** <> +** <> +** <> +** <> +** <> +** <> . Set up roles and users to control access to {es}. ++ +-- For example, to grant _John Doe_ full access to all indices that match the pattern `events*` and enable him to create visualizations and dashboards for those indices in {kib}, you could create an `events_admin` role and assign the role to a new `johndoe` user. -+ --- + [source,shell] ---------------------------------------------------------- curl -XPOST -u elastic 'localhost:9200/_security/role/events_admin' -H "Content-Type: application/json" -d '{ @@ -114,11 +111,13 @@ curl -XPOST -u elastic 'localhost:9200/_security/user/johndoe' -H "Content-Type: // NOTCONSOLE -- -[[enable-auditing]] -. Enable auditing to keep track of attempted and successful interactions with - your {es} cluster: +. [[enable-auditing]](Optional) Enable auditing to keep track of attempted and +successful interactions with your {es} cluster: + -- +TIP: Audit logging is available with specific subscriptions. For more +information, see https://www.elastic.co/subscriptions. + .. Add the following setting to `elasticsearch.yml` on all nodes in your cluster: + [source,yaml] @@ -126,8 +125,7 @@ curl -XPOST -u elastic 'localhost:9200/_security/user/johndoe' -H "Content-Type: xpack.security.audit.enabled: true ---------------------------- + -For more information, see {stack-ov}/auditing.html[Auditing Security Events] -and <>. +For more information, see <> and <>. .. Restart {es}. @@ -135,30 +133,20 @@ Events are logged to a dedicated `_audit.json` file in `ES_HOME/logs`, on each cluster node. 
-- -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc -include::{es-repo-dir}/security/securing-communications/securing-elasticsearch.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc -include::{es-repo-dir}/security/securing-communications/configuring-tls-docker.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/enabling-cipher-suites.asciidoc -include::{es-repo-dir}/security/securing-communications/enabling-cipher-suites.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc -include::{es-repo-dir}/security/securing-communications/separating-node-client-traffic.asciidoc[] -:edit_url: +To walk through the configuration of {security-features} in {es}, {kib}, {ls}, and {metricbeat}, see +<>. + +include::securing-communications/securing-elasticsearch.asciidoc[] +include::securing-communications/configuring-tls-docker.asciidoc[] +include::securing-communications/enabling-cipher-suites.asciidoc[] +include::securing-communications/separating-node-client-traffic.asciidoc[] include::authentication/configuring-active-directory-realm.asciidoc[] include::authentication/configuring-file-realm.asciidoc[] include::authentication/configuring-ldap-realm.asciidoc[] include::authentication/configuring-native-realm.asciidoc[] include::authentication/configuring-pki-realm.asciidoc[] include::authentication/configuring-saml-realm.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc include::authentication/configuring-kerberos-realm.asciidoc[] -:edit_url: include::fips-140-compliance.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/settings/security-settings.asciidoc -include::{es-repo-dir}/settings/security-settings.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/reference/files.asciidoc -include::{es-repo-dir}/security/reference/files.asciidoc[] +include::reference/files.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/settings/audit-settings.asciidoc -include::{es-repo-dir}/settings/audit-settings.asciidoc[] diff --git a/x-pack/docs/en/security/fips-140-compliance.asciidoc b/x-pack/docs/en/security/fips-140-compliance.asciidoc index 6bc9be512db4e..8c11503b3d3fb 100644 --- a/x-pack/docs/en/security/fips-140-compliance.asciidoc +++ b/x-pack/docs/en/security/fips-140-compliance.asciidoc @@ -89,7 +89,7 @@ the following table: {es} offers a number of algorithms for securely hashing credentials in memory and on disk. However, only the `PBKDF2` family of algorithms is compliant with FIPS -140-2 for password hashing. You must set the the `cache.hash_algo` realm settings +140-2 for password hashing. You must set the `cache.hash_algo` realm settings and the `xpack.security.authc.password_hashing.algorithm` setting to one of the available `PBKDF2` values. See <>. 
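For illustration, a minimal `elasticsearch.yml` sketch of FIPS-compliant password hashing settings; the realm type and name (`native`, `native1`) are placeholders for whatever realms you have configured, and the referenced settings documentation lists the other accepted `PBKDF2` values:

[source,yaml]
----
# Hash passwords with a FIPS 140-2 compliant algorithm
xpack.security.authc.password_hashing.algorithm: pbkdf2
# Repeat for every configured realm (realm type and name here are examples)
xpack.security.authc.realms.native.native1.cache.hash_algo: pbkdf2
----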
diff --git a/x-pack/docs/en/security/get-started-builtin-users.asciidoc b/x-pack/docs/en/security/get-started-builtin-users.asciidoc new file mode 100644 index 0000000000000..766c3263d756d --- /dev/null +++ b/x-pack/docs/en/security/get-started-builtin-users.asciidoc @@ -0,0 +1,33 @@ +// tag::create-users[] +There are <> that you can use for specific +administrative purposes: `apm_system`, `beats_system`, `elastic`, `kibana`, +`logstash_system`, and `remote_monitoring_user`. + +// end::create-users[] + +Before you can use them, you must set their passwords: + +. Restart {es}. For example, if you installed {es} with a `.tar.gz` package, run +the following command from the {es} directory: ++ +-- +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./bin/elasticsearch +---------------------------------------------------------------------- + +See {ref}/starting-elasticsearch.html[Starting {es}]. +-- + +. Set the built-in users' passwords. ++ +-- +// tag::create-users[] +Run the following command from the {es} directory: + +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./bin/elasticsearch-setup-passwords interactive +---------------------------------------------------------------------- +// end::create-users[] +-- diff --git a/x-pack/docs/en/security/get-started-enable-security.asciidoc b/x-pack/docs/en/security/get-started-enable-security.asciidoc new file mode 100644 index 0000000000000..bbe2999fc6753 --- /dev/null +++ b/x-pack/docs/en/security/get-started-enable-security.asciidoc @@ -0,0 +1,35 @@ +When you use the basic and trial licenses, the {es} {security-features} are +disabled by default. To enable them: + +. Stop {kib}. The method for starting and stopping {kib} varies depending on +how you installed it. For example, if you installed {kib} from an archive +distribution (`.tar.gz` or `.zip`), stop it by entering `Ctrl-C` on the command +line. See {kibana-ref}/start-stop.html[Starting and stopping {kib}]. + +. Stop {es}. For example, if you installed {es} from an archive distribution, +enter `Ctrl-C` on the command line. See +{ref}/stopping-elasticsearch.html[Stopping {es}]. + +. Add the `xpack.security.enabled` setting to the +`ES_PATH_CONF/elasticsearch.yml` file. ++ +-- +TIP: The `ES_PATH_CONF` environment variable contains the path for the {es} +configuration files. If you installed {es} using archive distributions (`zip` or +`tar.gz`), it defaults to `ES_HOME/config`. If you used package distributions +(Debian or RPM), it defaults to `/etc/elasticsearch`. For more information, see +{ref}/settings.html[Configuring {es}]. + +For example, add the following setting: + +[source,yaml] +---- +xpack.security.enabled: true +---- + +TIP: If you have a basic or trial license, the default value for this setting is +`false`. If you have a gold or higher license, the default value is `true`. +Therefore, it is a good idea to explicitly add this setting to avoid confusion +about whether {security-features} are enabled. + +-- diff --git a/x-pack/docs/en/security/get-started-kibana-users.asciidoc b/x-pack/docs/en/security/get-started-kibana-users.asciidoc new file mode 100644 index 0000000000000..2d06f670cdcba --- /dev/null +++ b/x-pack/docs/en/security/get-started-kibana-users.asciidoc @@ -0,0 +1,62 @@ +When the {es} {security-features} are enabled, users must log in to {kib} +with a valid user ID and password. 
+ +{kib} also performs some tasks under the covers that require use of the +built-in `kibana` user. + +. Configure {kib} to use the built-in `kibana` user and the password that you +created: + +** If you don't mind having passwords visible in your configuration file, +uncomment and update the following settings in the `kibana.yml` file in your +{kib} directory: ++ +-- +TIP: If you installed {kib} using archive distributions (`zip` or +`tar.gz`), the `kibana.yml` configuration file is in `KIBANA_HOME/config`. If +you used package distributions (Debian or RPM), it's in `/etc/kibana`. For more +information, see {kibana-ref}/settings.html[Configuring {kib}]. + +For example, add the following settings: + +[source,yaml] +---- +elasticsearch.username: "kibana" +elasticsearch.password: "your_password" +---- + +Specify the password that you set with the `elasticsearch-setup-passwords` +command then save your changes to the file. +-- + +** If you prefer not to put your user ID and password in the `kibana.yml` file, +store them in a keystore instead. Run the following commands to create the {kib} +keystore and add the secure settings: ++ +-- +// tag::store-kibana-user[] +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./bin/kibana-keystore create +./bin/kibana-keystore add elasticsearch.username +./bin/kibana-keystore add elasticsearch.password +---------------------------------------------------------------------- + +When prompted, specify the `kibana` built-in user and its password for these +setting values. The settings are automatically applied when you start {kib}. +To learn more, see {kibana-ref}/secure-settings.html[Secure settings]. +// end::store-kibana-user[] +-- + +. Restart {kib}. For example, if you installed +{kib} with a `.tar.gz` package, run the following command from the {kib} +directory: ++ +-- +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./bin/kibana +---------------------------------------------------------------------- + +See {kibana-ref}/start-stop.html[Starting and stopping {kib}]. +-- \ No newline at end of file diff --git a/x-pack/docs/en/security/get-started-security.asciidoc b/x-pack/docs/en/security/get-started-security.asciidoc new file mode 100644 index 0000000000000..38267d44af72d --- /dev/null +++ b/x-pack/docs/en/security/get-started-security.asciidoc @@ -0,0 +1,376 @@ +[role="xpack"] +[testenv="basic"] +[[security-getting-started]] +== Tutorial: Getting started with security + +In this tutorial, you learn how to secure a cluster by configuring users and +roles in {es}, {kib}, {ls}, and {metricbeat}. + +[float] +[[get-started-security-prerequisites]] +=== Before you begin + +. Install and configure {es}, {kib}, {ls}, and {metricbeat} as described in +{stack-gs}/get-started-elastic-stack.html[Getting started with the {stack}]. ++ +-- +IMPORTANT: To complete this tutorial, you must install the default {es} and +{kib} packages, which include role-based access control (RBAC) and native +authentication {security-features}. When you install these products, they apply +basic licenses with no expiration dates. All of the subsequent steps in this +tutorial assume that you are using a basic license. For more information, see +{subscriptions} and {stack-ov}/license-management.html[License management]. + +-- + +. Stop {ls}. 
The method for starting and stopping {ls} varies depending on whether +you are running it from the command line or running it as a service. For example, +if you are running {ls} from the command line, you can stop it by entering +`Ctrl-C`. See {logstash-ref}/shutdown.html[Shutting down {ls}]. + +. Stop {metricbeat}. For example, enter `Ctrl-C` on the command line where it is +running. + +. Launch the {kib} web interface by pointing your browser to port 5601. For +example, http://127.0.0.1:5601[http://127.0.0.1:5601]. + +[role="xpack"] +[[get-started-enable-security]] +=== Enable {es} {security-features} + +include::get-started-enable-security.asciidoc[] + +. Enable single-node discovery in the `ES_PATH_CONF/elasticsearch.yml` file. ++ +-- +This tutorial involves a single node cluster, but if you had multiple +nodes, you would enable {es} {security-features} on every node in the cluster +and configure Transport Layer Security (TLS) for internode-communication, which +is beyond the scope of this tutorial. By enabling single-node discovery, we are +postponing the configuration of TLS. For example, add the following setting: + +[source,yaml] +---- +discovery.type: single-node +---- + +For more information, see <>. +-- + +When you enable {es} {security-features}, basic authentication is enabled by +default. To communicate with the cluster, you must specify a username and +password. Unless you <>, all requests +that don't include a user name and password are rejected. + +[role="xpack"] +[[get-started-built-in-users]] +=== Create passwords for built-in users + +include::get-started-builtin-users.asciidoc[] + +You need these built-in users in subsequent steps, so choose passwords that you +can remember! + +NOTE: This tutorial does not use the built-in `apm_system`, `logstash_system`, +`beats_system`, and `remote_monitoring_user` users, which are typically +associated with monitoring. For more information, see +{logstash-ref}/ls-security.html#ls-monitoring-user[Configuring credentials for {ls} monitoring] +and {metricbeat-ref}/monitoring.html[Monitoring {metricbeat}]. + +[role="xpack"] +[[get-started-kibana-user]] +=== Add the built-in user to {kib} + +include::get-started-kibana-users.asciidoc[] + +[role="xpack"] +[[get-started-authentication]] +=== Configure authentication + +Now that you've set up the built-in users, you need to decide how you want to +manage all the other users. + +The {stack} _authenticates_ users to ensure that they are valid. The +authentication process is handled by _realms_. You can use one or more built-in +realms, such as the native, file, LDAP, PKI, Active Directory, SAML, or Kerberos +realms. Alternatively, you can create your own custom realms. In this tutorial, +we'll use a native realm. + +In general, you configure realms by adding `xpack.security.authc.realms` +settings in the `elasticsearch.yml` file. However, the native realm is available +by default when no other realms are configured. Therefore, you don't need to do +any extra configuration steps in this tutorial. You can jump straight to +creating users! + +If you want to learn more about authentication and realms, see +<>. + +[role="xpack"] +[[get-started-users]] +=== Create users + +Let's create two users in the native realm. + +. Log in to {kib} with the `elastic` built-in user. + +. Go to the *Management / Security / Users* page: ++ +-- +[role="screenshot"] +image::security/images/management-builtin-users.jpg["User management screenshot in Kibana"] + +In this example, you can see a list of built-in users. 
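If you prefer the API to the UI, you can fetch the same list with the get users API; for example, a quick check with `curl` (host and credentials are placeholders for your setup):

[source,sh]
----
# List native and built-in users, authenticating as the elastic superuser
curl -X GET -u elastic "localhost:9200/_security/user?pretty"
----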
+-- + +. Click *Create new user*. For example, create a user for yourself: ++ +-- +[role="screenshot"] +image::security/images/create-user.jpg["Creating a user in Kibana"] + +You'll notice that when you create a user, you can assign it a role. Don't +choose a role yet--we'll come back to that in subsequent steps. +-- + +. Click *Create new user* and create a `logstash_internal` user. ++ +-- +In {stack-gs}/get-started-elastic-stack.html[Getting started with the {stack}], +you configured {ls} to listen for {metricbeat} +input and to send the events to {es}. You therefore need to create a user +that {ls} can use to communicate with {es}. For example: + +[role="screenshot"] +image::security/images/create-logstash-user.jpg["Creating a {ls} user in {kib}"] +-- + +[role="xpack"] +[[get-started-roles]] +=== Assign roles + +By default, all users can change their own passwords, get information about +themselves, and run the `authenticate` API. If you want them to do more than +that, you need to give them one or more _roles_. + +Each role defines a specific set of actions (such as read, create, or delete) +that can be performed on specific secured resources (such as indices, aliases, +documents, fields, or clusters). To help you get up and running, there are +built-in roles. + +Go to the *Management / Security / Roles* page to see them: + +[role="screenshot"] +image::security/images/management-roles.jpg["Role management screenshot in Kibana"] + +Select a role to see more information about its privileges. For example, select +the `kibana_system` role to see its list of cluster and index privileges. To +learn more, see <>. + +Let's assign the `kibana_user` role to your user. Go back to the +*Management / Security / Users* page and select your user. Add the `kibana_user` +role and save the change. For example: + +[role="screenshot"] +image::security/images/assign-role.jpg["Assigning a role to a user in Kibana"] + +This user now has access to all features in {kib}. For more information about granting +access to Kibana see {kibana-ref}/xpack-security-authorization.html[Kibana Authorization]. + +If you completed all of the steps in +{stack-gs}/get-started-elastic-stack.html[Getting started with the {stack}], you should +have {metricbeat} data stored in {es}. Let's create two roles that grant +different levels of access to that data. + +Go to the *Management / Security / Roles* page and click *Create role*. + +Create a `metricbeat_reader` role that has `read` and `view_index_metadata` +privileges on the `metricbeat-*` indices: + +[role="screenshot"] +image::security/images/create-reader-role.jpg["Creating a role in Kibana"] + +Create a `metricbeat_writer` role that has `manage_index_templates` and `monitor` +cluster privileges, as well as `write`, `delete`, and `create_index` privileges +on the `metricbeat-*` indices: + +[role="screenshot"] +image::security/images/create-writer-role.jpg["Creating another role in Kibana"] + +Now go back to the *Management / Security / Users* page and assign these roles +to the appropriate users. Assign the `metricbeat_reader` role to your personal +user. Assign the `metricbeat_writer` role to the `logstash_internal` user. + +The list of users should now contain all of the built-in users as well as the +two you created. It should also show the appropriate roles for your users: + +[role="screenshot"] +image::security/images/management-users.jpg["User management screenshot in Kibana"] + +If you want to learn more about authorization and roles, see <>. 
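The UI steps above map directly onto the role and user APIs, so you can also script them. As a sketch, this request creates the same `metricbeat_reader` role from the command line (host and credentials are placeholders):

[source,sh]
----
# Create a role with read-only access to the metricbeat-* indices
curl -X POST -u elastic "localhost:9200/_security/role/metricbeat_reader" \
 -H "Content-Type: application/json" -d '{
  "indices": [
    {
      "names": [ "metricbeat-*" ],
      "privileges": [ "read", "view_index_metadata" ]
    }
  ]
}'
----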
+ +[role="xpack"] +[[get-started-logstash-user]] +=== Add user information in {ls} + +In order for {ls} to send data successfully to {es}, you must configure its +authentication credentials in the {ls} configuration file. + +. Configure {ls} to use the `logstash_internal` user and the password that you +created: + +** If you don't mind having passwords visible in your configuration file, add +the following `user` and `password` settings in the `demo-metrics-pipeline.conf` +file in your {ls} directory: ++ +-- +[source,ruby] +---- +... + +output { + elasticsearch { + hosts => "localhost:9200" + manage_template => false + index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" + user => "logstash_internal" <1> + password => "your_password" <2> + } +} +---- +<1> Specify the `logstash_internal` user that you created earlier in this tutorial. +<2> Specify the password that you chose for this user ID. +-- + +** If you prefer not to put your user ID and password in the configuration file, +store them in a keystore instead. ++ +-- +Run the following commands to create the {ls} +keystore and add the secure settings: + +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +set +o history +export LOGSTASH_KEYSTORE_PASS=mypassword <1> +set -o history +./bin/logstash-keystore create +./bin/logstash-keystore add ES_USER +./bin/logstash-keystore add ES_PWD +---------------------------------------------------------------------- +<1> You can optionally protect access to the {ls} keystore by storing a password +in an environment variable called `LOGSTASH_KEYSTORE_PASS`. For more information, +see {logstash-ref}/keystore.html#keystore-password[Keystore password]. + +When prompted, specify the `logstash_internal` user and its password for the +`ES_USER` and `ES_PWD` values. + +NOTE: The {ls} keystore differs from the {kib} keystore. Whereas the {kib} +keystore enables you to store `kibana.yml` settings by name, the {ls} keystore +enables you to create arbitrary names that you can reference in the {ls} +configuration. To learn more, see +{logstash-ref}/keystore.html[Secrets keystore for secure settings]. + +You can now use these `ES_USER` and `ES_PWD` keys in your configuration +file. For example, add the `user` and `password` settings in the +`demo-metrics-pipeline.conf` file as follows: + +[source,ruby] +---- +... + +output { + elasticsearch { + hosts => "localhost:9200" + manage_template => false + index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" + user => "${ES_USER}" + password => "${ES_PWD}" + } +} +---- +-- + +. Start {ls} by using the appropriate method for your environment. ++ +-- +For example, to +run {ls} from a command line, go to the {ls} directory and enter the following +command: + +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./bin/logstash -f demo-metrics-pipeline.conf +---------------------------------------------------------------------- + +To start {ls} as a service, see +{logstash-ref}/running-logstash.html[Running {ls} as a service on Debian or RPM]. +-- + +. If you were connecting directly from {metricbeat} to {es}, you would need +to configure authentication credentials for the {es} output in the {metricbeat} +configuration file. 
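For example, a direct {metricbeat}-to-{es} connection would carry credentials in the `output.elasticsearch` section of `metricbeat.yml`, roughly as in this sketch (the user name is hypothetical):

[source,yaml]
----
output.elasticsearch:
  hosts: ["localhost:9200"]
  # A hypothetical user that has been granted the metricbeat_writer role
  username: "metricbeat_internal"
  password: "your_password"
----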
In +{stack-gs}/get-started-elastic-stack.html[Getting started with the {stack}], +however, you configured +{metricbeat} to send the data to {ls} for additional parsing, so no extra +settings are required in {metricbeat}. For more information, see +{metricbeat-ref}/securing-metricbeat.html[Securing {metricbeat}]. + +. Start {metricbeat} by using the appropriate method for your environment. ++ +-- +For example, on macOS, run the following command from the {metricbeat} directory: + +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./metricbeat -e +---------------------------------------------------------------------- + +For more methods, see {metricbeat-ref}/metricbeat-starting.html[Starting {metricbeat}]. +-- + +Wait a few minutes for new data to be sent from {metricbeat} to {ls} and {es}. + +[role="xpack"] +[[get-started-verify-users]] +=== View system metrics in {kib} + +Log in to {kib} with the user ID that has `metricbeat_reader` and `kibana_user` +roles (for example, `jdoe`). + +These roles enable the user to see the system metrics in {kib} (for example, on +the *Discover* page or in the +http://localhost:5601/app/kibana#/dashboard/Metricbeat-system-overview[{metricbeat} system overview dashboard]). + +[float] +[[gs-security-nextsteps]] +=== What's next? + +Congratulations! You've successfully set up authentication and authorization by +using the native realm. You learned how to create user IDs and roles that +prevent unauthorized access to the {stack}. + +Later, when you're ready to increase the number of nodes in your cluster, you'll +want to encrypt communications across the {stack}. To learn how, read +<>. + +For more detailed information about securing the {stack}, see: + +* <>. Encrypt +inter-node communications, set passwords for the built-in users, and manage your +users and roles. + +* {kibana-ref}/using-kibana-with-security.html[Configuring security in {kib}]. +Set the authentication credentials in {kib} and encrypt communications between +the browser and the {kib} server. + +* {logstash-ref}/ls-security.html[Configuring security in Logstash]. Set the +authentication credentials for Logstash and encrypt communications between +Logstash and {es}. + +* <>. Configure authentication +credentials and encrypt connections to {es}. + +* <>. + +* {hadoop-ref}/security.html[Configuring {es} for Apache Hadoop to use secured transport]. + diff --git a/x-pack/docs/en/security/how-security-works.asciidoc b/x-pack/docs/en/security/how-security-works.asciidoc new file mode 100644 index 0000000000000..e05991cac3b7d --- /dev/null +++ b/x-pack/docs/en/security/how-security-works.asciidoc @@ -0,0 +1,50 @@ +[role="xpack"] +[[how-security-works]] +== How security works + +An Elasticsearch cluster is typically made out of many moving parts. There are +the Elasticsearch nodes that form the cluster and often Logstash instances, +Kibana instances, Beats agents, and clients all communicating with the cluster. +It should not come as a surprise that securing such clusters has many facets and +layers. + +The {stack-security-features} provide the means to secure the Elastic cluster +on several levels: + + * <> + * <> + * Node/client authentication and channel encryption + * Auditing + +[float] +=== Node/client authentication and channel encryption + +The {security-features} support configuring SSL/TLS for securing the +communication channels to, from and within the cluster. 
This support accounts for: + + * Encryption of data transmitted over the wires + * Certificate-based node authentication - preventing unauthorized nodes/clients + from establishing a connection with the cluster. + +For more information, see <>. + +The {security-features} also enable you to <> +which can be seen as a light mechanism for node/client authentication. With IP +filtering, you can restrict the nodes and clients that can connect to the +cluster based on their IP addresses. The IP filters configuration provides +whitelisting and blacklisting of IPs, subnets and DNS domains. + + +[float] +=== Auditing +When dealing with any secure system, it is critical to have an audit trail +mechanism set in place. Audit trails log various activities/events that occur in +the system, enabling you to analyze and backtrack past events when things go +wrong (e.g. security breach). + +The {security-features} provide such audit trail functionality for all nodes in +the cluster. You can configure the audit level which accounts for the type of +events that are logged. These events include failed authentication attempts, +user access denied, node connection denied, and more. + +For more information on auditing, see <>. diff --git a/x-pack/docs/en/security/images/assign-role.jpg b/x-pack/docs/en/security/images/assign-role.jpg new file mode 100644 index 0000000000000..4771aa3b84f09 Binary files /dev/null and b/x-pack/docs/en/security/images/assign-role.jpg differ diff --git a/x-pack/docs/en/security/images/create-logstash-user.jpg b/x-pack/docs/en/security/images/create-logstash-user.jpg new file mode 100644 index 0000000000000..938ccb72ea3cf Binary files /dev/null and b/x-pack/docs/en/security/images/create-logstash-user.jpg differ diff --git a/x-pack/docs/en/security/images/create-reader-role.jpg b/x-pack/docs/en/security/images/create-reader-role.jpg new file mode 100644 index 0000000000000..4d301fcfe910e Binary files /dev/null and b/x-pack/docs/en/security/images/create-reader-role.jpg differ diff --git a/x-pack/docs/en/security/images/create-user.jpg b/x-pack/docs/en/security/images/create-user.jpg new file mode 100644 index 0000000000000..1ce905f3f545d Binary files /dev/null and b/x-pack/docs/en/security/images/create-user.jpg differ diff --git a/x-pack/docs/en/security/images/create-writer-role.jpg b/x-pack/docs/en/security/images/create-writer-role.jpg new file mode 100644 index 0000000000000..25ec820f36624 Binary files /dev/null and b/x-pack/docs/en/security/images/create-writer-role.jpg differ diff --git a/x-pack/docs/en/security/images/management-builtin-users.jpg b/x-pack/docs/en/security/images/management-builtin-users.jpg new file mode 100644 index 0000000000000..ec39d1f2b46dd Binary files /dev/null and b/x-pack/docs/en/security/images/management-builtin-users.jpg differ diff --git a/x-pack/docs/en/security/images/management-roles.jpg b/x-pack/docs/en/security/images/management-roles.jpg new file mode 100644 index 0000000000000..f8bb4af7d3f56 Binary files /dev/null and b/x-pack/docs/en/security/images/management-roles.jpg differ diff --git a/x-pack/docs/en/security/images/management-users.jpg b/x-pack/docs/en/security/images/management-users.jpg new file mode 100644 index 0000000000000..bea27be54a84c Binary files /dev/null and b/x-pack/docs/en/security/images/management-users.jpg differ diff --git a/x-pack/docs/en/security/index.asciidoc b/x-pack/docs/en/security/index.asciidoc new file mode 100644 index 0000000000000..306b699ed4b7b --- /dev/null +++ b/x-pack/docs/en/security/index.asciidoc @@
-0,0 +1,39 @@ +[[secure-cluster]] += Secure a cluster + +[partintro] +-- +The {stack-security-features} enable you to easily secure a cluster. You can +password-protect your data as well as implement more advanced security +measures such as encrypting communications, role-based access control, +IP filtering, and auditing. + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +-- + +include::overview.asciidoc[] +include::configuring-es.asciidoc[] +include::how-security-works.asciidoc[] +include::authentication/index.asciidoc[] +include::authorization/index.asciidoc[] +include::auditing/index.asciidoc[] +include::securing-communications/index.asciidoc[] +include::using-ip-filtering.asciidoc[] +include::ccs-clients-integrations/index.asciidoc[] +include::get-started-security.asciidoc[] +include::securing-communications/tutorial-tls-intro.asciidoc[] +include::troubleshooting.asciidoc[] +include::limitations.asciidoc[] diff --git a/x-pack/docs/en/security/limitations.asciidoc b/x-pack/docs/en/security/limitations.asciidoc new file mode 100644 index 0000000000000..8a0561254b475 --- /dev/null +++ b/x-pack/docs/en/security/limitations.asciidoc @@ -0,0 +1,92 @@ +[role="xpack"] +[[security-limitations]] +== Security limitations +++++ +Limitations +++++ + +[float] +=== Plugins + +Elasticsearch's plugin infrastructure is extremely flexible in terms of what can +be extended. While it opens up Elasticsearch to a wide variety of (often custom) +additional functionality, when it comes to security, this high extensibility level +comes at a cost. We have no control over the third-party plugins' code (open +source or not) and therefore we cannot guarantee their compliance with +{stack-security-features}. For this reason, third-party plugins are not +officially supported on clusters with {security-features} enabled. + +[float] +=== Changes in index wildcard behavior + +Elasticsearch clusters with the {security-features} enabled apply the `/_all` +wildcard, and all other wildcards, to the indices that the current user has +privileges for, not the set of all indices on the cluster. + +[float] +=== Multi document APIs + +Multi get and multi term vectors API throw IndexNotFoundException when trying to access non existing indices that the user is +not authorized for. By doing that they leak information regarding the fact that the index doesn't exist, while the user is not +authorized to know anything about those indices. + +[float] +=== Filtered index aliases + +Aliases containing filters are not a secure way to restrict access to individual +documents, due to the limitations described in +<>. +The {stack-security-features} provide a secure way to restrict access to +documents through the +<> feature. + +[float] +=== Field and document level security limitations + +When a user's role enables document or field level security for an index: + +* The user cannot perform write operations: +** The update API isn't supported. +** Update requests included in bulk requests aren't supported. +* The request cache is disabled for search requests. + +When a user's role enables document level security for an index: + +* Document level security isn't applied for APIs that aren't document based. + An example is the field stats API. +* Document level security doesn't affect global index statistics that relevancy + scoring uses. So this means that scores are computed without taking the role + query into account. Note that documents not matching with the role query are + never returned. 
+* The `has_child` and `has_parent` queries aren't supported as query in the + role definition. The `has_child` and `has_parent` queries can be used in the + search API with document level security enabled. +* Any query that makes remote calls to fetch data to query by isn't supported. + The following queries aren't supported: +** The `terms` query with terms lookup isn't supported. +** The `geo_shape` query with indexed shapes isn't supported. +** The `percolate` query isn't supported. +* If suggesters are specified and document level security is enabled then + the specified suggesters are ignored. +* A search request cannot be profiled if document level security is enabled. + +[float] +[[alias-limitations]] +=== Index and field names can be leaked when using aliases + +Calling certain Elasticsearch APIs on an alias can potentially leak information +about indices that the user isn't authorized to access. For example, when you get +the mappings for an alias with the `_mapping` API, the response includes the +index name and mappings for each index that the alias applies to. + +Until this limitation is addressed, avoid index and field names that contain +confidential or sensitive information. + +[float] +=== LDAP realm + +The <> does not currently support the discovery of nested +LDAP Groups. For example, if a user is a member of `group_1` and `group_1` is a +member of `group_2`, only `group_1` will be discovered. However, the +<> *does* support transitive +group membership. diff --git a/x-pack/docs/en/security/overview.asciidoc b/x-pack/docs/en/security/overview.asciidoc new file mode 100644 index 0000000000000..c06f67b9e92f8 --- /dev/null +++ b/x-pack/docs/en/security/overview.asciidoc @@ -0,0 +1,63 @@ +[role="xpack"] +[[elasticsearch-security]] +== Security overview +++++ +Overview +++++ + +Security protects {es} clusters by: + +* <> + with password protection, role-based access control, and IP filtering. +* <> + with SSL/TLS encryption. +* <> + so you know who's doing what to your cluster and the data it stores. + +[float] +[[preventing-unauthorized-access]] +=== Preventing unauthorized access + +To prevent unauthorized access to your {es} cluster, you must have a +way to _authenticate_ users. This simply means that you need a way to validate +that a user is who they claim to be. For example, you have to make sure only +the person named _Kelsey Andorra_ can sign in as the user `kandorra`. The +{es-security-features} provide a standalone authentication mechanism that enables +you to quickly password-protect your cluster. If you're already using LDAP, +Active Directory, or PKI to manage users in your organization, the +{security-features} are able to integrate with those systems to perform user +authentication. + +In many cases, simply authenticating users isn't enough. You also need a way to +control what data users have access to and what tasks they can perform. The +{es-security-features} enable you to _authorize_ users by assigning access +_privileges_ to _roles_ and assigning those roles to users. For example, this +role-based access control mechanism (a.k.a RBAC) enables you to specify that the +user `kandorra` can only perform read operations on the `events` index and can't +do anything at all with other indices. + +The {security-features} also support IP-based authorization. +You can whitelist and blacklist specific IP addresses or subnets to control +network-level access to a server. 
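As a sketch, IP filtering boils down to a couple of `elasticsearch.yml` settings; the addresses below are placeholders:

[source,yaml]
----
# Allow transport connections from this subnet and reject everything else
xpack.security.transport.filter.allow: "192.168.0.0/24"
xpack.security.transport.filter.deny: "_all"
----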
+ +[float] +[[preserving-data-integrity]] +=== Preserving data integrity + +A critical part of security is keeping confidential data confidential. +{es} has built-in protections against accidental data loss and +corruption. However, there's nothing to stop deliberate tampering or data +interception. The {stack-security-features} preserve the integrity of your +data by encrypting communications to and from nodes. For even +greater protection, you can increase the <>. + +[float] +[[maintaining-audit-trail]] +=== Maintaining an audit trail + +Keeping a system secure takes vigilance. By using {stack-security-features} to +maintain an audit trail, you can easily see who is accessing your cluster and +what they're doing. By analyzing access patterns and failed attempts to access +your cluster, you can gain insights into attempted attacks and data breaches. +Keeping an auditable log of the activity in your cluster can also help diagnose +operational issues. diff --git a/docs/reference/security/reference/files.asciidoc b/x-pack/docs/en/security/reference/files.asciidoc similarity index 100% rename from docs/reference/security/reference/files.asciidoc rename to x-pack/docs/en/security/reference/files.asciidoc diff --git a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc b/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc similarity index 96% rename from docs/reference/security/securing-communications/configuring-tls-docker.asciidoc rename to x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc index 2588a49fc72b9..1d23430e37eec 100644 --- a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc +++ b/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc @@ -2,10 +2,8 @@ [[configuring-tls-docker]] === Encrypting communications in an {es} Docker Container -Starting with version 6.0.0, {stack} {security-features} -(Gold, Platinum or Enterprise subscriptions) -https://www.elastic.co/guide/en/elasticsearch/reference/6.0/breaking-6.0.0-xes.html[require SSL/TLS] -encryption for the transport networking layer. +Unless you are using a trial license, {stack} {security-features} require +SSL/TLS encryption for the transport networking layer. This section demonstrates an easy path to get started with SSL/TLS for both HTTPS and transport using the {es} Docker image. The example uses @@ -108,6 +106,7 @@ services: image: {docker-image} environment: - node.name=es01 + - discovery.seed_hosts=es02 - cluster.initial_master_nodes=es01,es02 - ELASTIC_PASSWORD=$ELASTIC_PASSWORD <1> - "ES_JAVA_OPTS=-Xms512m -Xmx512m" diff --git a/docs/reference/security/securing-communications/enabling-cipher-suites.asciidoc b/x-pack/docs/en/security/securing-communications/enabling-cipher-suites.asciidoc similarity index 96% rename from docs/reference/security/securing-communications/enabling-cipher-suites.asciidoc rename to x-pack/docs/en/security/securing-communications/enabling-cipher-suites.asciidoc index 51d5e5f6de650..4e51f5e43ff24 100644 --- a/docs/reference/security/securing-communications/enabling-cipher-suites.asciidoc +++ b/x-pack/docs/en/security/securing-communications/enabling-cipher-suites.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [[ciphers]] -=== Enabling Cipher Suites for Stronger Encryption +=== Enabling cipher suites for stronger encryption The TLS and SSL protocols use a cipher suite that determines the strength of encryption used to protect the data. 
You may want to increase the strength of diff --git a/x-pack/docs/en/security/securing-communications.asciidoc b/x-pack/docs/en/security/securing-communications/index.asciidoc similarity index 51% rename from x-pack/docs/en/security/securing-communications.asciidoc rename to x-pack/docs/en/security/securing-communications/index.asciidoc index 63fded729eb8c..90989901b15eb 100644 --- a/x-pack/docs/en/security/securing-communications.asciidoc +++ b/x-pack/docs/en/security/securing-communications/index.asciidoc @@ -5,8 +5,7 @@ Elasticsearch nodes store data that may be confidential. Attacks on the data may come from the network. These attacks could include sniffing of the data, manipulation of the data, and attempts to gain access to the server and thus the -files storing the data. Securing your nodes is required in order to use a production -license that enables {security-features} and helps reduce the risk from +files storing the data. Securing your nodes helps reduce the risk from network-based attacks. This section shows how to: @@ -18,15 +17,5 @@ This section shows how to: The authentication of new nodes helps prevent a rogue node from joining the cluster and receiving data through replication. -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/setting-up-ssl.asciidoc -include::{es-repo-dir}/security/securing-communications/setting-up-ssl.asciidoc[] +include::setting-up-ssl.asciidoc[] -[[ciphers]] -=== Enabling cipher suites for stronger encryption - -See {ref}/ciphers.html[Enabling Cipher Suites for Stronger Encryption]. - -[[separating-node-client-traffic]] -=== Separating node-to-node and client traffic - -See {ref}/separating-node-client-traffic.html[Separating node-to-node and client traffic]. diff --git a/docs/reference/security/securing-communications/node-certificates.asciidoc b/x-pack/docs/en/security/securing-communications/node-certificates.asciidoc similarity index 98% rename from docs/reference/security/securing-communications/node-certificates.asciidoc rename to x-pack/docs/en/security/securing-communications/node-certificates.asciidoc index b2f5e95b09999..1d25d8eb1dd80 100644 --- a/docs/reference/security/securing-communications/node-certificates.asciidoc +++ b/x-pack/docs/en/security/securing-communications/node-certificates.asciidoc @@ -41,7 +41,7 @@ the file and key. If you plan to add more nodes to your cluster in the future, retain a copy of the file and remember its password. -- -. Generate a certificate and private key for for each node in your cluster. +. Generate a certificate and private key for each node in your cluster. + -- For example, use the `elasticsearch-certutil cert` command: diff --git a/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc similarity index 63% rename from docs/reference/security/securing-communications/securing-elasticsearch.asciidoc rename to x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc index 9d207f26a96b6..635c8a1450f5d 100644 --- a/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc +++ b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc @@ -7,8 +7,8 @@ your {es} cluster. Connections are secured using Transport Layer Security (TLS/SSL). 
WARNING: Clusters that do not have encryption enabled send all data in plain text -including passwords and will not be able to install a license that enables -{security-features}. +including passwords. If the {es} {security-features} are enabled, unless you +have a trial license, you must configure SSL/TLS for internode-communication. To enable encryption, you need to perform the following steps on each node in the cluster: @@ -31,17 +31,12 @@ information, see <>. For more information about encrypting communications across the Elastic Stack, see {stack-ov}/encrypting-communications.html[Encrypting Communications]. -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/node-certificates.asciidoc include::node-certificates.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/tls-transport.asciidoc include::tls-transport.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/tls-http.asciidoc include::tls-http.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/tls-ad.asciidoc include::tls-ad.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/tls-ldap.asciidoc include::tls-ldap.asciidoc[] \ No newline at end of file diff --git a/docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc b/x-pack/docs/en/security/securing-communications/separating-node-client-traffic.asciidoc similarity index 100% rename from docs/reference/security/securing-communications/separating-node-client-traffic.asciidoc rename to x-pack/docs/en/security/securing-communications/separating-node-client-traffic.asciidoc diff --git a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc b/x-pack/docs/en/security/securing-communications/setting-up-ssl.asciidoc similarity index 70% rename from docs/reference/security/securing-communications/setting-up-ssl.asciidoc rename to x-pack/docs/en/security/securing-communications/setting-up-ssl.asciidoc index 90f9b040d9d54..68eda2cdc3e09 100644 --- a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc +++ b/x-pack/docs/en/security/securing-communications/setting-up-ssl.asciidoc @@ -1,16 +1,15 @@ [[ssl-tls]] -=== Setting Up TLS on a cluster +=== Setting up TLS on a cluster -The {stack} {security-features} enables you to encrypt traffic to, from, and +The {stack} {security-features} enable you to encrypt traffic to, from, and within your {es} cluster. Connections are secured using Transport Layer Security (TLS), which is commonly referred to as "SSL". WARNING: Clusters that do not have encryption enabled send all data in plain text -including passwords and will not be able to install a license that enables -{security-features}. +including passwords. If the {es} {security-features} are enabled, unless you have a trial license, you must configure SSL/TLS for internode-communication. The following steps describe how to enable encryption across the various -components of the Elastic Stack. You must perform each of the steps that are +components of the {stack}. You must perform each of the steps that are applicable to your cluster. . Generate a private key and X.509 certificate for each of your {es} nodes. See @@ -22,14 +21,14 @@ enable TLS on the HTTP layer. 
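In practice, this step comes down to a handful of `xpack.security.*.ssl` settings in `elasticsearch.yml`. A minimal sketch, assuming a PKCS#12 file named `node-1.p12` in a `certs` folder under the configuration directory:

[source,yaml]
----
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: certs/node-1.p12
xpack.security.transport.ssl.truststore.path: certs/node-1.p12

# Optional: also encrypt HTTP client traffic
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.keystore.path: certs/node-1.p12
----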
See {ref}/configuring-tls.html#tls-transport[Encrypting Communications Between Nodes in a Cluster] and {ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications]. -. Configure {monitoring} to use encrypted connections. See <>. +. Configure the {monitor-features} to use encrypted connections. See <>. . Configure {kib} to encrypt communications between the browser and the {kib} server and to connect to {es} via HTTPS. See -{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]. +{kibana-ref}/using-kibana-with-security.html[Configuring security in {kib}]. . Configure Logstash to use TLS encryption. See -{logstash-ref}/ls-security.html[Configuring Security in Logstash]. +{logstash-ref}/ls-security.html[Configuring security in {ls}]. . Configure Beats to use encrypted connections. See <>. diff --git a/docs/reference/security/securing-communications/tls-ad.asciidoc b/x-pack/docs/en/security/securing-communications/tls-ad.asciidoc similarity index 100% rename from docs/reference/security/securing-communications/tls-ad.asciidoc rename to x-pack/docs/en/security/securing-communications/tls-ad.asciidoc diff --git a/docs/reference/security/securing-communications/tls-http.asciidoc b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc similarity index 100% rename from docs/reference/security/securing-communications/tls-http.asciidoc rename to x-pack/docs/en/security/securing-communications/tls-http.asciidoc diff --git a/docs/reference/security/securing-communications/tls-ldap.asciidoc b/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc similarity index 100% rename from docs/reference/security/securing-communications/tls-ldap.asciidoc rename to x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc diff --git a/docs/reference/security/securing-communications/tls-transport.asciidoc b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc similarity index 100% rename from docs/reference/security/securing-communications/tls-transport.asciidoc rename to x-pack/docs/en/security/securing-communications/tls-transport.asciidoc diff --git a/x-pack/docs/en/security/securing-communications/tutorial-tls-addnodes.asciidoc b/x-pack/docs/en/security/securing-communications/tutorial-tls-addnodes.asciidoc new file mode 100644 index 0000000000000..992dd6d9ab282 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/tutorial-tls-addnodes.asciidoc @@ -0,0 +1,190 @@ +[role="xpack"] +[testenv="basic"] +[[encrypting-communications-hosts]] +=== Add nodes to your cluster + +You can add more nodes to your cluster and optionally designate specific +purposes for each node. For example, you can allocate master nodes, data nodes, +ingest nodes, machine learning nodes, and dedicated coordinating nodes. For +details about each node type, see <>. + +Let's add two nodes to our cluster! + +. Install two additional copies of {es}. It's possible to run multiple {es} +nodes using a shared installation. In this tutorial, however, we're keeping +things simple by using the `zip` or `tar.gz` packages and by putting each copy +in a separate folder. You can simply repeat the steps that you used to install +{es} in the +{stack-gs}/get-started-elastic-stack.html#install-elasticsearch[Getting started with the {stack}] +tutorial. + +. Generate certificates for the two new nodes. 
++ +-- +For example, run the following command: + +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./bin/elasticsearch-certutil cert \ +--ca elastic-stack-ca.p12 \ <1> +--multiple +---------------------------------------------------------------------- +<1> Use the certificate authority that you created in <>. + +You are prompted for information about each new node. Specify `node-2` and +`node-3` for the instance names. For the purposes of this tutorial, specify the +same IP address (`127.0.0.1,::1`) and DNS name (`localhost`) for each node. + +You are prompted to enter the password for your CA. You are also prompted to +create a password for each certificate. + +By default, the command produces a zip file named `certificate-bundle.zip`, +which contains the generated certificates and keys. +-- + +. Decompress the `certificate-bundle.zip` file. For example: ++ +-- +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +unzip certificate-bundle.zip + +Archive: certificate-bundle.zip + creating: node-2/ + inflating: node-2/node-2.p12 + creating: node-3/ + inflating: node-3/node-3.p12 +---------------------------------------------------------------------- + +The `certificate-bundle.zip` file contains a folder for each of your nodes. Each +folder contains a single PKCS#12 keystore that includes a node certificate, +node key, and CA certificate. +-- + +. Create a folder to contain certificates in the configuration directory of each +{es} node. For example, create a `certs` folder in the `config` directory. + +. Copy the appropriate certificate to the configuration directory on each node. +For example, copy the `node-2.p12` file into the `config/certs` directory on the +second node and the `node-3.p12` into the `config/certs` directory on the third +node. + +. Specify the name of the cluster and give each node a unique name. ++ +-- +For example, add the following settings to the `ES_PATH_CONF/elasticsearch.yml` +file on the second node: + +[source,yaml] +---- +cluster.name: test-cluster +node.name: node-2 +---- + +Add the following settings to the `ES_PATH_CONF/elasticsearch.yml` +file on the third node: + +[source,yaml] +---- +cluster.name: test-cluster +node.name: node-3 +---- + +NOTE: In order to join the same cluster as the first node, they must share the +same `cluster.name` value. + +-- + +. (Optional) Provide seed addresses to help your nodes discover other nodes with +which to form a cluster. ++ +-- +For example, add the following setting in the `ES_PATH_CONF/elasticsearch.yml` +file: + +[source,yaml] +---- +discovery.seed_hosts: ["localhost"] +---- + +The default value for this setting is `127.0.0.1, [::1]`, therefore it isn't +actually required in this tutorial. When you want to form a cluster with nodes +on other hosts, however, you must use this setting to provide a list of +master-eligible nodes to seed the discovery process. For more information, see +<>. +-- + +. On each node, enable TLS for transport communications. You must also configure +each node to identify itself using its signed certificate. ++ +-- +include::tutorial-tls-internode.asciidoc[tag=enable-tls] +-- + +. On each node, store the password for the PKCS#12 file in the {es} keystore. ++ +-- +include::tutorial-tls-internode.asciidoc[tag=secure-passwords] + +On the second node, supply the password that you created for the `node-2.p12` +file. 
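If you would like to confirm what each generated keystore contains before copying it into place, the JDK `keytool` utility can list its entries. This is an optional check and not part of the tutorial itself; the path below assumes you are still in the directory where you unzipped the bundle.

[source,sh]
----
# List the contents of the node-2 keystore; you are prompted for the
# password that you created for this certificate.
keytool -list -v -keystore node-2/node-2.p12 -storetype PKCS12
----

The output should show the node certificate together with the CA certificate that signed it.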
On the third node, supply the password that you created for the +`node-3.p12` file. +-- + +. Start each {es} node. For example, if you installed {es} with a `.tar.gz` +package, run the following command from each {es} directory: ++ +-- +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./bin/elasticsearch +---------------------------------------------------------------------- + +See <>. + +If you encounter errors, you can see some common problems and solutions in +<>. +-- + +. Verify that your cluster now contains three nodes. ++ +-- +For example, log into {kib} with the `elastic` built-in user. Go to +*Dev Tools > Console* and run the <>: + +[source,js] +---------------------------------- +GET _cluster/health +---------------------------------- +// CONSOLE + +Confirm the `number_of_nodes` in the response from this API. + +You can also use the <> to identify the master +node: + +[source,js] +---------------------------------- +GET _cat/nodes?v +---------------------------------- +// CONSOLE + +The node that has an asterisk(*) in the `master` column is the elected master +node. +-- + +Now that you have multiple nodes, your data can be distributed across the +cluster in multiple primary and replica shards. For more information about the +concepts of clusters, nodes, and shards, see +<>. + +[float] +[[encrypting-internode-nextsteps]] +=== What's next? + +Congratulations! You've encrypted communications between the nodes in your +cluster and can pass the +<>. + +If you want to encrypt communications between other products in the {stack}, see +<>. diff --git a/x-pack/docs/en/security/securing-communications/tutorial-tls-certificates.asciidoc b/x-pack/docs/en/security/securing-communications/tutorial-tls-certificates.asciidoc new file mode 100644 index 0000000000000..b2e599692ff0b --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/tutorial-tls-certificates.asciidoc @@ -0,0 +1,77 @@ +[role="xpack"] +[testenv="basic"] +[[encrypting-communications-certificates]] +=== Generate certificates + +In a secured cluster, {es} nodes use certificates to identify themselves when +communicating with other nodes. + +The cluster must validate the authenticity of these certificates. The +recommended approach is to trust a specific certificate authority (CA). Thus +when nodes are added to your cluster they just need to use a certificate signed +by the same CA. + +. Generate a certificate authority for your cluster. ++ +-- +Run the following command: + +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./bin/elasticsearch-certutil ca +---------------------------------------------------------------------- + +You are prompted for an output filename and a password. In this tutorial, we'll +use the default filename (`elastic-stack-ca.p12`). + +The output file is a PKCS#12 keystore that contains the public certificate for +your certificate authority and the private key that is used to sign the node +certificates. + +TIP: We'll need to use this file again when we add nodes to the cluster, so +remember its location and password. Ideally you should store the file securely, +since it holds the key to your cluster. + +For more information about this command, see +<>. +-- + +. Create a folder to contain certificates in the configuration directory of your +{es} node. For example, create a `certs` folder in the `config` directory. + +. 
Generate certificates and private keys for the first node in your cluster. ++ +-- +Run the following command: + +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./bin/elasticsearch-certutil cert \ +--ca elastic-stack-ca.p12 \ <1> +--dns localhost \ <2> +--ip 127.0.0.1,::1 <3> +--out config/certs/node-1.p12 <4> +---------------------------------------------------------------------- +<1> The `--ca` parameter contains the name of certificate authority that you +generated for this cluster. +<2> The `--dns` parameter contains a comma-separated list of DNS names for the +node. +<3> The `--ip` parameter contains a comma-separated list of IP addresses for the +node. +<4> The `--out` parameter contains the name and location of the generated +certificate. Ideally the file name matches the `node.name` value in the +`elasticsearch.yml` file. + +You are prompted to enter the password for your CA. You are also prompted to +create a password for the certificate. + +The output file is a PKCS#12 keystore that includes a node certificate, node key, +and CA certificate. +-- + +TIP: The {ref}/certutil.html[elasticsearch-certutil] command has a lot more +options. For example, it can generate Privacy Enhanced Mail (PEM) formatted +certificates and keys. It can also generate certificate signing requests (CSRs) +that you can use to obtain signed certificates from a commercial or +organization-specific certificate authority. However, those options are not +covered in this tutorial. diff --git a/x-pack/docs/en/security/securing-communications/tutorial-tls-internode.asciidoc b/x-pack/docs/en/security/securing-communications/tutorial-tls-internode.asciidoc new file mode 100644 index 0000000000000..2bda53ae04fdc --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/tutorial-tls-internode.asciidoc @@ -0,0 +1,177 @@ +[role="xpack"] +[testenv="trial"] +[[encrypting-internode]] +=== Encrypt internode communications + +Now that we've generated a certificate authority and certificates, let's update +the cluster to use these files. + +IMPORTANT: When you enable {es} {security-features}, unless you have a trial +license, you must use Transport Layer Security (TLS) to encrypt internode +communication. By following the steps in this tutorial tutorial, you learn how +to meet the minimum requirements to pass the +<>. + +. (Optional) Name the cluster. ++ +-- +For example, add the <> setting in the +`ES_PATH_CONF/elasticsearch.yml` file: + +[source,yaml] +---- +cluster.name: test-cluster +---- + +TIP: The `ES_PATH_CONF` environment variable contains the path for the {es} +configuration files. If you installed {es} using archive distributions (`zip` or +`tar.gz`), it defaults to `ES_HOME/config`. If you used package distributions +(Debian or RPM), it defaults to `/etc/elasticsearch`. For more information, see +<>. + +The default cluster name is `elasticsearch`. You should choose a unique name, +however, to ensure that your nodes join the right cluster. +-- + +. (Optional) Name the {es} node. ++ +-- +For example, add the <> setting in the +`ES_PATH_CONF/elasticsearch.yml` file: + +[source,yaml] +---- +node.name: node-1 +---- + +In this tutorial, the cluster will consist of three nodes that exist on the same +machine and share the same (loopback) IP address and hostname. Therefore, we +must give each node a unique name. + +This step is also necessary if you want to use the `node.name` value to define +the location of certificates in subsequent steps. 
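If you prefer to script this instead of editing the file by hand, appending the two settings from the shell works just as well. This is a sketch only; it assumes an archive installation where `ES_PATH_CONF` points at the node's `config` directory, as described in the TIP above.

[source,sh]
----
# Append the cluster and node names to the first node's configuration.
cat >> "$ES_PATH_CONF/elasticsearch.yml" <<'EOF'
cluster.name: test-cluster
node.name: node-1
EOF
----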
+-- + +. Disable single-node discovery. ++ +-- +To enable {es} to form a multi-node cluster, use the default value for the +`discovery.type` setting. If that setting exists in your +`ES_PATH_CONF/elasticsearch.yml` file, remove it. +-- + +. (Optional) If you are starting the cluster for the first time, specify the +initial set of master-eligible nodes. ++ +-- +For example, add the following setting in the `ES_PATH_CONF/elasticsearch.yml` +file: + +[source,yaml] +---- +cluster.initial_master_nodes: ["node-1"] +---- + +If you start an {es} node without configuring this setting or any other +discovery settings, it will start up in development mode and auto-bootstrap +itself into a new cluster. + +TIP: If you are starting a cluster with multiple master-eligible nodes for the +first time, add all of those node names to the `cluster.initial_master_nodes` +setting. + +See <> and +<>. +-- + +. Enable Transport Layer Security (TLS/SSL) for transport (internode) +communications. ++ +-- +// tag::enable-tls[] +For example, add the following settings in the `ES_PATH_CONF/elasticsearch.yml` +file: + +[source,yaml] +---- +xpack.security.enabled: true +xpack.security.transport.ssl.enabled: true +xpack.security.transport.ssl.keystore.path: certs/${node.name}.p12 <1> +xpack.security.transport.ssl.truststore.path: certs/${node.name}.p12 +---- +<1> If the file name for your certificate does not match the `node.name` value, +you must put the appropriate file name in the `elasticsearch.yml` file. +// end::enable-tls[] + +NOTE: The PKCS#12 keystore that is output by the `elasticsearch-certutil` can be +used as both a keystore and a truststore. If you use other tools to manage and +generate your certificates, you might have different values for these settings, +but that scenario is not covered in this tutorial. + +For more information, see <> and +<>. +-- + +. Store the password for the PKCS#12 file in the {es} keystore. ++ +-- +// tag::secure-passwords[] +For example, run the following commands: + +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./bin/elasticsearch-keystore create <1> +./bin/elasticsearch-keystore add xpack.security.transport.ssl.keystore.secure_password +./bin/elasticsearch-keystore add xpack.security.transport.ssl.truststore.secure_password +---------------------------------------------------------------------- +<1> If the {es} keystore already exists, this command asks whether you want to +overwrite it. You do not need to overwrite it; you can simply add settings to +your existing {es} keystore. +// end::secure-passwords[] + +You are prompted to supply the password that you created for the `node-1.p12` +file. We are using this file for both the transport TLS keystore and truststore, +therefore supply the same password for both of these settings. +-- + +. <>. ++ +-- +For example, if you installed {es} with a `.tar.gz` package, run the following +command from the {es} directory: + +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./bin/elasticsearch +---------------------------------------------------------------------- +-- + +. Create passwords for the built-in users and configure {kib} to use them. ++ +-- +NOTE: If you already configured passwords for these users in other tutorials, +you can skip this step. 
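If you did configure them previously, a quick way to confirm that those credentials still work against the restarted node is to call the authenticate API. This is a sketch; substitute your own password when prompted, and note that the URL uses plain `http` because this tutorial only enables TLS on the transport layer.

[source,sh]
----
# Authenticate as the built-in elastic user; a JSON description of the user
# is returned when the password is accepted.
curl -u elastic 'http://localhost:9200/_security/_authenticate?pretty'
----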
+ +include::{xes-repo-dir}/security/get-started-builtin-users.asciidoc[tag=create-users] + +After you setup the password for the `kibana` built-in user, +<>. + +For example, run the following commands to create the {kib} keystore and add the +`kibana` built-in user and its password in secure settings: + +include::{xes-repo-dir}/security/get-started-kibana-users.asciidoc[tag=store-kibana-user] +-- + +. Start {kib}. ++ +-- +For example, if you installed {kib} with a `.tar.gz` package, run the following +command from the {kib} directory: +["source","sh",subs="attributes,callouts"] +---------------------------------------------------------------------- +./bin/kibana +---------------------------------------------------------------------- + +See {kibana-ref}/start-stop.html[Starting and stopping {kib}]. +-- diff --git a/x-pack/docs/en/security/securing-communications/tutorial-tls-intro.asciidoc b/x-pack/docs/en/security/securing-communications/tutorial-tls-intro.asciidoc new file mode 100644 index 0000000000000..5ee53001a7825 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/tutorial-tls-intro.asciidoc @@ -0,0 +1,46 @@ +[role="xpack"] +[testenv="basic"] +[[encrypting-internode-communications]] +== Tutorial: Encrypting communications + +In the {stack-gs}/get-started-elastic-stack.html[Getting started with the {stack}] +and <> tutorials, we +used a cluster with a single {es} node to get up and running with the {stack}. + +You can add as many nodes as you want in a cluster but they must be able to +communicate with each other. The communication between nodes in a cluster is +handled by the <>. To secure your +cluster, you must ensure that the internode communications are encrypted. + +NOTE: In this tutorial, we add more nodes by installing more copies of {es} on +the same machine. By default, {es} binds to loopback addresses for HTTP and +transport communication. That is fine for the purposes of this tutorial and for +downloading and experimenting with {es} in a test or development environment. +When you are deploying a production environment, however, you are generally +adding nodes on different machines so that your cluster is resilient to outages +and avoids data loss. In a production scenario, there are additional +requirements that are not covered in this tutorial. See +<> and <>. + +[float] +[[encrypting-internode-prerequisites]] +=== Before you begin + +Ideally, you should do this tutorial after you complete the +{stack-gs}/get-started-elastic-stack.html[Getting started with the {stack}] and +<> tutorials. + +At a minimum, you must install and configure {es} and {kib} in a cluster with a +single {es} node. In particular, this tutorial provides instructions for adding +nodes that work with the `zip` and `tar.gz` packages. + +IMPORTANT: To complete this tutorial, you must install the default {es} and +{kib} packages, which include the encrypted communications {security-features}. +When you install these products, they apply basic licenses with no expiration +dates. All of the subsequent steps in this tutorial assume that you are using a +basic license. For more information, see {subscriptions} and +{stack-ov}/license-management.html[License management]. 
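If you are not sure which license your cluster is currently running, you can ask {es} directly once it is up. This is a sketch; it assumes security is already enabled so the request is authenticated as the `elastic` built-in user, and a default distribution should report a `basic` license type.

[source,sh]
----
# Show the license that the cluster is currently using.
curl -u elastic 'http://localhost:9200/_license?pretty'
----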
+ +include::tutorial-tls-certificates.asciidoc[] +include::tutorial-tls-internode.asciidoc[] +include::tutorial-tls-addnodes.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc new file mode 100644 index 0000000000000..af334bd6f1b96 --- /dev/null +++ b/x-pack/docs/en/security/troubleshooting.asciidoc @@ -0,0 +1,794 @@ +[role="xpack"] +[[security-troubleshooting]] +== Troubleshooting security +++++ +Troubleshooting +++++ + +Use the information in this section to troubleshoot common problems and find +answers for frequently asked questions. + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +For issues that you cannot fix yourself … we’re here to help. +If you are an existing Elastic customer with a support contract, please create +a ticket in the +https://support.elastic.co/customers/s/login/[Elastic Support portal]. +Or post in the https://discuss.elastic.co/[Elastic forum]. + +[[security-trb-settings]] +=== Some settings are not returned via the nodes settings API + +*Symptoms:* + +* When you use the <> to retrieve +settings for a node, some information is missing. + +*Resolution:* + +This is intentional. Some of the settings are considered to be highly +sensitive: all `ssl` settings, ldap `bind_dn`, and `bind_password`. +For this reason, we filter these settings and do not expose them via +the nodes info API rest endpoint. You can also define additional +sensitive settings that should be hidden using the +`xpack.security.hide_settings` setting. For example, this snippet +hides the `url` settings of the `ldap1` realm and all settings of the +`ad1` realm. + +[source, yaml] +------------------------------------------ +xpack.security.hide_settings: xpack.security.authc.realms.ldap1.url, +xpack.security.authc.realms.ad1.* +------------------------------------------ + +[[security-trb-roles]] +=== Authorization exceptions + +*Symptoms:* + +* I configured the appropriate roles and the users, but I still get an +authorization exception. +* I can authenticate to LDAP, but I still get an authorization exception. + + +*Resolution:* + +. Verify that the role names associated with the users match the roles defined +in the `roles.yml` file. You can use the `elasticsearch-users` tool to list all +the users. Any unknown roles are marked with `*`. ++ +-- +[source, shell] +------------------------------------------ +bin/elasticsearch-users list +rdeniro : admin +alpacino : power_user +jacknich : monitoring,unknown_role* <1> +------------------------------------------ +<1> `unknown_role` was not found in `roles.yml` + +For more information about this command, see the +<>. +-- + +. If you are authenticating to LDAP, a number of configuration options can cause +this error. ++ +-- +|====================== +|_group identification_ | + +Groups are located by either an LDAP search or by the "memberOf" attribute on +the user. Also, If subtree search is turned off, it will search only one +level deep. See the <> for all the options. +There are many options here and sticking to the defaults will not work for all +scenarios. + +| _group to role mapping_| + +Either the `role_mapping.yml` file or the location for this file could be +misconfigured. For more information, see <>. + +|_role definition_| + +The role definition might be missing or invalid. 
+ +|====================== + +To help track down these possibilities, add the following lines to the end of +the `log4j2.properties` configuration file in the `ES_PATH_CONF`: + +[source,properties] +---------------- +logger.authc.name = org.elasticsearch.xpack.security.authc +logger.authc.level = DEBUG +---------------- + +A successful authentication should produce debug statements that list groups and +role mappings. +-- + +[[security-trb-extraargs]] +=== Users command fails due to extra arguments + +*Symptoms:* + +* The `elasticsearch-users` command fails with the following message: +`ERROR: extra arguments [...] were provided`. + +*Resolution:* + +This error occurs when the `elasticsearch-users` tool is parsing the input and +finds unexpected arguments. This can happen when there are special characters +used in some of the arguments. For example, on Windows systems the `,` character +is considered a parameter separator; in other words `-r role1,role2` is +translated to `-r role1 role2` and the `elasticsearch-users` tool only +recognizes `role1` as an expected parameter. The solution here is to quote the +parameter: `-r "role1,role2"`. + +For more information about this command, see +<>. + +[[trouble-shoot-active-directory]] +=== Users are frequently locked out of Active Directory + +*Symptoms:* + +* Certain users are being frequently locked out of Active Directory. + +*Resolution:* + +Check your realm configuration; realms are checked serially, one after another. +If your Active Directory realm is being checked before other realms and there +are usernames that appear in both Active Directory and another realm, a valid +login for one realm might be causing failed login attempts in another realm. + +For example, if `UserA` exists in both Active Directory and a file realm, and +the Active Directory realm is checked first and file is checked second, an +attempt to authenticate as `UserA` in the file realm would first attempt to +authenticate against Active Directory and fail, before successfully +authenticating against the `file` realm. Because authentication is verified on +each request, the Active Directory realm would be checked - and fail - on each +request for `UserA` in the `file` realm. In this case, while the authentication +request completed successfully, the account on Active Directory would have +received several failed login attempts, and that account might become +temporarily locked out. Plan the order of your realms accordingly. + +Also note that it is not typically necessary to define multiple Active Directory +realms to handle domain controller failures. When using Microsoft DNS, the DNS +entry for the domain should always point to an available domain controller. + + +[[trb-security-maccurl]] +=== Certificate verification fails for curl on Mac + +*Symptoms:* + +* `curl` on the Mac returns a certificate verification error even when the +`--cacert` option is used. + + +*Resolution:* + +Apple's integration of `curl` with their keychain technology disables the +`--cacert` option. +See http://curl.haxx.se/mail/archive-2013-10/0036.html for more information. + +You can use another tool, such as `wget`, to test certificates. Alternately, you +can add the certificate for the signing certificate authority MacOS system +keychain, using a procedure similar to the one detailed at the +http://support.apple.com/kb/PH14003[Apple knowledge base]. Be sure to add the +signing CA's certificate and not the server's certificate. 
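If `wget` is not available, `openssl s_client` is another keychain-independent way to check the certificate chain that {es} presents on its HTTPS port. This is a sketch; the host, port, and PEM-encoded CA file are assumptions that you should adapt to your environment.

[source,sh]
----
# Connect to the HTTPS interface and verify the chain against the signing CA.
openssl s_client -connect localhost:9200 -CAfile ca.crt </dev/null
----

Look for `Verify return code: 0 (ok)` near the end of the output.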
+ + +[[trb-security-sslhandshake]] +=== SSLHandshakeException causes connections to fail + +*Symptoms:* + +* A `SSLHandshakeException` causes a connection to a node to fail and indicates +that there is a configuration issue. Some of the common exceptions are shown +below with tips on how to resolve these issues. + + +*Resolution:* + +`java.security.cert.CertificateException: No name matching node01.example.com found`:: ++ +-- +Indicates that a client connection was made to `node01.example.com` but the +certificate returned did not contain the name `node01.example.com`. In most +cases, the issue can be resolved by ensuring the name is specified during +certificate creation. For more information, see <>. Another scenario is +when the environment does not wish to use DNS names in certificates at all. In +this scenario, all settings in `elasticsearch.yml` should only use IP addresses +including the `network.publish_host` setting. +-- + +`java.security.cert.CertificateException: No subject alternative names present`:: ++ +-- +Indicates that a client connection was made to an IP address but the returned +certificate did not contain any `SubjectAlternativeName` entries. IP addresses +are only used for hostname verification if they are specified as a +`SubjectAlternativeName` during certificate creation. If the intent was to use +IP addresses for hostname verification, then the certificate will need to be +regenerated with the appropriate IP address. See <>. +-- + +`javax.net.ssl.SSLHandshakeException: null cert chain` and `javax.net.ssl.SSLException: Received fatal alert: bad_certificate`:: ++ +-- +The `SSLHandshakeException` indicates that a self-signed certificate was +returned by the client that is not trusted as it cannot be found in the +`truststore` or `keystore`. This `SSLException` is seen on the client side of +the connection. +-- + +`sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target` and `javax.net.ssl.SSLException: Received fatal alert: certificate_unknown`:: ++ +-- +This `SunCertPathBuilderException` indicates that a certificate was returned +during the handshake that is not trusted. This message is seen on the client +side of the connection. The `SSLException` is seen on the server side of the +connection. The CA certificate that signed the returned certificate was not +found in the `keystore` or `truststore` and needs to be added to trust this +certificate. +-- + +[[trb-security-ssl]] +=== Common SSL/TLS exceptions + +*Symptoms:* + +* You might see some exceptions related to SSL/TLS in your logs. Some of the +common exceptions are shown below with tips on how to resolve these issues. + + + + +*Resolution:* + +`WARN: received plaintext http traffic on a https channel, closing connection`:: ++ +-- +Indicates that there was an incoming plaintext http request. This typically +occurs when an external applications attempts to make an unencrypted call to the +REST interface. Please ensure that all applications are using `https` when +calling the REST interface with SSL enabled. +-- + +`org.elasticsearch.common.netty.handler.ssl.NotSslRecordException: not an SSL/TLS record:`:: ++ +-- +Indicates that there was incoming plaintext traffic on an SSL connection. This +typically occurs when a node is not configured to use encrypted communication +and tries to connect to nodes that are using encrypted communication. Please +verify that all nodes are using the same setting for +`xpack.security.transport.ssl.enabled`. 
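One quick way to compare the setting across nodes is to read it straight out of each node's configuration file, for example (a sketch; the host names are placeholders and the `/etc/elasticsearch` path assumes a package installation):

[source,sh]
----
# Print the transport TLS setting from each node's elasticsearch.yml.
for host in node01.example.com node02.example.com; do
  printf '%s: ' "$host"
  ssh "$host" grep 'xpack.security.transport.ssl.enabled' /etc/elasticsearch/elasticsearch.yml
done
----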
+ +For more information about this setting, see +<>. +-- + +`java.io.StreamCorruptedException: invalid internal transport message format, got`:: ++ +-- +Indicates an issue with data received on the transport interface in an unknown +format. This can happen when a node with encrypted communication enabled +connects to a node that has encrypted communication disabled. Please verify that +all nodes are using the same setting for `xpack.security.transport.ssl.enabled`. + +For more information about this setting, see +<>. +-- + +`java.lang.IllegalArgumentException: empty text`:: ++ +-- +This exception is typically seen when a `https` request is made to a node that +is not using `https`. If `https` is desired, please ensure the following setting +is in `elasticsearch.yml`: + +[source,yaml] +---------------- +xpack.security.http.ssl.enabled: true +---------------- + +For more information about this setting, see +<>. +-- + +`ERROR: unsupported ciphers [...] were requested but cannot be used in this JVM`:: ++ +-- +This error occurs when a SSL/TLS cipher suite is specified that cannot supported +by the JVM that {es} is running in. Security tries to use the specified cipher +suites that are supported by this JVM. This error can occur when using the +Security defaults as some distributions of OpenJDK do not enable the PKCS11 +provider by default. In this case, we recommend consulting your JVM +documentation for details on how to enable the PKCS11 provider. + +Another common source of this error is requesting cipher suites that use +encrypting with a key length greater than 128 bits when running on an Oracle JDK. +In this case, you must install the +<>. +-- + +[[trb-security-kerberos]] +=== Common Kerberos exceptions + +*Symptoms:* + +* User authentication fails due to either GSS negotiation failure +or a service login failure (either on the server or in the {es} http client). +Some of the common exceptions are listed below with some tips to help resolve +them. + +*Resolution:* + +`Failure unspecified at GSS-API level (Mechanism level: Checksum failed)`:: ++ +-- + +When you see this error message on the HTTP client side, then it may be +related to an incorrect password. + +When you see this error message in the {es} server logs, then it may be +related to the {es} service keytab. The keytab file is present but it failed +to log in as the user. Please check the keytab expiry. Also check whether the +keytab contain up-to-date credentials; if not, replace them. + +You can use tools like `klist` or `ktab` to list principals inside +the keytab and validate them. You can use `kinit` to see if you can acquire +initial tickets using the keytab. Please check the tools and their documentation +in your Kerberos environment. + +Kerberos depends on proper hostname resolution, so please check your DNS infrastructure. +Incorrect DNS setup, DNS SRV records or configuration for KDC servers in `krb5.conf` +can cause problems with hostname resolution. + +-- + +`Failure unspecified at GSS-API level (Mechanism level: Request is a replay (34))`:: + +`Failure unspecified at GSS-API level (Mechanism level: Clock skew too great (37))`:: ++ +-- + +To prevent replay attacks, Kerberos V5 sets a maximum tolerance for computer +clock synchronization and it is typically 5 minutes. Please check whether +the time on the machines within the domain is in sync. 
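A simple way to spot significant skew is to compare the clocks directly (a sketch; the host names are placeholders and `ssh` access is assumed). On systemd-based hosts, `timedatectl` additionally reports whether NTP synchronization is active.

[source,sh]
----
# Print the UTC time reported by each machine involved in the Kerberos exchange.
for host in es-node01 kdc01 client01; do
  printf '%s: ' "$host"
  ssh "$host" date -u
done
----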
+ +-- + +`gss_init_sec_context() failed: An unsupported mechanism was requested`:: + +`No credential found for: 1.2.840.113554.1.2.2 usage: Accept`:: ++ +-- + +You would usually see this error message on the client side when using `curl` to +test {es} Kerberos setup. For example, these messages occur when you are using +an old version of curl on the client and therefore Kerberos Spnego support is missing. +The Kerberos realm in {es} only supports Spengo mechanism (Oid 1.3.6.1.5.5.2); +it does not yet support Kerberos mechanism (Oid 1.2.840.113554.1.2.2). + +Make sure that: + +* You have installed curl version 7.49 or above as older versions of curl have +known Kerberos bugs. + +* The curl installed on your machine has `GSS-API`, `Kerberos` and `SPNEGO` +features listed when you invoke command `curl -V`. If not, you will need to +compile `curl` version with this support. + +To download latest curl version visit https://curl.haxx.se/download.html + +-- + +As Kerberos logs are often cryptic in nature and many things can go wrong +as it depends on external services like DNS and NTP. You might +have to enable additional debug logs to determine the root cause of the issue. + +{es} uses a JAAS (Java Authentication and Authorization Service) Kerberos login +module to provide Kerberos support. To enable debug logs on {es} for the login +module use following Kerberos realm setting: +[source,yaml] +---------------- +xpack.security.authc.realms..krb.debug: true +---------------- + +For detailed information, see <>. + +Sometimes you may need to go deeper to understand the problem during SPNEGO +GSS context negotiation or look at the Kerberos message exchange. To enable +Kerberos/SPNEGO debug logging on JVM, add following JVM system properties: + +`-Dsun.security.krb5.debug=true` + +`-Dsun.security.spnego.debug=true` + +For more information about JVM system properties, see <>. + +[[trb-security-saml]] +=== Common SAML issues + +Some of the common SAML problems are shown below with tips on how to resolve +these issues. + +. *Symptoms:* ++ +-- +Authentication in {kib} fails and the following error is printed in the {es} +logs: + +.... +Cannot find any matching realm for [SamlPrepareAuthenticationRequest{realmName=null, +assertionConsumerServiceURL=https://my.kibana.url/api/security/v1/saml}] +.... + +*Resolution:* + +{es}, {kib} and your Identity Provider need all have the same view on what the +Assertion Consumer Service URL of the SAML Service Provider is. + +.. {es} discovers this via the `sp.acs` setting in your {es} SAML realm configuration +.. {kib} constructs this value using the `server.host` and `server.port` in +`kibana.yml`. For instance: ++ +[source, shell] +----------------------------------------------- +server.host: kibanaserver.org +server.port: 3456 +----------------------------------------------- ++ +These settings would mean that {kib} would construct the Assertion Consumer +Service URL as `https://kibanaserver.org:3456/api/security/v1/saml`. However, +if for example, {kib} is behind a reverse proxy and you have configured the +following `xpack.security.public.*` settings: ++ +[source, shell] +----------------------------------------------- +xpack.security.public: + protocol: https + hostname: kibana.proxy.com + port: 8080 +----------------------------------------------- ++ +These settings would instruct {kib} to construct the Assertion Consumer Service +URL as `https://kibana.proxy.com:8080/api/security/v1/saml` + +.. 
The SAML Identity Provider is either explicitly configured by the IdP +administrator or consumes the SAML metadata that are generated by {es} and as +such contain the same value for the +as the one +that is configured in the the `sp.acs` setting in the {es} SAML realm +configuration. +-- ++ +The error encountered here indicates that the Assertion Consumer Service URL +that {kib} has constructed via one of the aforementioned ways +(`https://my.kibana.url/api/security/v1/saml`) is not the one that {es} is +configured with. Note that these two URLs are compared as case-sensitive strings +and not as canonicalized URLs. ++ +Often, this can be resolved by changing the `sp.acs` URL in `elasticearch.yml` +to match the value that {kib} has constructed. Note however, that the SAML IdP +configuration needs to also be adjusted to reflect this change. ++ +Alternatively, if you think {kib} is using the wrong value for the Assertion +Consumer Service URL, you will need to change the configuration in `kibana.yml` +by adjusting either the `server.host` and `server.port` to change the URL {kib} +listens to or the `xpack.security.public.*` settings to make {kib} aware about +its correct public URL. + +. *Symptoms:* ++ +-- +Authentication in {kib} fails and the following error is printed in the +{es} logs: + +.... +Authentication to realm saml1 failed - Provided SAML response is not valid for realm +saml/saml1 (Caused by ElasticsearchSecurityException[Conditions [https://some-url-here...] +do not match required audience [https://my.kibana.url]]) +.... + +*Resolution:* + +We received a SAML response that is addressed to another SAML Service Provider. +This usually means that the configured SAML Service Provider Entity ID in +`elasticsearch.yml` (`sp.entity_id`) does not match what has been configured as +the SAML Service Provider Entity ID in the SAML Identity Provider documentation. + +To resolve this issue, ensure that both the saml realm in {es} and the IdP are +configured with the same string for the SAML Entity ID of the Service Provider. + +TIP: These strings are compared as case-sensitive strings and not as +canonicalized URLs even when the values are URL-like. Be mindful of trailing +slashes, port numbers, etc. + +-- + +. *Symptoms:* ++ +-- +Authentication in {kib} fails and the following error is printed in the +{es} logs: + +.... +Cannot find metadata for entity [your:entity.id] in [metadata.xml] +.... + +*Resolution:* + +We could not find the metadata for the SAML Entity ID `your:entity.id` in the +configured metadata file (`metadata.xml`). + +.. Ensure that the `metadata.xml` file you are using is indeed the one provided +by your SAML Identity Provider. +.. Ensure that the `metadata.xml` file contains one element +as follows: `] +for action [cluster:admin/xpack/security/saml/authenticate] +.... + +*Resolution:* + +This error indicates that {es} failed to process the incoming SAML +authentication message. Since the message can't be processed, {es} is not aware +of who the to-be authenticated user is and the `` +placeholder is used instead. To diagnose the _actual_ problem, you must check +the {es} logs for further details. +-- + +. *Symptoms:* ++ +-- +Authentication in {kib} fails and the following error is printed in the +{es} logs: + +.... 
+Authentication to realm my-saml-realm failed - +Provided SAML response is not valid for realm saml/my-saml-realm +(Caused by ElasticsearchSecurityException[SAML Response is not a 'success' response: +Code=urn:oasis:names:tc:SAML:2.0:status:AuthnFailed Message=null Detail=null]) +.... + +*Resolution:* + +This means that the SAML Identity Provider failed to authenticate the user and +sent a SAML Response to the Service Provider ({stack}) indicating this failure. +The `Code`, `Message` and `Detail` can convey different error identifiers and +additional information that might offer an indication about the cause of the +failure. In case `Message` and `Detail` are null, please consult the logs and +documentation of the Identity Provider in order to further diagnose the problem. +-- + +. *Symptoms:* ++ +-- +Authentication in {kib} fails and the following error is printed in the +{es} logs: + +.... +The XML Signature of this SAML message cannot be validated. Please verify that the saml +realm uses the correct SAMLmetadata file/URL for this Identity Provider +.... + +*Resolution:* + +This means that {es} failed to validate the digital signature of the SAML +message that the Identity Provider sent. {es} uses the public key of the +Identity Provider that is included in the SAML metadata, in order to validate +the signature that the IdP has created using its corresponding private key. +Failure to do so, can have a number of causes: + +.. As the error message indicates, the most common cause is that the wrong +metadata file is used and as such the public key it contains doesn't correspond +to the private key the Identity Provider uses. +.. The configuration of the Identity Provider has changed or the key has been +rotated and the metadata file that {es} is using has not been updated. +.. The SAML Response has been altered in transit and the signature cannot be +validated even though the correct key is used. + +NOTE: The private keys and public keys and self-signed X.509 certificates that +are used in SAML for digital signatures as described above have no relation to +the keys and certificates that are used for TLS either on the transport or the +http layer. A failure such as the one described above has nothing to do with +your `xpack.ssl` related configuration. + +-- + +. *Symptoms:* ++ +-- +Users are unable to login with a local username and password in {kib} because +SAML is enabled. + +*Resolution:* + +If you want your users to be able to use local credentials to authenticate to +{kib} in addition to using the SAML realm for Single Sign-On, you must enable +the `basic` `authProvider` in {kib}. The process is documented in the +<> +-- + +*Logging:* + +Very detailed trace logging can be enabled specifically for the SAML realm by +setting the following transient setting: + +[source, shell] +----------------------------------------------- +PUT /_cluster/settings +{ + "transient": { + "logger.org.elasticsearch.xpack.security.authc.saml": "trace" + } +} +----------------------------------------------- + + +Alternatively, you can add the following lines to the end of the +`log4j2.properties` configuration file in the `ES_PATH_CONF`: + +[source,properties] +---------------- +logger.saml.name = org.elasticsearch.xpack.security.authc.saml +logger.saml.level = TRACE +---------------- + +[[trb-security-internalserver]] +=== Internal Server Error in Kibana + +*Symptoms:* + +* In 5.1.1, an `UnhandledPromiseRejectionWarning` occurs and {kib} displays an +Internal Server Error. 
+//TBD: Is the same true for later releases? + +*Resolution:* + +If the Security plugin is enabled in {es} but disabled in {kib}, you must +still set `elasticsearch.username` and `elasticsearch.password` in `kibana.yml`. +Otherwise, {kib} cannot connect to {es}. + + +[[trb-security-setup]] +=== Setup-passwords command fails due to connection failure + +The <> sets +passwords for the built-in users by sending user management API requests. If +your cluster uses SSL/TLS for the HTTP (REST) interface, the command attempts to +establish a connection with the HTTPS protocol. If the connection attempt fails, +the command fails. + +*Symptoms:* + +. {es} is running HTTPS, but the command fails to detect it and returns the +following errors: ++ +-- +[source, shell] +------------------------------------------ +Cannot connect to elasticsearch node. +java.net.SocketException: Unexpected end of file from server +... +ERROR: Failed to connect to elasticsearch at +http://127.0.0.1:9200/_security/_authenticate?pretty. +Is the URL correct and elasticsearch running? +------------------------------------------ +-- + +. SSL/TLS is configured, but trust cannot be established. The command returns +the following errors: ++ +-- +[source, shell] +------------------------------------------ +SSL connection to +https://127.0.0.1:9200/_security/_authenticate?pretty +failed: sun.security.validator.ValidatorException: +PKIX path building failed: +sun.security.provider.certpath.SunCertPathBuilderException: +unable to find valid certification path to requested target +Please check the elasticsearch SSL settings under +xpack.security.http.ssl. +... +ERROR: Failed to establish SSL connection to elasticsearch at +https://127.0.0.1:9200/_security/_authenticate?pretty. +------------------------------------------ +-- + +. The command fails because hostname verification fails, which results in the +following errors: ++ +-- +[source, shell] +------------------------------------------ +SSL connection to +https://idp.localhost.test:9200/_security/_authenticate?pretty +failed: java.security.cert.CertificateException: +No subject alternative DNS name matching +elasticsearch.example.com found. +Please check the elasticsearch SSL settings under +xpack.security.http.ssl. +... +ERROR: Failed to establish SSL connection to elasticsearch at +https://elasticsearch.example.com:9200/_security/_authenticate?pretty. +------------------------------------------ +-- + +*Resolution:* + +. If your cluster uses TLS/SSL for the HTTP interface but the +`elasticsearch-setup-passwords` command attempts to establish a non-secure +connection, use the `--url` command option to explicitly specify an HTTPS URL. +Alternatively, set the `xpack.security.http.ssl.enabled` setting to `true`. + +. If the command does not trust the {es} server, verify that you configured the +`xpack.security.http.ssl.certificate_authorities` setting or the +`xpack.security.http.ssl.truststore.path` setting. + +. If hostname verification fails, you can disable this verification by setting +`xpack.security.http.ssl.verification_mode` to `certificate`. + +For more information about these settings, see +<>. + +[[trb-security-path]] +=== Failures due to relocation of the configuration files + +*Symptoms:* + +* Active Directory or LDAP realms might stop working after upgrading to {es} 6.3 +or later releases. In 6.4 or later releases, you might see messages in the {es} +log that indicate a config file is in a deprecated location. 
+ +*Resolution:* + +By default, in 6.2 and earlier releases, the security configuration files are +located in the `ES_PATH_CONF/x-pack` directory, where `ES_PATH_CONF` is an +environment variable that defines the location of the +<>. + +In 6.3 and later releases, the config directory no longer contains an `x-pack` +directory. The files that were stored in this folder, such as the +`log4j2.properties`, `role_mapping.yml`, `roles.yml`, `users`, and `users_roles` +files, now exist directly in the config directory. + +IMPORTANT: If you upgraded to 6.3 or later releases, your old security +configuration files still exist in an `x-pack` folder. That file path is +deprecated, however, and you should move your files out of that folder. + +In 6.3 and later releases, settings such as `files.role_mapping` default to +`ES_PATH_CONF/role_mapping.yml`. If you do not want to use the default locations, +you must update the settings appropriately. See +<>. + diff --git a/x-pack/docs/en/watcher/actions.asciidoc b/x-pack/docs/en/watcher/actions.asciidoc index 67844290cd051..13d9eacc2e446 100644 --- a/x-pack/docs/en/watcher/actions.asciidoc +++ b/x-pack/docs/en/watcher/actions.asciidoc @@ -1,8 +1,9 @@ +[role="xpack"] [[actions]] == Actions When a watch's condition is met, its actions are executed unless it is being -<>. A watch can perform multiple actions. +<>. A watch can perform multiple actions. The actions are executed one at a time and each action executes independently. Any failures encountered while executing an action are recorded in the action result and in the watch history. @@ -15,13 +16,13 @@ support their execution in any way they need. For example, the payload might serve as a model for a templated email body. {watcher} supports the following types of actions: -<>, <>, <>, -<>, <>, <>, and <>. +<>, <>, <>, +<>, <>, +and <>. [float] [[actions-ack-throttle]] -=== Acknowledgement and Throttling +=== Acknowledgement and throttling During the watch execution, once the condition is met, a decision is made per configured action as to whether it should be throttled. The main purpose of @@ -93,7 +94,7 @@ PUT _watcher/watch/error_logs_alert // CONSOLE <1> There will be at least 15 minutes between subsequent `email_administrator` action executions. -<2> See <> for more information. +<2> See <> for more information. You can also define a throttle period at the watch level. The watch-level throttle period serves as the default throttle period for all of the actions @@ -165,15 +166,14 @@ xpack.watcher.execution.default_throttle_period: 15m -------------------------------------------------- {watcher} also supports acknowledgement-based throttling. You can acknowledge a -watch using the {ref}/watcher-api-ack-watch.html[Ack Watch API] to prevent the +watch using the <> to prevent the watch actions from being executed again while the watch condition remains `true`. This essentially tells {watcher} "I received the notification and I'm handling it, please do not notify me about this error again". An acknowledged watch action remains in the `acked` state until the watch's condition evaluates to `false`. When that happens, the action's state changes to `awaits_successful_execution`. 
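The current acknowledgement state of each action is visible in the watch status. For example, for the `error_logs_alert` watch shown earlier, a request along these lines (a sketch using `curl`; host and credentials are assumptions) should return the state under `status.actions.<action_name>.ack.state`:

[source,sh]
----
# Fetch the watch definition and its status, including per-action ack state.
curl -u elastic 'http://localhost:9200/_watcher/watch/error_logs_alert?pretty'
----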
-To acknowledge an action, you use the -{ref}/watcher-api-ack-watch.html[Ack Watch API]: +To acknowledge an action, you use the <>: [source,js] ---------------------------------------------------------------------- @@ -259,28 +259,18 @@ PUT _watcher/watch/log_event_watch <1> A `condition` that only applies to the `notify_pager` action, which restricts its execution to when the condition succeeds (at least 5 hits in this case). -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/email.asciidoc include::actions/email.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/webhook.asciidoc include::actions/webhook.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/index.asciidoc include::actions/index.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/logging.asciidoc include::actions/logging.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/hipchat.asciidoc -include::actions/hipchat.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/slack.asciidoc include::actions/slack.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/pagerduty.asciidoc include::actions/pagerduty.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/jira.asciidoc include::actions/jira.asciidoc[] [float] @@ -292,7 +282,7 @@ that even despite the exact same version, an OpenJDK distribution contains different parts under different Linux distributions. This can lead to issues with any action or input that uses TLS, like the `jira`, -`pagerduty`, `slack`, `hipchat` or `webhook` one, because of missing CA certs. +`pagerduty`, `slack`, or `webhook` one, because of missing CA certs. If you encounter TLS errors, when writing watches that connect to TLS endpoints, you should try to upgrade to the latest available OpenJDK distribution for your platform and if that does not help, try to upgrade to Oracle JDK. diff --git a/x-pack/docs/en/watcher/actions/email.asciidoc b/x-pack/docs/en/watcher/actions/email.asciidoc index 37cd8450850e2..1b202b000ca15 100644 --- a/x-pack/docs/en/watcher/actions/email.asciidoc +++ b/x-pack/docs/en/watcher/actions/email.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[actions-email]] -=== Email Action +=== Email action Use the `email` action to send email notifications. To send email, you must <> in @@ -15,7 +16,7 @@ account configuration. The required attributes must either be set in the email action definition or the account's `email_defaults`. [[configuring-email-actions]] -==== Configuring Email Actions +==== Configuring email actions You configure email actions in the `actions` array. Action-specific attributes are specified using the `email` keyword. @@ -47,7 +48,7 @@ the watch payload in the email body: account configuration. [[configuring-email-attachments]] -==== Configuring Email Attachments +==== Configuring email attachments You can attach the execution context payload or data from an any HTTP service to the email notification. There is no limit on the number of attachments you can @@ -117,11 +118,11 @@ killed by firewalls or load balancers in-between. specify the `url` attribute to configure the host and path to the service endpoint. 
See <> for the full list of HTTP request attributes. Required. -|====== +|===== .`data` attachment type attributes [options="header"] -|===== +|====== | Name | Description | `format` | Attaches the watch data, equivalent to specifying `attach_data` in the watch configuration. Possible values are `json` or `yaml`. @@ -131,7 +132,7 @@ killed by firewalls or load balancers in-between. .`reporting` attachment type attributes [options="header"] -|===== +|====== | Name | Description | `url` | The URL to trigger the dashboard creation | `inline` | Configures as an attachment to sent with disposition `inline`. This @@ -151,20 +152,15 @@ killed by firewalls or load balancers in-between. [[email-action-reports]] -===== Attaching Reports to an Email +===== Attaching reports to an email You can use the `reporting` attachment type in an `email` action to automatically generate a Kibana report and distribute it via email. -include::{kib-repo-dir}/reporting/watch-example.asciidoc[] - -include::{kib-repo-dir}/reporting/report-intervals.asciidoc[] - -For more information, see -{kibana-ref}/automating-report-generation.html[Automating Report Generation]. +See {kibana-ref}/automating-report-generation.html[Automating report generation]. [[email-action-attributes]] -==== Email Action Attributes +==== Email action attributes [cols=",^,,", options="header"] |====== @@ -251,7 +247,7 @@ A list of addresses can be specified as a an array: `[ 'Personal Name ', 'user2@host.domain' ]`. [[configuring-email]] -==== Configuring Email Accounts +==== Configuring email accounts {watcher} can send email using any SMTP email service. Email messages can contain basic HTML tags. You can control which groups of tags are @@ -280,14 +276,14 @@ email system. For more information about configuring {watcher} to work with different email systems, see: -* <> -* <> -* <> -* <> +* <> +* <> +* <> +* <> If you configure multiple email accounts, you must either configure a default account or specify which account the email should be sent with in the -<> action. +<> action. [source,yaml] -------------------------------------------------- @@ -302,7 +298,7 @@ xpack.notification.email: [float] [[gmail]] -===== Sending Email From Gmail +===== Sending email from Gmail Use the following email account settings to send email from the https://mail.google.com[Gmail] SMTP service: @@ -325,7 +321,7 @@ In order to store the account SMTP password, use the keystore command [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.email.account.gmail_account.smtp.secure_password +bin/elasticsearch-keystore add xpack.notification.email.account.gmail_account.smtp.secure_password -------------------------------------------------- If you get an authentication error that indicates that you need to continue the @@ -340,7 +336,7 @@ for more information. 
[float] [[outlook]] -===== Sending Email from Outlook.com +===== Sending email from Outlook.com Use the following email account settings to send email action from the https://www.outlook.com/[Outlook.com] SMTP service: @@ -363,7 +359,7 @@ In order to store the account SMTP password, use the keystore command [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.email.account.outlook_account.smtp.secure_password +bin/elasticsearch-keystore add xpack.notification.email.account.outlook_account.smtp.secure_password -------------------------------------------------- @@ -376,7 +372,7 @@ NOTE: You need to use a unique App Password if two-step verification is enable [float] [[amazon-ses]] -===== Sending Email from Amazon SES (Simple Email Service) +===== Sending email from Amazon SES (Simple Email Service) Use the following email account settings to send email from the http://aws.amazon.com/ses[Amazon Simple Email Service] (SES) SMTP service: @@ -400,7 +396,7 @@ In order to store the account SMTP password, use the keystore command [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.email.account.ses_account.smtp.secure_password +bin/elasticsearch-keystore add xpack.notification.email.account.ses_account.smtp.secure_password -------------------------------------------------- NOTE: You need to use your Amazon SES SMTP credentials to send email through @@ -413,7 +409,7 @@ NOTE: You need to use your Amazon SES SMTP credentials to send email through [float] [[exchange]] -===== Sending Email from Microsoft Exchange +===== Sending email from Microsoft Exchange Use the following email account settings to send email action from Microsoft Exchange: @@ -443,12 +439,12 @@ In order to store the account SMTP password, use the keystore command [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.email.account.exchange_account.smtp.secure_password +bin/elasticsearch-keystore add xpack.notification.email.account.exchange_account.smtp.secure_password -------------------------------------------------- [float] [[email-html-sanitization]] -===== Configuring HTML Sanitization Options +===== Configuring HTML sanitization options The `email` action supports sending messages with an HTML body. However, for security reasons, {watcher} https://en.wikipedia.org/wiki/HTML_sanitization[sanitizes] diff --git a/x-pack/docs/en/watcher/actions/hipchat.asciidoc b/x-pack/docs/en/watcher/actions/hipchat.asciidoc deleted file mode 100644 index 27cd177975a62..0000000000000 --- a/x-pack/docs/en/watcher/actions/hipchat.asciidoc +++ /dev/null @@ -1,393 +0,0 @@ -[[actions-hipchat]] -=== HipChat Action - -Use the `hipchat` action to send messages to https://www.hipchat.com[HipChat] -rooms or users. To send HipChat messages, you must -<> in `elasticsearch.yml`. - -[[configuring-hipchat-actions]] -==== Configuring HipChat Actions - -You configure HipChat actions in a `actions` array. Action-specific attributes -are specified using the `hipchat` keyword. You must specify the `message` -attribute for all `hipchat` actions. If you omit the `account` attribute, the -message is sent using the default HipChat account configured in -`elasticsearch.yml`. - -For example, the following action is configured to send messages using a HipChat -account that uses the <> profile. 
Because -this type of account can only send messages to a specific room, the only required -attribute is the message itself: - -[source,js] --------------------------------------------------- -"actions" : { - "notify-hipchat" : { - "transform" : { ... }, - "throttle_period" : "5m", - "hipchat" : { - "account" : "integration-account", <1> - "message" : { - "body" : "Encountered {{ctx.payload.hits.total.value}} errors in the last 5 minutes (facepalm)", <2> - "format" : "text", - "color" : "red", - "notify" : true - } - } - } -} --------------------------------------------------- -// NOTCONSOLE -<1> The name of a HipChat account configured in `elasticsearch.yml`. -<2> The message you want to send to HipChat. - -To send messages with a HipChat account that uses the <> -profile, you need to specify what rooms and users you want to send the message to. -For example, the following action is configured to send messages to the -`mission-control` and `devops` rooms as well as the user `website-admin@example.com`. -(To send to multiple users or rooms, specify an array of strings): - -[source,js] --------------------------------------------------- -"actions" : { - "notify-hipchat" : { - "transform" : { ... }, - "throttle_period" : "5m", - "hipchat" : { - "account" : "user-account", - "message" : { - "room" : [ "mission-control", "devops" ], - "user" : "website-admin@example.com", - "body" : "Encountered {{ctx.payload.hits.total.value}} errors in the last 5 minutes (facepalm)", - "format" : "text", - "color" : "red", - "notify" : true - } - } - } -} --------------------------------------------------- -// NOTCONSOLE - -To send messages with a HipChat account that uses the <> -profile, you need to specify what room or rooms you want to send the message to. -For example, the following action is configured to send messages to the -`server-status` room. (To send to multiple rooms, specify an array of strings.) - -[source,js] --------------------------------------------------- -"actions" : { - "notify-hipchat" : { - "transform" : { ... }, - "throttle_period" : "5m", - "hipchat" : { - "account" : "v1-account", - "message" : { - "from" : "Watcher", - "room" : [ "server-status", "infra-team" ], - "body" : "Encountered {{ctx.payload.hits.total.value}} errors in the last 5 minutes (facepalm)", - "format" : "text", - "color" : "red", - "notify" : true - } - } - } -} --------------------------------------------------- -// NOTCONSOLE - -[[hipchat-action-attributes]] -==== HipChat Action Attributes - -[cols=",^,,", options="header"] -|====== -| Name |Required | Default | Description - -| `account` | no | Default account | The HipChat account to use to send the message. - -| `proxy.host` | no | - | The proxy host to use (only in combination with `proxy.port`) - -| `proxy.port` | no | - | The proxy port to use (only in combination with `proxy.host`) - -| `message.body` | yes | - | The message content. Can contain up to 1000 characters. - -| `message.format` | no | html | The format of the message: `text` or `html`. - -| `message.color` | no | yellow | The background color of the notification in the room: - `gray`, `green`, `purple`, `red`, `yellow`. - -| `message.notify` | no | false | Indicates whether people in the room should be actively - notified - -| `message.from` | no | the watch ID | The name that appears as the notification sender. Only - valid for accounts that use the v1 profile. - -| `message.room` | no | - | The rooms that the notification should go to. 
Accepts - a string value or an array of string values. Must be - specified when using the v1 profile. At least one room - or user must be specified when using the `user` profile. - Not valid for the `integration` profile. - -| `message.user` | no | - | The users that the notification should go to. Accepts - a string value or an array of string values. At least - one room or user must be specified when using the `user` - profile. Not valid for the `integration` or `v1` profiles. -|====== - - -[[configuring-hipchat]] -==== Configuring HipChat Accounts - -You configure the accounts {watcher} can use to communicate with HipChat in the -`xpack.notification.hipchat` namespace in `elasticsearch.yml`. Both -https://www.hipchat.com/docs/api[v1] and -https://www.hipchat.com/docs/apiv2[v2] HipChat APIs are supported. - -{watcher} provides three HipChat API profiles: - -<>:: -Sends messages to a specific room using HipChat's v2 API -https://www.hipchat.com/docs/apiv2/method/send_room_notification[Send room -notification]. - -<>:: -Sends messages as a particular user through the HipChat v2 API. Enables you to -send messages to arbitrary rooms or users. - -<>:: -Sends messages to rooms using HipChat's v1 API -https://www.hipchat.com/docs/api/method/rooms/message[rooms/message]. -+ -NOTE: The `v1` profile is provided because it is simple to set up and this API - is familiar to many users. That said, HipChat has deprecated the v1 API - and is encouraging users to migrate to v2. Both the `integration` and - `user` profiles are based on the HipChat v2 API. - -If you configure multiple HipChat accounts, you either need to set a default -HipChat account or specify which account the notification should be sent with -in the <> action. - -deprecated[Storing the `auth_token` in the configuration file or using via updating the settings now is deprecated, as you should use the keystore for this, see {ref}/secure-settings.html[secure settings]] - -[source,yaml] --------------------------------------------------- -xpack.notification.hipchat: - default_account: team1 - account: - team1: - ... - team2: - ... --------------------------------------------------- - - -[[hipchat-api-integration]] -===== Using the Hipchat Integration Profile - -You can use the `integration` profile to send messages to specific rooms. When -you set an account's profile to `integration`, the messages are sent through -HipChat's v2 https://www.hipchat.com/docs/apiv2/method/send_room_notification[ -Send room notification] API. - -When you use the `integration` profile, you need to configure a separate HipChat -account for each room you want to send messages--the account configuration -contains a room-specific authentication token. Alternatively, you can use the -<> or <> profile to send messages -to multiple rooms. - -NOTE: The `integration` profile only supports sending messages to rooms, it does - not support sending private messages. Use the <> - profile to notify a particular HipChat user. - -You need a room-specific authentication token to configure an `integration` -account. To generate an authentication token: - -. Log in to http://hipchat.com[hipchat.com] or your HipChat server as a group - administrator. -. Go to *Group admin > Rooms*. -. Click the name of the room you want to send messages to. -. Click the *Tokens* link. -. Enter a name for the token in the *Label* field. -+ -image::images/hipchat-generate-room-token.jpg[] -. Select the *Send Notification* scope. -. Click *Create*. -. 
Copy the generated token so you can paste it into your HipChat account - configuration in `elasticsearch.yml`. -+ -image::images/hipchat-copy-room-token.jpg[] - -To configure a HipChat account that uses the `integration` profile: - -. Set the `type` to `integration`. -. Set `room` to the name of the room you want to send messages to. -. Set `auth_token` to the room-specific authentication token. - -For example, the following snippet configures an account called -`notify-monitoring` that sends messages to the `monitoring` room: - -[source,shell] --------------------------------------------------- -bin/elasticsearch-keystore add xpack.notification.hipchat.account.notify-monitoring.secure_auth_token --------------------------------------------------- - -[source,yaml] --------------------------------------------------- -xpack.notification.hipchat: - account: - notify-monitoring: - profile: integration - room: monitoring --------------------------------------------------- - -You can also specify defaults for the {ref}/notification-settings.html#hipchat-account-attributes[ -message attributes]: - -[source,yaml] --------------------------------------------------- -xpack.notification.hipchat: - account: - notify-monitoring: - profile: integration - room: monitoring - message: - format: text - color: blue - notify: true --------------------------------------------------- - - -[[hipchat-api-user]] -===== Using the HipChat User Profile - -You can use the `user` profile to send messages to rooms as well as individual -HipChat users. When you set an account's profile to `user`, {watcher} sends -messages as a particular user through the HipChat v2 API. - -Before you can configure a `user` account, you need to: - -. Add a HipChat user for {watcher}. When setting the user name, keep in mind that - the messages are sent on behalf of this user. -. Create an API token for the {watcher} user: -.. Log in to HipChat as the {watcher} user. -.. Go to `https:///account/api`. For example, - `https://www.hipchat.com/account/api`. -.. Confirm the user password. -.. Enter a name for the token in the *Label* field. -+ -image::images/hipchat-generate-user-token.jpg[] -. Select the *Send Notification* and *Send Message* scopes. -. Click *Create*. -. Copy the generated token so you can paste it into your HipChat account - configuration in `elasticsearch.yml`. -+ -image::images/hipchat-copy-room-token.jpg[] - -To configure a HipChat account that uses the `user` profile: - -. Set the `type` to `user`. -. Set `user` to the email address associated with the {watcher} user. -. Set `auth_token` to the {watcher} user's authentication token. 
- -For example, the following configuration creates an account called -`notify-monitoring` that sends messages to the `monitoring` room: - -[source,shell] --------------------------------------------------- -bin/elasticsearch-keystore add xpack.notification.hipchat.account.notify-monitoring.secure_auth_token --------------------------------------------------- - -[source,yaml] --------------------------------------------------- -xpack.notification.hipchat: - account: - notify-monitoring: - profile: user --------------------------------------------------- - -You can also specify defaults for the <{ref}/notification-settings.html#hipchat-account-attributes[ -message attributes]: - -[source,shell] --------------------------------------------------- -bin/elasticsearch-keystore add xpack.notification.hipchat.account.notify-monitoring.secure_auth_token --------------------------------------------------- - -[source,yaml] --------------------------------------------------- -xpack.notification.hipchat: - account: - notify-monitoring: - profile: user - message: - format: text - color: blue - notify: true --------------------------------------------------- - - -[[hipchat-api-v1]] -===== Using the HipChat v1 Profile - -You can use the `v1` profile to send messages to particular rooms. When you set -an account's profile to `v1`, messages are sent through HipChat's v1 -https://www.hipchat.com/docs/api/method/rooms/message[rooms/message] API. - -WARNING: The `v1` profile uses a deprecated API that is expected to be removed - by HipChat in the future. - -The `v1` profile only supports sending messages to rooms, it does not support -sending private messages. Use the <> profile to send -private messages to HipChat users. - -Before you can configure a `v1` account, you need to generate a `v1` API token: - -. Log in to your HipChat server as a group admin. -. Go to `https:///admin/api`. For example, - `https://hipchat.com/admin/api`. -. Confirm your admin password. -. Select the *Notification* type. -+ -image::images/hipchat-generate-v1-token.jpg[] -. Enter a name for the token in the *Label* field. -. Click *Create*. -. Copy the generated token so you can paste it into your HipChat account - configuration in `elasticsearch.yml`. -+ -image::images/hipchat-copy-v1-token.jpg[] - -To configure a HipChat account that uses the `v1` profile: - -. Set the `type` to `v1`. -. Set `auth_token` to the v1 authentication token you generated. - -For example, the following configuration creates an account called -`notify-monitoring`: - -[source,shell] --------------------------------------------------- -bin/elasticsearch-keystore add xpack.notification.hipchat.account.notify-monitoring.secure_auth_token --------------------------------------------------- - -[source,yaml] --------------------------------------------------- -xpack.notification.hipchat: - account: - notify-monitoring: - profile: v1 --------------------------------------------------- - -You can also specify defaults for the {ref}/notification-settings.html#hipchat-account-attributes[ -message attributes]. 
- -[source,yaml] --------------------------------------------------- -xpack.notification.hipchat: - account: - notify-monitoring: - profile: v1 - message: - format: text - color: blue - notify: true --------------------------------------------------- diff --git a/x-pack/docs/en/watcher/actions/index.asciidoc b/x-pack/docs/en/watcher/actions/index.asciidoc index 34fdad7c50d0b..14a755712bb7a 100644 --- a/x-pack/docs/en/watcher/actions/index.asciidoc +++ b/x-pack/docs/en/watcher/actions/index.asciidoc @@ -1,10 +1,11 @@ +[role="xpack"] [[actions-index]] -=== Index Action +=== Index action Use the `index` action to index data into Elasticsearch. See <> for the supported attributes. -==== Configuring Index Actions +==== Configuring index actions The following snippet shows a simple `index` action definition: @@ -23,14 +24,14 @@ The following snippet shows a simple `index` action definition: -------------------------------------------------- // NOTCONSOLE <1> The id of the action -<2> An optional <> to restrict action execution -<3> An optional <> to transform the payload and prepare the data that should be indexed +<2> An optional <> to restrict action execution +<3> An optional <> to transform the payload and prepare the data that should be indexed <4> The elasticsearch index to store the data to <5> An optional `_id` for the document, if it should always be the same document. [[index-action-attributes]] -==== Index Action Attributes +==== Index action attributes [options="header"] |====== @@ -55,7 +56,7 @@ The following snippet shows a simple `index` action definition: |====== [[anatomy-actions-index-multi-doc-support]] -==== Multi-Document Support +==== Multi-document support Like with all other actions, you can use a <> to replace the current execution context payload with another and by that change the document diff --git a/x-pack/docs/en/watcher/actions/jira.asciidoc b/x-pack/docs/en/watcher/actions/jira.asciidoc index f0b9c714181b8..e2c2a4824a10d 100644 --- a/x-pack/docs/en/watcher/actions/jira.asciidoc +++ b/x-pack/docs/en/watcher/actions/jira.asciidoc @@ -1,11 +1,12 @@ +[role="xpack"] [[actions-jira]] -=== Jira Action +=== Jira action Use the `jira` action to create issues in https://www.atlassian.com/software/jira[Atlassian's Jira Software]. To create issues you need to <> in `elasticsearch.yml`. [[configuring-jira-actions]] -==== Configuring Jira Actions +==== Configuring Jira actions You configure Jira actions in the `actions` array. Action-specific attributes are specified using the `jira` keyword. @@ -48,7 +49,7 @@ The following snippet shows a simple jira action definition: <7> The priority of the Jira issue. [[jira-action-attributes]] -==== Jira Action Attributes +==== Jira action attributes Depending of how Jira projects are configured, the issues can have many different fields and values. Therefore the `jira` action can accept any type of sub fields within its `issue` field. These fields will be directly used @@ -99,7 +100,7 @@ always required to create an issue in Jira. |====== [[configuring-jira]] -==== Configuring Jira Accounts +==== Configuring Jira accounts You configure the accounts {watcher} can use to communicate with Jira in the `xpack.notification.jira` namespace in `elasticsearch.yml`. 
@@ -109,12 +110,15 @@ Jira account you need to specify (see {ref}/secure-settings.html[secure settings [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.jira.account.monitoring.secure_url -bin/elasticsearch-keystore xpack.notification.jira.account.monitoring.secure_user -bin/elasticsearch-keystore xpack.notification.jira.account.monitoring.secure_password +bin/elasticsearch-keystore add xpack.notification.jira.account.monitoring.secure_url +bin/elasticsearch-keystore add xpack.notification.jira.account.monitoring.secure_user +bin/elasticsearch-keystore add xpack.notification.jira.account.monitoring.secure_password -------------------------------------------------- -deprecated[The insecure way of storing sensitive data (`url`, `user` and `password`) in the configuration file or the cluster settings is deprecated] +[WARNING] +====== +Storing sensitive data (`url`, `user` and `password`) in the configuration file or the cluster settings is insecure and has been deprecated. Please use {es}'s secure {ref}/secure-settings.html[keystore] method instead. +====== To avoid credentials that transit in clear text over the network, {watcher} will reject `url` settings like `http://internal-jira.elastic.co` that are based on @@ -137,7 +141,7 @@ WARNING: It is strongly advised to use Basic Authentication with secured HTTPS protocol only. You can also specify defaults for the -{ref}/notification-settings.html#jira-account-attributes[Jira issues]: +<>: [source,yaml] -------------------------------------------------- @@ -155,7 +159,7 @@ xpack.notification.jira: If you configure multiple Jira accounts, you either need to configure a default account or specify which account the notification should be sent with in the -<> action. +<> action. [source,yaml] -------------------------------------------------- diff --git a/x-pack/docs/en/watcher/actions/logging.asciidoc b/x-pack/docs/en/watcher/actions/logging.asciidoc index a8a4454c377eb..a279c83c20884 100644 --- a/x-pack/docs/en/watcher/actions/logging.asciidoc +++ b/x-pack/docs/en/watcher/actions/logging.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[actions-logging]] === Logging Action @@ -7,7 +8,7 @@ logs. See <> for the supported attributes. This action is primarily used during development and for debugging purposes. [[configuring-logging-actions]] -==== Configuring Logging Actions +==== Configuring logging actions You configure logging actions in the `actions` array. Action-specific attributes are specified using the `logging` keyword. @@ -33,7 +34,7 @@ The following snippet shows a simple logging action definition: [[logging-action-attributes]] -==== Logging Action Attributes +==== Logging action attributes [options="header"] |====== diff --git a/x-pack/docs/en/watcher/actions/pagerduty.asciidoc b/x-pack/docs/en/watcher/actions/pagerduty.asciidoc index 87a7b06b2cfa6..47fd282c40df4 100644 --- a/x-pack/docs/en/watcher/actions/pagerduty.asciidoc +++ b/x-pack/docs/en/watcher/actions/pagerduty.asciidoc @@ -1,12 +1,13 @@ +[role="xpack"] [[actions-pagerduty]] -=== PagerDuty Action +=== PagerDuty action Use the PagerDuty action to create events in https://pagerduty.com/[ PagerDuty]. To create PagerDuty events, you must <> in `elasticsearch.yml`. [[configuring-pagerduty-actions]] -==== Configuring PagerDuty Actions +==== Configuring PagerDuty actions You configure PagerDuty actions in the `actions` array. Action-specific attributes are specified using the `pagerduty` keyword. 
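The simple `pagerduty` action definition that the next hunk refers to sits outside the changed lines. As a rough, illustrative sketch only (the action id, account name, and description text below are placeholders), a minimal `pagerduty` action generally takes this shape:

[source,js]
--------------------------------------------------
"actions" : {
  "notify_pagerduty" : {
    "pagerduty" : {
      "account" : "my_pagerduty_account",
      "description" : "Main system down, please check!"
    }
  }
}
--------------------------------------------------
// NOTCONSOLE

If the `account` attribute is omitted, the event is sent with the default PagerDuty account configured in `elasticsearch.yml`.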
@@ -30,7 +31,7 @@ The following snippet shows a simple PagerDuty action definition: [[adding-context-and-payloads-to-pagerduty-actions]] -==== Adding Meta Information to a PagerDuty Incident +==== Adding meta information to a PagerDuty incident To give the PagerDuty incident some more context, you can attach the payload as well as an array of contexts to the action. @@ -64,7 +65,7 @@ payload as well as an array of contexts to the action. [[pagerduty-action-attributes]] -==== Pagerduty Action Attributes +==== Pagerduty action attributes [cols=",^,", options="header"] |====== @@ -75,7 +76,7 @@ payload as well as an array of contexts to the action. [[pagerduty-event-trigger-incident-attributes]] -.Pagerduty Event Trigger Incident Attributes +.Pagerduty event trigger incident attributes [cols=",^,", options="header"] |====== | Name |Required | Description @@ -114,7 +115,7 @@ NOTE: All of those objects have templating support, so you can use data from the context and the payload as part of all the fields. [[pagerduty-event-trigger-context-attributes]] -.Pagerduty Event Trigger Context Attributes +.Pagerduty event trigger context attributes [cols=",^,", options="header"] |====== | Name |Required | Description @@ -128,7 +129,7 @@ NOTE: All of those objects have templating support, so you can use data from the |====== [[configuring-pagerduty]] -==== Configuring PagerDuty Accounts +==== Configuring PagerDuty accounts You configure the accounts {watcher} uses to communicate with PagerDuty in the `xpack.notification.pagerduty` namespace in `elasticsearch.yml`. @@ -148,17 +149,17 @@ image::images/pagerduty-services.jpg[] image::images/pagerduty-integrations.jpg[] To configure a PagerDuty account in the keystore, you -must specify an account name and integration key, (see {ref}/secure-settings.html[secure settings]): +must specify an account name and integration key, (see <>): [source,yaml] -------------------------------------------------- bin/elasticsearch-keystore add xpack.notification.pagerduty.account.my_pagerduty_account.secure_service_api_key -------------------------------------------------- -deprecated[Storing the service api key in the YAML file or via cluster update settings is still supported, but the keystore setting should be used] +deprecated[7.0.0, "Storing the service api key in the YAML file or via cluster update settings is still supported, but the keystore setting should be used."] -You can also specify defaults for the <>: +You can also specify defaults for the +<>: . [source,yaml] @@ -177,7 +178,7 @@ xpack.notification.pagerduty: If you configure multiple PagerDuty accounts, you either need to set a default account or specify which account the event should be sent with in the -<> action. +<> action. [source,yaml] -------------------------------------------------- diff --git a/x-pack/docs/en/watcher/actions/slack.asciidoc b/x-pack/docs/en/watcher/actions/slack.asciidoc index ef8b907677b8c..74a8e2565ba54 100644 --- a/x-pack/docs/en/watcher/actions/slack.asciidoc +++ b/x-pack/docs/en/watcher/actions/slack.asciidoc @@ -1,13 +1,14 @@ +[role="xpack"] [[actions-slack]] === Slack Action Use the `slack` action to send messages to a https://slack.com/[Slack] team's channels or users. To send Slack messages, you need to -<> in +<> in `elasticsearch.yml`. [[configuring-slack-actions]] -==== Configuring Slack Actions +==== Configuring Slack actions You configure Slack actions in the `actions` array. Action-specific attributes are specified using the `slack` keyword. 
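For context, the simple `slack` action definition referenced by the next hunk is not part of the changed lines. A minimal sketch (the action id, account name, channel, and message text are placeholders) looks roughly like this:

[source,js]
--------------------------------------------------
"actions" : {
  "notify_slack" : {
    "slack" : {
      "account" : "team1",
      "message" : {
        "to" : [ "#alerts" ],
        "text" : "Encountered {{ctx.payload.hits.total.value}} errors in the last 5 minutes"
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE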
@@ -35,7 +36,7 @@ The following snippet shows a simple slack action definition: [[formatting-slack-messages]] -==== Using Attachments to Format Slack Messages +==== Using attachments to format Slack messages In addition to sending simple text-based messages, you can use the Slack https://api.slack.com/docs/attachments[attachment] mechanism to send formatted @@ -139,7 +140,7 @@ aggregation and the Slack action: generated by the transform. [[slack-action-attributes]] -==== Slack Action Attributes +==== Slack action attributes [cols=",^,", options="header"] |====== @@ -167,7 +168,7 @@ aggregation and the Slack action: | `message.dynamic_attachments` | no | Slack message attachments that can be populated dynamically based on the current watch payload. For more information, see - <>. + <>. | `proxy.host` | no | The proxy host to use (only in combination with `proxy.port`) @@ -196,17 +197,21 @@ image::images/slack-add-webhook-integration.jpg[] image::images/slack-copy-webhook-url.jpg[] To configure a Slack account, at a minimum you need to specify the account -name and webhook URL in the elasticsearch keystore (se {ref}/secure-settings.html[secure settings]): +name and webhook URL in the {es} keystore (see {ref}/secure-settings.html[secure settings]): [source,shell] -------------------------------------------------- bin/elasticsearch-keystore add xpack.notification.slack.account.monitoring.secure_url -------------------------------------------------- -deprecated[You can also configure this via settings in the `elasticsearch.yml` file, using the keystore is the preferred and secure way of doing this] +[WARNING] +====== +You can no longer configure Slack accounts using `elasticsearch.yml` settings. +Please use {es}'s secure <> method instead. +====== -You can also specify defaults for the {ref}/notification-settings.html#slack-account-attributes[Slack -notification attributes]: +You can specify defaults for the +<>: [source,yaml] -------------------------------------------------- @@ -228,7 +233,7 @@ xpack.notification.slack: If you configure multiple Slack accounts, you either need to configure a default account or specify which account the notification should be sent with in the -<> action. +<> action. [source,yaml] -------------------------------------------------- diff --git a/x-pack/docs/en/watcher/actions/webhook.asciidoc b/x-pack/docs/en/watcher/actions/webhook.asciidoc index 1b7c482d2c4be..e423aa56fd22c 100644 --- a/x-pack/docs/en/watcher/actions/webhook.asciidoc +++ b/x-pack/docs/en/watcher/actions/webhook.asciidoc @@ -1,13 +1,13 @@ +[role="xpack"] [[actions-webhook]] -=== Webhook Action +=== Webhook action Use the `webhook` action to send a request to any web service. The webhook action supports both HTTP and HTTPS connections. See -<> for the supported -attributes. +<> for the supported attributes. [[configuring-webook-actions]] -==== Configuring Webhook Actions +==== Configuring webhook actions You configure webhook actions in the `actions` array. Action-specific attributes are specified using the `webhook` keyword. 
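Similarly, the simple `webhook` action definition referenced by the next hunk lives outside the changed lines. Purely as a sketch (the host, port, path, and body are placeholders), it looks roughly like this:

[source,js]
--------------------------------------------------
"actions" : {
  "my_webhook" : {
    "webhook" : {
      "method" : "post",
      "host" : "mylisteningserver.example.com",
      "port" : 9200,
      "path" : "/{{ctx.watch_id}}",
      "body" : "{{ctx.watch_id}} fired at {{ctx.execution_time}}"
    }
  }
}
--------------------------------------------------
// NOTCONSOLE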
@@ -32,9 +32,9 @@ The following snippet shows a simple webhook action definition: -------------------------------------------------- // NOTCONSOLE <1> The id of the action -<2> An optional <> to transform the payload before +<2> An optional <> to transform the payload before executing the `webhook` action -<3> An optional <> for the action +<3> An optional <> for the action (5 minutes in this example) <4> The HTTP method to use when connecting to the host <5> The host to connect to @@ -76,8 +76,9 @@ NOTE: By default, both the username and the password are stored in the `.watches You can also use PKI-based authentication when submitting requests to a cluster that has {es} {security-features} enabled. When you use PKI-based authentication instead of HTTP basic auth, you don't need to store any authentication -information in the watch itself. To use PKI-based authentication, you {ref}/notification-settings.html#ssl-notification-settings -[configure the SSL key settings] for {watcher} in `elasticsearch.yml`. +information in the watch itself. To use PKI-based authentication, you +<> for {watcher} in +`elasticsearch.yml`. [[webhook-query-parameters]] @@ -135,7 +136,7 @@ the values serve as the header values: <1> The header values can contain templated strings. -==== Webhook Action Attributes +==== Webhook action attributes [[webhook-action-attributes]] [cols=",^,^,", options="header"] |====== @@ -148,23 +149,23 @@ the values serve as the header values: | `port` | yes | - | The port the HTTP service is listening on. | `path` | no | - | The URL path. The path can be static text or include Mustache - <>. URL query string parameters must be + <>. URL query string parameters must be specified via the `request.params` attribute. | `method` | no | get | The HTTP method. Valid values are: `head`, `get`, `post`, `put` and `delete`. | `headers` | no | - | The HTTP request headers. The header values can be static text - or include Mustache <>. + or include Mustache <>. | `params` | no | - | The URL query string parameters. The parameter values can be - static text or include Mustache <>. + static text or include Mustache <>. | `auth` | no | - | Authentication related HTTP headers. Currently, only basic authentication is supported. | `body` | no | - | The HTTP request body. The body can be static text or include - Mustache <>. When not specified, an empty + Mustache <>. When not specified, an empty body is sent. | `proxy.host` | no | - | The proxy host to use when connecting to the host. diff --git a/x-pack/docs/en/watcher/condition.asciidoc b/x-pack/docs/en/watcher/condition.asciidoc index 01f55f9b6682a..6995d88a2f68b 100644 --- a/x-pack/docs/en/watcher/condition.asciidoc +++ b/x-pack/docs/en/watcher/condition.asciidoc @@ -1,44 +1,40 @@ +[role="xpack"] [[condition]] == Conditions When a watch is triggered, its condition determines whether or not to execute the watch actions. {watcher} supports the following condition types: -* <>: set the watch condition to `true` so the watch +* <>: set the watch condition to `true` so the watch actions are always executed. -* <>: set the watch condition to `false` so the watch +* <>: set the watch condition to `false` so the watch actions are never executed. -* <>: perform simple comparisons against values +* <>: perform simple comparisons against values in the watch payload to determine whether or not to execute the watch actions. 
-* <>: compare an array of values in the +* <>: compare an array of values in the watch payload to a given value to determine whether or not to execute the watch actions. -* <>: use a script to determine whether or not to +* <>: use a script to determine whether or not to execute the watch actions. NOTE: If you omit the condition definition from a watch, the condition defaults to `always`. When a condition is evaluated, it has full access to the watch execution context, -including the watch payload (`ctx.payload.*`). The <>, -<> and <> +including the watch payload (`ctx.payload.*`). The <>, +<> and <> conditions can use the payload data to determine whether or not the necessary conditions are met. In addition to the watch wide condition, you can also configure conditions -per <>. +per <>. -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/always.asciidoc include::condition/always.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/never.asciidoc include::condition/never.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/compare.asciidoc include::condition/compare.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/array-compare.asciidoc include::condition/array-compare.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/script.asciidoc include::condition/script.asciidoc[] diff --git a/x-pack/docs/en/watcher/condition/always.asciidoc b/x-pack/docs/en/watcher/condition/always.asciidoc index c2eb37be52c8f..bba7b1696e6fe 100644 --- a/x-pack/docs/en/watcher/condition/always.asciidoc +++ b/x-pack/docs/en/watcher/condition/always.asciidoc @@ -1,14 +1,15 @@ +[role="xpack"] [[condition-always]] -=== Always Condition +=== Always condition Use the `always` condition to set the condition to `true`. This forces the watch -actions to be executed unless they are <>. +actions to be executed unless they are <>. The `always` condition enables you to perform watch actions on a fixed schedule, such as, _"Every Friday at noon, send a status report email to sys.admin@example.com."_ -==== Using the Always Condition +==== Using the always condition This is the default if you omit the condition definition from a watch. diff --git a/x-pack/docs/en/watcher/condition/array-compare.asciidoc b/x-pack/docs/en/watcher/condition/array-compare.asciidoc index 200af386e6834..1e8ae5d9cbf9c 100644 --- a/x-pack/docs/en/watcher/condition/array-compare.asciidoc +++ b/x-pack/docs/en/watcher/condition/array-compare.asciidoc @@ -1,19 +1,20 @@ +[role="xpack"] [[condition-array-compare]] -=== Array Compare Condition +=== Array compare condition Use `array_compare` to compare an array of values in the execution context to a -given value. See <> +given value. See <> for the operators you can use. -==== Using an Array Compare Condition +==== Using an array compare condition To use the `array_compare` condition, you specify the array in the execution -context that you want to evaluate, a <>, and the value you want to compare against. Optionally, you -can specify the path to the field in each array element that you want to -evaluate. +context that you want to evaluate, a +<>, and the value you want to +compare against. Optionally, you can specify the path to the field in each array +element that you want to evaluate. 
For example, the following `array_compare` condition returns `true` if there is at least one bucket in the aggregation that has a `doc_count` greater @@ -25,9 +26,9 @@ than or equal to 25: "condition": { "array_compare": { "ctx.payload.aggregations.top_tweeters.buckets" : { <1> - "path": "doc_count" <2>, + "path": "doc_count", <2> "gte": { <3> - "value": 25, <4> + "value": 25 <4> } } } @@ -38,14 +39,14 @@ than or equal to 25: <1> The path to the array in the execution context that you want to evaluate, specified in dot notation. <2> The path to the field in each array element that you want to evaluate. -<3> The <> to use. +<3> The <> to use. <4> The comparison value. Supports date math like the - <>. + <>. NOTE: When using fieldnames that contain a dot this condition will not work, use a <> instead. -==== Array-Compare Condition Attributes +==== Array-compare condition attributes [options="header"] |====== diff --git a/x-pack/docs/en/watcher/condition/compare.asciidoc b/x-pack/docs/en/watcher/condition/compare.asciidoc index eeb6ae868774c..f6fd06a11beec 100644 --- a/x-pack/docs/en/watcher/condition/compare.asciidoc +++ b/x-pack/docs/en/watcher/condition/compare.asciidoc @@ -1,12 +1,13 @@ +[role="xpack"] [[condition-compare]] -=== Compare Condition +=== Compare condition Use the `compare` condition to perform a simple comparison against a value in the watch payload. You can use the `compare` condition without enabling dynamic scripting. [[condition-compare-operators]] -. Supported Comparison Operators +.Supported comparison operators [options="header"] |====== | Name | Description @@ -30,13 +31,13 @@ dynamic scripting. given one (applies to numeric and string values) |====== -==== Using a Compare Condition +==== Using a compare condition To use the `compare` condition, you specify the value in the execution context that you want to evaluate, a <>, and the value you want to compare against. For example, the following `compare` -condition returns `true` if the number of the total hits in the <> is greater than or equal to 5: +condition returns `true` if the number of the total hits in the +<> is greater than or equal to 5: [source,js] -------------------------------------------------- @@ -46,6 +47,7 @@ search result>> is greater than or equal to 5: "ctx.payload.hits.total.value" : { <1> "gte" : 5 <2> } + } } } -------------------------------------------------- @@ -66,6 +68,7 @@ of the form `<{expression}>`. For example, the following expression returns "ctx.execution_time" : { "gte" : "<{now-5m}>" } + } } } -------------------------------------------------- @@ -84,12 +87,13 @@ to the `ctx.payload.aggregations.handled.buckets.true.doc_count`: "ctx.payload.aggregations.status.buckets.error.doc_count" : { "not_eq" : "{{ctx.payload.aggregations.handled.buckets.true.doc_count}}" } + } } } -------------------------------------------------- // NOTCONSOLE -==== Accessing Values in the Execution Context +==== Accessing values in the execution context You use "dot-notation" to access values in the execution context. Values loaded into the execution context by the input are prefixed by `ctx.payload`. 
diff --git a/x-pack/docs/en/watcher/condition/never.asciidoc b/x-pack/docs/en/watcher/condition/never.asciidoc index b8cad0b8c04d5..0328601f00e25 100644 --- a/x-pack/docs/en/watcher/condition/never.asciidoc +++ b/x-pack/docs/en/watcher/condition/never.asciidoc @@ -1,12 +1,13 @@ +[role="xpack"] [[condition-never]] -=== Never Condition +=== Never condition Use the `never` condition to set the condition to `false`. This means the watch actions are never executed when the watch is triggered. The watch input is executed, a record is added to the watch history, and the watch execution ends. This condition is generally used for testing. -==== Using the Never Condition +==== Using the never condition There are no attributes to specify for the `never` condition. To use the it, you specify the condition type and associate it with an empty object: diff --git a/x-pack/docs/en/watcher/condition/script.asciidoc b/x-pack/docs/en/watcher/condition/script.asciidoc index ee6a9531bf7b3..4dcc23ff25f75 100644 --- a/x-pack/docs/en/watcher/condition/script.asciidoc +++ b/x-pack/docs/en/watcher/condition/script.asciidoc @@ -1,14 +1,14 @@ +[role="xpack"] [[condition-script]] -=== Script Condition +=== Script condition -A watch <> that evaluates a script. The default scripting +A watch <> that evaluates a script. The default scripting language is `painless`. You can use any of the scripting languages supported by Elasticsearch as long as the language supports evaluating expressions to Boolean values. Note that the `mustache` and `expression` languages are too limited to be -used by this condition. For more information, see {ref}/modules-scripting.html[Scripting] -in the Elasticsearch Reference. +used by this condition. For more information, see <>. -==== Using a Script Condition +==== Using a script condition The following snippet configures an inline `script` condition that always returns `true`: @@ -22,11 +22,11 @@ The following snippet configures an inline `script` condition that always return // NOTCONSOLE This example defines a script as a simple string. This format is actually a -shortcut for defining an <> script. The +shortcut for defining an <> script. The formal definition of a script is an object that specifies the script type and optional language and parameter values. If the `lang` attribute is omitted, the language defaults to `painless`. Elasticsearch supports two types of scripts, -<> and <>. +<> and <>. For example, the following snippet shows a formal definition of an `inline` script that explicitly specifies the language and defines a single script @@ -47,7 +47,7 @@ parameter, `result`: // NOTCONSOLE [[condition-script-inline]] -==== Inline Scripts +==== Inline scripts Inline scripts are scripts that are defined in the condition itself. The following snippet shows the formal configuration of a simple painless script that @@ -64,10 +64,11 @@ always returns `true`. // NOTCONSOLE [[condition-script-stored]] -==== Stored Scripts +==== Stored scripts -Stored scripts refer to scripts that were {ref}/modules-scripting-using.html#modules-scripting-stored-scripts[stored] -in Elasticsearch. The following snippet shows how to refer to a script by its `id`: +Stored scripts refer to scripts that were +<> in Elasticsearch. The following +snippet shows how to refer to a script by its `id`: [source,js] -------------------------------------------------- @@ -79,8 +80,8 @@ in Elasticsearch. 
The following snippet shows how to refer to a script by its `i -------------------------------------------------- // NOTCONSOLE -As with <> -scripts, you can also specify the script language and parameters: +As with <> scripts, you can also specify the +script language and parameters: [source,js] -------------------------------------------------- @@ -95,14 +96,14 @@ scripts, you can also specify the script language and parameters: // NOTCONSOLE [[accessing-watch-payload]] -==== Accessing the Watch Payload +==== Accessing the watch payload A script can access the current watch execution context, including the payload data, as well as any parameters passed in through the condition definition. -For example, the following snippet defines a watch that uses a <> -and uses a `script` condition to check if the number of hits is above a specified -threshold: +For example, the following snippet defines a watch that uses a +<> and uses a `script` condition to check if the +number of hits is above a specified threshold: [source,js] -------------------------------------------------- diff --git a/x-pack/docs/en/watcher/customizing-watches.asciidoc b/x-pack/docs/en/watcher/customizing-watches.asciidoc index 27aa2baef04b7..6aa5a26aecbbd 100644 --- a/x-pack/docs/en/watcher/customizing-watches.asciidoc +++ b/x-pack/docs/en/watcher/customizing-watches.asciidoc @@ -1,29 +1,31 @@ +[role="xpack"] [[customizing-watches]] -== Customizing Watches +== Customizing watches -Now that you've seen how to set up simple watches to <> -and <>, let's take a closer -look at how you can customize a watch by modifying its <>, -<>, <>, and -<>. +Now that you've seen how to set up simple watches to +<> and +<>, let's take a closer +look at how you can customize a watch by modifying its <>, +<>, <>, and +<>. [[changing-inputs]] -=== Changing Inputs +=== Changing inputs The Watch Input is called when the watch triggered to load an initial payload. This payload is stored in the _Watch Execution Context_ and from then on is available for other watch elements to access (e.g. watch conditions can be evaluated based on the data in this payload). -{watcher} supports four types of inputs <>, -<>, <>, and -<>. +{watcher} supports four types of inputs <>, +<>, <>, and +<>. [[loading-static-data]] -==== Loading a Static Payload with the Simple Input +==== Loading a static payload with the simple input To load static data into the watch payload for testing purposes, you can use the -<> input. For example, the following input stores three +<> input. For example, the following input stores three fields in the payload: [source,js] @@ -41,13 +43,13 @@ fields in the payload: See <> for more details. [[loading-search-results]] -==== Loading a Payload from Elasticsearch with the Search Input +==== Loading a payload from Elasticsearch with the search input You can use the `search` input to load Elasticsearch search results as the watch initial payload. -A <> input contains a `request` object that specifies the -indices you want to search, the {ref}/search-request-search-type.html[search type], +A <> input contains a `request` object that specifies the +indices you want to search, the <>, and the search request body. The `body` field of a search input is the same as the body of an Elasticsearch `_search` request, making the full Elasticsearch Query DSL available for you to use. @@ -81,7 +83,7 @@ For example, the following `search` input loads the latest VIX quote: See <> for more details. 
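The VIX example mentioned in the hunk header above is part of the unchanged file, but the `request` object described in this subsection generally takes the following shape. This is only a sketch, with a placeholder index name and query:

[source,js]
--------------------------------------------------
"input" : {
  "search" : {
    "request" : {
      "indices" : [ "logs" ],
      "body" : {
        "query" : {
          "match" : { "message" : "error" }
        }
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE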
[[loading-http-data]] -==== Loading a Payload from a remote HTTP Service with HTTP Input +==== Loading a payload from a remote HTTP service with HTTP input Use the `http` input to issue an HTTP request and load the returned response as the watch initial payload. This input expects the response body content type @@ -111,28 +113,28 @@ Amsterdam using http://openweathermap.org/appid[OpenWeatherMap] online service: See <> for more details. [[chaining-inputs]] -==== Chaining Inputs +==== Chaining inputs -You can create an <> to load data from multiple sources +You can create an <> to load data from multiple sources into a watch payload. The inputs in a chain are processed in order, so the the data loaded by one input can be used by subsequent inputs. See <> for more details. [[changing-conditions]] -=== Changing Conditions +=== Changing conditions The Watch Condition is evaluated as part of the watch execution. The condition determines whether the actions associated with the watch should execute or not. -{watcher} supports four types of conditions <>, -<>, <>, and -<>. +{watcher} supports four types of conditions <>, +<>, <>, and +<>. The first two are pretty self-explanatory--they are shortcuts for setting a watch's condition to `true` or `false`. -==== Simple Value Comparison with the Compare Condition +==== Simple value comparison with the compare condition The `compare` condition enables you to perform simple comparisons against values in the Watch payload. While you can also do this with a `script` condition, with @@ -151,7 +153,7 @@ returned any hits: // NOTCONSOLE See <> for more details. -==== Powerful Comparison Logic with the Script Condition +==== Powerful comparison logic with the script condition For more complex conditional logic you can use the `script` condition. The `script` condition accepts a script that when executed returns `true` (indicating @@ -160,7 +162,7 @@ language defaults to the default script language in Elasticsearch, but you can also use any other supported language in the system. NOTE: Starting with 5.0, Elasticsearch is shipped with the new - {ref}/modules-scripting-painless.html[Painless] scripting language. + <> scripting language. Painless was created and designed specifically for use in Elasticsearch. Beyond providing an extensive feature set, its biggest trait is that it's properly sandboxed and safe to use anywhere in the system (including in @@ -182,13 +184,13 @@ VIX quote loaded by the `http` input is either greater than 5% or lower than -5% See <> for more details. [[using-transforms]] -=== Using Transforms +=== Using transforms Transforms are constructs in a watch that can change the current payload associated with the watch execution context. -{watcher} supports three types of transforms <>, -<> and <>. A `search` transform +{watcher} supports three types of transforms <>, +<> and <>. A `search` transform replaces the existing payload with the response of a new search request. You can use `script` transforms to modify the existing payload. A `chain` transform enables you to perform a series of `search` and `script` transforms. @@ -196,19 +198,19 @@ enables you to perform a series of `search` and `script` transforms. See <> for more details. [[customizing-actions]] -=== Customizing Actions +=== Customizing actions Actions are associated with a watch and are executed as part of the watch execution only when the watch condition is met. -{watcher} supports the following action types: <>, -<>, <>, <>, -<>, <>, and <>. 
+{watcher} supports the following action types: <>, +<>, <>, +<>, <>, and <>. -To use the `email` action, you need to <> +To use the `email` action, you need to <> in `elasticsearch.yml` that {watcher} can use to send email. Your custom email messages can be plain text or styled using HTML. You can include information from -the watch execution payload using <>, as well as attach the +the watch execution payload using <>, as well as attach the entire watch payload to the message. For example, the following email action uses a template in the email body and diff --git a/x-pack/docs/en/watcher/encrypting-data.asciidoc b/x-pack/docs/en/watcher/encrypting-data.asciidoc index 66138b54efba2..60e8414f31a90 100644 --- a/x-pack/docs/en/watcher/encrypting-data.asciidoc +++ b/x-pack/docs/en/watcher/encrypting-data.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[encrypting-data]] -== Encrypting Sensitive Data in {watcher} +== Encrypting sensitive data in {watcher} Watches might have access to sensitive data such as HTTP basic authentication information or details about your SMTP email service. You can encrypt this @@ -14,7 +15,7 @@ encrypted. To encrypt sensitive data in {watcher}: -. Use the {ref}/syskeygen.html[elasticsearch-syskeygen] command to create a system key file. +. Use the <> command to create a system key file. . Copy the `system_key` file to all of the nodes in your cluster. + @@ -24,8 +25,7 @@ every node in the cluster. -- -. Set the -{ref}/notification-settings.html[`xpack.watcher.encrypt_sensitive_data` setting]: +. Set the <>: + -- @@ -36,8 +36,8 @@ xpack.watcher.encrypt_sensitive_data: true -- . Set the -{ref}/notification-settings.html[`xpack.watcher.encryption_key` setting] in the -{ref}/secure-settings.html[{es} keystore] on each node in the cluster. +<> in the +<> on each node in the cluster. + -- For example, run the following command to import the `system_key` file on diff --git a/x-pack/docs/en/watcher/example-watches.asciidoc b/x-pack/docs/en/watcher/example-watches.asciidoc index 2a402b20261d7..377f0c393556d 100644 --- a/x-pack/docs/en/watcher/example-watches.asciidoc +++ b/x-pack/docs/en/watcher/example-watches.asciidoc @@ -1,16 +1,15 @@ +[role="xpack"] [[example-watches]] -== Example Watches +== Example watches The following examples show how to set up watches to: -* <> -* <> +* <> +* <> For more example watches you can use as a starting point for building custom watches, see the https://github.com/elastic/examples/tree/master/Alerting[Example -Watches] in the Elastic Examples repo. +watches] in the Elastic Examples repo. 
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc include::example-watches/example-watch-clusterstatus.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc include::example-watches/example-watch-meetupdata.asciidoc[] diff --git a/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc b/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc index 5506f206b45fc..3b8e9a3322e61 100644 --- a/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc +++ b/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc @@ -1,23 +1,24 @@ +[role="xpack"] [[watch-cluster-status]] -=== Watching the Status of an Elasticsearch Cluster +=== Watching the status of an Elasticsearch cluster You can easily configure a basic watch to monitor the health of your Elasticsearch cluster: -* <> that gets the +* <> that gets the cluster health status. -* <> that evaluates the health status to +* <> that evaluates the health status to determine if action is required. -* <> if the cluster is RED. +* <> if the cluster is RED. [float] [[health-add-input]] -==== Schedule the Watch and Add an Input +==== Schedule the watch and add an input -A watch <> controls how often a watch is triggered. -The watch <> gets the data that you want to evaluate. +A watch <> controls how often a watch is triggered. +The watch <> gets the data that you want to evaluate. The simplest way to define a schedule is to specify an interval. For example, the following schedule runs every 10 seconds: @@ -48,7 +49,7 @@ GET _cluster/health?pretty // TEST[continued] To load the health status into your watch, you simply add an -<> that calls the cluster health API: +<> that calls the cluster health API: [source,js] -------------------------------------------------- @@ -70,7 +71,8 @@ PUT _watcher/watch/cluster_health_watch -------------------------------------------------- // CONSOLE -If you're using Security, then you'll also need to supply some authentication credentials as part of the watch configuration: +If you're using Security, then you'll also need to supply some authentication +credentials as part of the watch configuration: [source,js] -------------------------------------------------- @@ -98,9 +100,12 @@ PUT _watcher/watch/cluster_health_watch -------------------------------------------------- // CONSOLE -It would be a good idea to create a user with the minimum privileges required for use with such a watch configuration. +It would be a good idea to create a user with the minimum privileges required +for use with such a watch configuration. -Depending on how your cluster is configured, there may be additional settings required before the watch can access your cluster such as keystores, truststores or certificates. For more information, see {ref}/notification-settings.html[Notification Settings]. +Depending on how your cluster is configured, there may be additional settings +required before the watch can access your cluster such as keystores, truststores, +or certificates. For more information, see <>. 
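One way to follow the advice above about a minimum-privilege user is to create a dedicated role and user for the watch. This is only an illustrative sketch (the role name, user name, and password are placeholders), assuming the watch only needs to call the cluster health API, which the `monitor` cluster privilege covers:

[source,js]
--------------------------------------------------
PUT _security/role/cluster_health_reader
{
  "cluster" : [ "monitor" ]
}

PUT _security/user/cluster_health_watcher
{
  "password" : "watcher-user-password",
  "roles" : [ "cluster_health_reader" ]
}
--------------------------------------------------
// NOTCONSOLE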
If you check the watch history, you'll see that the cluster status is recorded @@ -123,9 +128,9 @@ GET .watcher-history*/_search [float] [[health-add-condition]] -==== Add a Condition +==== Add a condition -A <> evaluates the data you've loaded into the watch and +A <> evaluates the data you've loaded into the watch and determines if any action is required. Since you've defined an input that loads the cluster status into the watch, you can define a condition that checks that status. @@ -178,11 +183,11 @@ GET .watcher-history*/_search?pretty [float] [[health-take-action]] -==== Take Action +==== Take action Recording `watch_records` in the watch history is nice, but the real power of {watcher} is being able to do something in response to an alert. A watch's -<> define what to do when the watch condition is true--you +<> define what to do when the watch condition is true--you can send emails, call third-party webhooks, or write documents to an Elasticsearch index or log when the watch condition is met. @@ -251,7 +256,7 @@ xpack.notification.email.account: NOTE: If you have advanced security options enabled for your email account, you need to take additional steps to send email from {watcher}. For more - information, see <>. + information, see <>. You can check the watch history or the `status_index` to see that the action was performed. @@ -270,13 +275,13 @@ GET .watcher-history*/_search?pretty [float] [[health-delete]] -==== Delete the Watch +==== Delete the watch Since the `cluster_health_watch` is configured to run every 10 seconds, make sure you delete it when you're done experimenting. Otherwise, you'll spam yourself indefinitely. -To remove the watch, use the {ref}/watcher-api-delete-watch.html[DELETE watch API]: +To remove the watch, use the <>: [source,js] ------------------------------------------------------- diff --git a/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc b/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc index 077e27f00eddd..15f06c9ceb305 100644 --- a/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc +++ b/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[watching-meetup-data]] -=== Watching Event Data +=== Watching event data If you are indexing event data, such as log messages, network traffic, or a web feed, you can create a watch to email notifications when certain events occur. For example, if you index a feed of RSVPs for meetup events happening around the world, you can create a watch that alerts you to interesting events. @@ -50,7 +51,7 @@ output { <2> [source,shell] ---------------------------------------------------------- curl http://stream.meetup.com/2/rsvps | bin/logstash -f livestream.conf ---------------------------------------------------------- +---------------------------------------------------------- // NOTCONSOLE -- @@ -143,7 +144,7 @@ To set up the watch: } } }, --------------------------------------------------- +------------------------------------------------- // NOTCONSOLE <1> Elasticsearch Date math is used to select the Logstash indices that contain the meetup data. The second pattern is needed in case the previous hour crosses days. <2> Find all of the RSVPs with `Open Source` as a topic. 
@@ -167,7 +168,7 @@ To set up the watch: + -- [source,js] --------------------------------------------------- +--------------------------------------------------- "actions": { "email_me": { "throttle_period": "10m", @@ -188,7 +189,7 @@ To set up the watch: // NOTCONSOLE -- -NOTE: To enable Watcher to send emails, you must configure an email account in `elasticsearch.yml`. For more information, see <>. +NOTE: To enable Watcher to send emails, you must configure an email account in `elasticsearch.yml`. For more information, see <>. The complete watch looks like this: @@ -290,7 +291,7 @@ PUT _watcher/watch/meetup -------------------------------------------------- // CONSOLE -<1> The email body can include Mustache templates to reference data in the watch payload. By default,it will be <> to block dangerous content. +<1> The email body can include Mustache templates to reference data in the watch payload. By default,it will be <> to block dangerous content. <2> Replace the `from` address with the email address you configured in `elasticsearch.yml`. <3> Replace the `to` address with your email address to receive notifications. diff --git a/x-pack/docs/en/watcher/example-watches/watching-time-series-data.asciidoc b/x-pack/docs/en/watcher/example-watches/watching-time-series-data.asciidoc index 74074d2ac3d0f..4f2b8ed2a570e 100644 --- a/x-pack/docs/en/watcher/example-watches/watching-time-series-data.asciidoc +++ b/x-pack/docs/en/watcher/example-watches/watching-time-series-data.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[watching-time-series-data]] -=== Watching Time Series Data +=== Watching time series data If you are indexing time-series data such as logs, RSS feeds, or network traffic, you can use {watcher} to send notifications when certain events occur. @@ -151,7 +152,7 @@ you can then reference it by name in the watch condition. NOTE: To use the email action, you must configure at least one email account in `elasticsearch.yml`. If you configure multiple email accounts, you need to specify which one you want to send the email with. For more information, see -<>. +<>. The complete watch looks like this: diff --git a/x-pack/docs/en/watcher/getting-started.asciidoc b/x-pack/docs/en/watcher/getting-started.asciidoc index 8556f6b564bb9..f6045d5c5ed99 100644 --- a/x-pack/docs/en/watcher/getting-started.asciidoc +++ b/x-pack/docs/en/watcher/getting-started.asciidoc @@ -1,9 +1,11 @@ +[role="xpack"] [[watcher-getting-started]] -== Getting Started with {watcher} +== Getting started with {watcher} -By default, when you install {es} and {kib}, {xpack} is installed and the -{watcher} is enabled. You cannot use {watcher} with the free basic license, but -you can try all of the {xpack} features with a <>. +TIP: To complete these steps, you must obtain a license that includes the +{alert-features}. For more information about Elastic license levels, see +https://www.elastic.co/subscriptions and +{stack-ov}/license-management.html[License management]. [[watch-log-data]] To set up a watch to start sending alerts: @@ -16,14 +18,14 @@ condition is met. [float] [[log-add-input]] -=== Schedule the Watch and Define an Input +=== Schedule the watch and define an input -A watch {xpack-ref}/trigger-schedule.html[schedule] controls how often a watch is triggered. -The watch {xpack-ref}/input.html[input] gets the data that you want to evaluate. +A watch <> controls how often a watch is triggered. +The watch <> gets the data that you want to evaluate. 
To periodically search log data and load the results into the -watch, you could use an {xpack-ref}/trigger-schedule.html#schedule-interval[interval] schedule and a -{xpack-ref}/input-search.html[search] input. For example, the following Watch searches +watch, you could use an <> schedule and a +<> input. For example, the following Watch searches the `logs` index for errors every 10 seconds: [source,js] @@ -74,9 +76,9 @@ GET .watcher-history*/_search?pretty [float] [[log-add-condition]] -=== Add a Condition +=== Add a condition -A {xpack-ref}/condition.html[condition] evaluates the data you've loaded into the watch and +A <> evaluates the data you've loaded into the watch and determines if any action is required. Now that you've loaded log errors into the watch, you can define a condition that checks to see if any errors were found. @@ -107,7 +109,7 @@ PUT _watcher/watch/log_error_watch } -------------------------------------------------- // CONSOLE -<1> The {xpack-ref}/condition-compare.html[compare] condition lets you easily compare against +<1> The <> condition lets you easily compare against values in the execution context. For this compare condition to evaluate to `true`, you need to add an event @@ -151,11 +153,11 @@ GET .watcher-history*/_search?pretty [float] [[log-take-action]] -=== Configure an Action +=== Configure an action Recording watch records in the watch history is nice, but the real power of {watcher} is being able to do something when the watch condition is met. A -watch's {xpack-ref}/actions.html[actions] define what to do when the watch condition +watch's <> define what to do when the watch condition evaluates to `true`. You can send emails, call third-party webhooks, write documents to an Elasticsearch index, or log messages to the standard Elasticsearch log files. @@ -203,7 +205,7 @@ delete it when you're done experimenting. Otherwise, the noise from this sample watch will make it hard to see what else is going on in your watch history and log file. -To remove the watch, use the {ref}/watcher-api-delete-watch.html[DELETE watch API]: +To remove the watch, use the <>: [source,js] -------------------------------------------------- @@ -214,7 +216,7 @@ DELETE _watcher/watch/log_error_watch [float] [[required-security-privileges]] -=== Required Security Privileges +=== Required security privileges To enable users to create and manipulate watches, assign them the `watcher_admin` security role. Watcher admins can also view watches, watch history, and triggered watches. @@ -225,11 +227,11 @@ allowed to execute read-only watch operations. [float] [[next-steps]] -=== Where to Go Next +=== Where to go next -* See {xpack-ref}/how-watcher-works.html[How {watcher} Works] for more information about the +* See <> for more information about the anatomy of a watch and the watch lifecycle. -* See {xpack-ref}/example-watches.html[Example Watches] for more examples of setting up +* See <> for more examples of setting up a watch. 
* See the https://github.com/elastic/examples/tree/master/Alerting[Example Watches] in the Elastic Examples repo for additional sample watches you can use diff --git a/x-pack/docs/en/watcher/gs-index.asciidoc b/x-pack/docs/en/watcher/gs-index.asciidoc index e799adec40a34..0bd295f3c461b 100644 --- a/x-pack/docs/en/watcher/gs-index.asciidoc +++ b/x-pack/docs/en/watcher/gs-index.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[xpack-alerting]] -= Alerting on Cluster and Index Events += Alerting on cluster and index events [partintro] -- diff --git a/x-pack/docs/en/watcher/how-watcher-works.asciidoc b/x-pack/docs/en/watcher/how-watcher-works.asciidoc index 80aeb69a38d5b..02e8ed0ef00d4 100644 --- a/x-pack/docs/en/watcher/how-watcher-works.asciidoc +++ b/x-pack/docs/en/watcher/how-watcher-works.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[how-watcher-works]] -== How {watcher} Works +== How {watcher} works You <> to automatically perform an action when certain conditions are met. The conditions are generally based on data you've @@ -15,7 +16,7 @@ This topic describes the elements of a watch and how watches operate. [float] [[watch-definition]] -=== Watch Definition +=== Watch definition A watch consists of a _trigger_, _input_, _condition_, and _actions_. The actions define what needs to be done once the condition is met. In addition, you can @@ -43,7 +44,7 @@ Specify what happens when the watch condition is met. [[watch-definition-example]] For example, the following snippet shows a -{ref}/watcher-api-put-watch.html[Put Watch] request that defines a watch that +<> request that defines a watch that looks for log error events: [source,js] @@ -130,7 +131,7 @@ PUT _watcher/watch/log_errors [float] [[watch-execution]] -=== Watch Execution +=== Watch execution [[schedule-scheduler]] When you add a watch, {watcher} immediately registers its trigger with the @@ -149,14 +150,14 @@ primary and all replicas of this particular shard will reload. Because the watches are executed on the node, where the watch shards are, you can create dedicated watcher nodes by using shard allocation filtering. -You could configure nodes with a dedicated `node.attr.watcher: true` property and +You could configure nodes with a dedicated `node.attr.role: watcher` property and then configure the `.watches` index like this: [source,js] ------------------------ PUT .watches/_settings { - "index.routing.allocation.include": "watcher" + "index.routing.allocation.include.role": "watcher" } ------------------------ // CONSOLE @@ -198,7 +199,7 @@ image::images/watch-execution.jpg[align="center"] [float] [[watch-acknowledgment-throttling]] -=== Watch Acknowledgment and Throttling +=== Watch acknowledgment and throttling {watcher} supports both time-based and acknowledgment-based throttling. This enables you to prevent actions from being repeatedly executed for the same event. @@ -218,7 +219,7 @@ For more information, see <>. [float] [[watch-active-state]] -=== Watch Active State +=== Watch active state By default, when you add a watch it is immediately set to the _active_ state, registered with the appropriate trigger engine, and executed according @@ -228,13 +229,13 @@ You can also set a watch to the _inactive_ state. Inactive watches are not registered with a trigger engine and can never be triggered. To set a watch to the inactive state when you create it, set the -{ref}/watcher-api-put-watch.html[`active`] parameter to _inactive_. To +<> parameter to _inactive_. 
To deactivate an existing watch, use the -{ref}/watcher-api-deactivate-watch.html[Deactivate Watch API]. To reactivate an +<>. To reactivate an inactive watch, use the -{ref}/watcher-api-activate-watch.html[Activate Watch API]. +<>. -NOTE: You can use the {ref}/watcher-api-execute-watch.html[Execute Watch API] +NOTE: You can use the <> to force the execution of a watch even when it is inactive. Deactivating watches is useful in a variety of situations. For example, if you @@ -247,7 +248,7 @@ deleting it from the system. [float] [[scripts-templates]] -=== Scripts and Templates +=== Scripts and templates You can use scripts and templates when defining a watch. Scripts and templates can reference elements in the watch execution context, including the watch payload. @@ -258,13 +259,12 @@ placeholders in a template. <> and <>. Scripts and templates are compiled and cached by Elasticsearch to optimize recurring execution. Autoloading is also -supported. For more information, see {ref}/modules-scripting.html[Scripting] and -{ref}/modules-scripting-using.html[How to use scripts] in the Elasticsearch -Reference. +supported. For more information, see <> and +<>. [float] [[watch-execution-context]] -==== Watch Execution Context +==== Watch execution context The following snippet shows the basic structure of the _Watch Execution Context_: @@ -298,14 +298,14 @@ The following snippet shows the basic structure of the _Watch Execution Context_ [float] [[scripts]] -==== Using Scripts +==== Using scripts -You can use scripts to define <> and -<>. The default scripting language is -{ref}/modules-scripting-painless.html[Painless]. +You can use scripts to define <> and +<>. The default scripting language is +<>. NOTE: Starting with 5.0, Elasticsearch is shipped with the new - {ref}/modules-scripting-painless.html[Painless] scripting language. + <> scripting language. Painless was created and designed specifically for use in Elasticsearch. Beyond providing an extensive feature set, its biggest trait is that it's properly sandboxed and safe to use anywhere in the system (including in @@ -323,7 +323,7 @@ access its value via the `color` variable. [float] [[templates]] -==== Using Templates +==== Using templates You use templates to define dynamic content for a watch. At execution time, templates pull in data from the watch execution context. For example, you can use @@ -353,7 +353,7 @@ in sent emails: [float] [[inline-templates-scripts]] -===== Inline Templates and Scripts +===== Inline templates and scripts To define an inline template or script, you simply specify it directly in the value of a field. For example, the following snippet configures the subject of @@ -415,9 +415,9 @@ The formal object definition for a script would be: [float] [[stored-templates-scripts]] -===== Stored Templates and Scripts +===== Stored templates and scripts -If you {ref}/modules-scripting-using.html#modules-scripting-stored-scripts[store] +If you <> your templates and scripts, you can reference them by id. 
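To make the referencing example that follows more concrete, a template would first be stored under an id with the stored scripts API, roughly as sketched below; the id and Mustache source are illustrative only.

[source,js]
--------------------------------------------------
PUT _scripts/email_notification_subject
{
  "script": {
    "lang": "mustache",
    "source": "{{ctx.metadata.color}} alert"
  }
}
--------------------------------------------------
// NOTCONSOLE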
To reference a stored script or template, you use the formal object definition @@ -442,4 +442,4 @@ references the `email_notification_subject` template: } } ---------------------------------------------------------------------- -// NOTCONSOLE \ No newline at end of file +// NOTCONSOLE diff --git a/x-pack/docs/en/watcher/images/hipchat-copy-room-token.jpg b/x-pack/docs/en/watcher/images/hipchat-copy-room-token.jpg deleted file mode 100644 index 3b954269fae1d..0000000000000 Binary files a/x-pack/docs/en/watcher/images/hipchat-copy-room-token.jpg and /dev/null differ diff --git a/x-pack/docs/en/watcher/images/hipchat-copy-user-token.jpg b/x-pack/docs/en/watcher/images/hipchat-copy-user-token.jpg deleted file mode 100644 index 5fbf3a84eb301..0000000000000 Binary files a/x-pack/docs/en/watcher/images/hipchat-copy-user-token.jpg and /dev/null differ diff --git a/x-pack/docs/en/watcher/images/hipchat-copy-v1-token.jpg b/x-pack/docs/en/watcher/images/hipchat-copy-v1-token.jpg deleted file mode 100644 index e7403309418a1..0000000000000 Binary files a/x-pack/docs/en/watcher/images/hipchat-copy-v1-token.jpg and /dev/null differ diff --git a/x-pack/docs/en/watcher/images/hipchat-generate-room-token.jpg b/x-pack/docs/en/watcher/images/hipchat-generate-room-token.jpg deleted file mode 100644 index ea296f60b6db6..0000000000000 Binary files a/x-pack/docs/en/watcher/images/hipchat-generate-room-token.jpg and /dev/null differ diff --git a/x-pack/docs/en/watcher/images/hipchat-generate-user-token.jpg b/x-pack/docs/en/watcher/images/hipchat-generate-user-token.jpg deleted file mode 100644 index 923626f8807c7..0000000000000 Binary files a/x-pack/docs/en/watcher/images/hipchat-generate-user-token.jpg and /dev/null differ diff --git a/x-pack/docs/en/watcher/images/hipchat-generate-v1-token.jpg b/x-pack/docs/en/watcher/images/hipchat-generate-v1-token.jpg deleted file mode 100644 index df594d12882a4..0000000000000 Binary files a/x-pack/docs/en/watcher/images/hipchat-generate-v1-token.jpg and /dev/null differ diff --git a/x-pack/docs/en/watcher/images/hipchat-integration-example.png b/x-pack/docs/en/watcher/images/hipchat-integration-example.png deleted file mode 100644 index 9d8bec6bdf947..0000000000000 Binary files a/x-pack/docs/en/watcher/images/hipchat-integration-example.png and /dev/null differ diff --git a/x-pack/docs/en/watcher/images/watcher-ui-edit-watch.png b/x-pack/docs/en/watcher/images/watcher-ui-edit-watch.png new file mode 100644 index 0000000000000..f6a3ab4354a21 Binary files /dev/null and b/x-pack/docs/en/watcher/images/watcher-ui-edit-watch.png differ diff --git a/x-pack/docs/en/watcher/index.asciidoc b/x-pack/docs/en/watcher/index.asciidoc index 5f51c948ebf3a..13b03fdcbd66d 100644 --- a/x-pack/docs/en/watcher/index.asciidoc +++ b/x-pack/docs/en/watcher/index.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[xpack-alerting]] = Alerting on cluster and index events @@ -65,35 +66,28 @@ from the query, whether the condition was met, and what actions were taken. 
-- -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/getting-started.asciidoc include::getting-started.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/how-watcher-works.asciidoc include::how-watcher-works.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/encrypting-data.asciidoc include::encrypting-data.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input.asciidoc include::input.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/trigger.asciidoc include::trigger.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition.asciidoc include::condition.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions.asciidoc include::actions.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform.asciidoc include::transform.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java.asciidoc include::java.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/managing-watches.asciidoc include::managing-watches.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/example-watches.asciidoc include::example-watches.asciidoc[] + +include::troubleshooting.asciidoc[] + +include::limitations.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/watcher/input.asciidoc b/x-pack/docs/en/watcher/input.asciidoc index 6dee849c735f9..45a33379a8e35 100644 --- a/x-pack/docs/en/watcher/input.asciidoc +++ b/x-pack/docs/en/watcher/input.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[input]] == Inputs @@ -8,25 +9,21 @@ input. {watcher} supports four input types: -* <>: load static data into the execution context. -* <>: load the results of a search into the execution +* <>: load static data into the execution context. +* <>: load the results of a search into the execution context. -* <>: load the results of an HTTP request into the execution +* <>: load the results of an HTTP request into the execution context. -* <>: use a series of inputs to load data into the +* <>: use a series of inputs to load data into the execution context. NOTE: If you don't define an input for a watch, an empty payload is loaded into the execution context. 
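For completeness, a `simple` input just embeds static data directly in the watch definition; a minimal sketch (the field names are arbitrary):

[source,js]
--------------------------------------------------
"input": {
  "simple": {
    "name": "John",
    "color": "red"
  }
}
--------------------------------------------------
// NOTCONSOLE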
-:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/simple.asciidoc include::input/simple.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/search.asciidoc include::input/search.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/http.asciidoc include::input/http.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/chain.asciidoc include::input/chain.asciidoc[] diff --git a/x-pack/docs/en/watcher/input/chain.asciidoc b/x-pack/docs/en/watcher/input/chain.asciidoc index 9952773e7227a..7ff39d97d2906 100644 --- a/x-pack/docs/en/watcher/input/chain.asciidoc +++ b/x-pack/docs/en/watcher/input/chain.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[input-chain]] -=== Chain Input +=== Chain input Use the `chain` input to load data from multiple sources into the watch execution context when the watch is triggered. The inputs in a chain @@ -44,12 +45,12 @@ path set by a `simple` input: arbitrary objects.) <2> Loads the `path` set by the `first` input. -==== Accessing Chained Input Data +==== Accessing chained input data To reference data loaded by a particular input, you use the input's name, `ctx.payload..`. -==== Transforming Chained Input Data +==== Transforming chained input data In certain use-cases the output of the first input should be used as input in a subsequent input. This requires you to do a transform, before you pass @@ -72,7 +73,7 @@ still be available in its original form in `ctx.payload.first`. { "second" : { "transform" : { - "script" : "return [ 'path' : 'ctx.payload.first.path' + '/' ]" + "script" : "return [ 'path' : ctx.payload.first.path + '/' ]" } } }, diff --git a/x-pack/docs/en/watcher/input/http.asciidoc b/x-pack/docs/en/watcher/input/http.asciidoc index 79d37d14a1bf4..bd48d339d64b1 100644 --- a/x-pack/docs/en/watcher/input/http.asciidoc +++ b/x-pack/docs/en/watcher/input/http.asciidoc @@ -1,9 +1,10 @@ +[role="xpack"] [[input-http]] -=== HTTP Input +=== HTTP input Use the `http` input to submit a request to an HTTP endpoint and load the response into the watch execution context when the watch is triggered. See -<> for all of the supported attributes. +<> for all of the supported attributes. With the `http` input, you can: @@ -13,15 +14,14 @@ With the `http` input, you can: need to search clusters that are running different Elasticsearch versions. * Query Elasticsearch APIs other than the search API. For example, you might want - to load data from the {ref}/cluster-nodes-stats.html[Nodes Stats], - {ref}/cluster-health.html[Cluster Health] or {ref}/cluster-state.html[Cluster - State] APIs. + to load data from the <>, + <> or <> APIs. * Query external web services. The `http` input enables you to load data from any service that exposes an HTTP endpoint. This provides a bridge between Elasticsearch clusters and other systems. -==== Querying External Elasticsearch Clusters +==== Querying external Elasticsearch clusters To query an external Elasticsearch cluster, you specify the cluster's `host` and `port` attributes and the index's search endpoint as the `path`. @@ -42,7 +42,7 @@ index: -------------------------------------------------- // NOTCONSOLE -You can use the full Elasticsearch {ref}/query-dsl.html[Query DSL] to perform +You can use the full Elasticsearch <> to perform more sophisticated searches. 
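Tying back to the chain input above: because each chained input is namespaced under its own name in the payload, later constructs can address it through `ctx.payload`. For example, a compare condition on a chained search input named `second` might be sketched as:

[source,js]
--------------------------------------------------
"condition": {
  "compare": {
    "ctx.payload.second.hits.total": {
      "gt": 0
    }
  }
}
--------------------------------------------------
// NOTCONSOLE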
For example, the following `http` input retrieves all documents that contain `event` in the `category` field: @@ -66,8 +66,7 @@ all documents that contain `event` in the `category` field: To load the data from other Elasticsearch APIs, specify the API endpoint as the `path` attribute. Use the `params` attribute to specify query string parameters. For example, the following `http` input -calls the {ref}/cluster-stats.html[Cluster -Stats] API and enables the `human` attribute: +calls the <> API and enables the `human` attribute: [source,js] -------------------------------------------------- @@ -89,7 +88,7 @@ Stats] API and enables the `human` attribute: readable format. [[input-http-auth-basic-example]] -==== Calling External Web Services +==== Calling external web services You can use `http` input to get data from any external web service. The `http` input supports basic authentication. For example, the following input provides @@ -137,9 +136,9 @@ http://openweathermap.org/appid[OpenWeatherMap] service: -------------------------------------------------- // NOTCONSOLE -==== Using Templates +==== Using templates -The `http` input supports templating. You can use <> when +The `http` input supports templating. You can use <> when specifying the `path`, `body`, header values, and parameter values. For example, the following snippet uses templates to specify what index to query @@ -160,7 +159,7 @@ and restrict the results to documents added within the last five minutes: -------------------------------------------------- // NOTCONSOLE -==== Accessing the HTTP Response +==== Accessing the HTTP response If the response body is formatted in JSON or YAML, it is parsed and loaded into the execution context. If the response body is not formatted in JSON or YAML, it @@ -176,7 +175,7 @@ In addition all the headers from the response can be accessed using the [[http-input-attributes]] -==== HTTP Input Attributes +==== HTTP input attributes [cols=",^,^,", options="header"] |====== @@ -189,17 +188,17 @@ In addition all the headers from the response can be accessed using the | `request.port` | yes | - | The port the http service is listening on. | `request.path` | no | - | The URL path. The path can be static text or contain `mustache` - <>. URL query string parameters must be + <>. URL query string parameters must be specified via the `request.params` attribute. | `request.method` | no | get | The HTTP method. Supported values are: `head`, `get`, `post`, `put` and `delete`. | `request.headers` | no | - | The HTTP request headers. The header values can be static text - or include `mustache` <>. + or include `mustache` <>. | `request.params` | no | - | The URL query string parameters. The parameter values can be - static text or contain `mustache` <>. + static text or contain `mustache` <>. | `request.url` | no | - | Allows you to set `request.scheme`, `request.host`, `request.port` and `request.params` add once by specifying a real URL, like @@ -224,7 +223,7 @@ In addition all the headers from the response can be accessed using the | `request.body` | no | - | The HTTP request body. The body can be static text or include - `mustache` <>. + `mustache` <>. | `extract` | no | - | A array of JSON keys to extract from the input response and use as payload. 
In cases when an input generates a large diff --git a/x-pack/docs/en/watcher/input/search.asciidoc b/x-pack/docs/en/watcher/input/search.asciidoc index d4548a159a640..4eaa45006196c 100644 --- a/x-pack/docs/en/watcher/input/search.asciidoc +++ b/x-pack/docs/en/watcher/input/search.asciidoc @@ -1,15 +1,15 @@ +[role="xpack"] [[input-search]] -=== Search Input +=== Search input Use the `search` input to load the results of an Elasticsearch search request into the execution context when the watch is triggered. See -<> for all of the -supported attributes. +<> for all of the supported attributes. In the search input's `request` object, you specify: * The indices you want to search -* The {ref}/search-request-search-type.html[search type] +* The <> * The search request body The search request body supports the full Elasticsearch Query DSL--it's the @@ -59,7 +59,7 @@ the following input loads the latest VIXZ quote from today's daily quotes index: -------------------------------------------------- // NOTCONSOLE -==== Extracting Specific Fields +==== Extracting specific fields You can specify which fields in the search response you want to load into the watch payload with the `extract` attribute. This is useful when a search @@ -81,9 +81,9 @@ watch payload: -------------------------------------------------- // NOTCONSOLE -==== Using Templates +==== Using templates -The `search` input supports {ref}/search-template.html[search templates]. For +The `search` input supports <>. For example, the following snippet references the indexed template called `my_template` and passes a value of 23 to fill in the template's `value` parameter: @@ -109,11 +109,11 @@ parameter: -------------------------------------------------- // NOTCONSOLE -==== Applying Conditions +==== Applying conditions -The `search` input is often used in conjunction with the <> condition. For example, the following snippet adds a condition to -check if the search returned more than five hits: +The `search` input is often used in conjunction with the +<> condition. For example, the following snippet adds +a condition to check if the search returned more than five hits: [source,js] -------------------------------------------------- @@ -136,7 +136,7 @@ check if the search returned more than five hits: -------------------------------------------------- // NOTCONSOLE -==== Accessing the Search Results +==== Accessing the search results Conditions, transforms, and actions can access the search results through the watch execution context. For example: @@ -163,7 +163,7 @@ accurately. |====== | Name |Required | Default | Description -| `request.search_type` | no | `query_then_fetch` | The {ref}/search-request-search-type.html#search-request-search-type[type] +| `request.search_type` | no | `query_then_fetch` | The <> of search request to perform. Valid values are: `dfs_query_and_fetch`, `dfs_query_then_fetch`, `query_and_fetch`, and `query_then_fetch`. The Elasticsearch default is `query_then_fetch`. @@ -171,21 +171,21 @@ accurately. | `request.indices` | no | - | The indices to search. If omitted, all indices are searched, which is the default behaviour in Elasticsearch. -| `request.body` | no | - | The body of the request. The {ref}/search-request-body.html[request body] +| `request.body` | no | - | The body of the request. The <> follows the same structure you normally send in the body of a REST `_search` - request. The body can be static text or include `mustache` <>. + request. The body can be static text or include `mustache` <>. 
-| `request.template` | no | - | The body of the search template. See <> +| `request.template` | no | - | The body of the search template. See <> for more information. | `request.indices_options.expand_wildcards` | no | `open` | How to expand wildcards. Valid values are: `all`, `open`, `closed`, and `none` - See {ref}/multi-index.html#multi-index[`expand_wildcards`] for more information. + See <> for more information. | `request.indices_options.ignore_unavailable` | no | `true` | Whether the search should ignore unavailable indices. See - {ref}/multi-index.html#multi-index[`ignore_unavailable`] for more information. + <> for more information. | `request.indices_options.allow_no_indices` | no | `true` | Whether to allow a search where a wildcard indices expression results in no - concrete indices. See {ref}/multi-index.html#multi-index[allow_no_indices] + concrete indices. See <> for more information. | `extract` | no | - | A array of JSON keys to extract from the search response and load as the payload. diff --git a/x-pack/docs/en/watcher/input/simple.asciidoc b/x-pack/docs/en/watcher/input/simple.asciidoc index c756a4e5403e2..e69c79f53e1b8 100644 --- a/x-pack/docs/en/watcher/input/simple.asciidoc +++ b/x-pack/docs/en/watcher/input/simple.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[input-simple]] -=== Simple Input +=== Simple input Use the `simple` input to load static data into the execution context when the watch is triggered. This enables you to store the data diff --git a/x-pack/docs/en/watcher/java.asciidoc b/x-pack/docs/en/watcher/java.asciidoc index e5cb6b54b0c65..0de6758741335 100644 --- a/x-pack/docs/en/watcher/java.asciidoc +++ b/x-pack/docs/en/watcher/java.asciidoc @@ -60,6 +60,7 @@ repositories { // Add the Elasticsearch Maven Repository maven { + name "elastic" url "https://artifacts.elastic.co/maven" } } @@ -101,29 +102,20 @@ XPackClient xpackClient = new XPackClient(client); WatcherClient watcherClient = xpackClient.watcher(); -------------------------------------------------- -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/put-watch.asciidoc include::java/put-watch.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/get-watch.asciidoc include::java/get-watch.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/delete-watch.asciidoc include::java/delete-watch.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/execute-watch.asciidoc include::java/execute-watch.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/ack-watch.asciidoc include::java/ack-watch.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/activate-watch.asciidoc include::java/activate-watch.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/deactivate-watch.asciidoc include::java/deactivate-watch.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/stats.asciidoc include::java/stats.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/service.asciidoc include::java/service.asciidoc[] diff --git a/x-pack/docs/en/watcher/java/ack-watch.asciidoc b/x-pack/docs/en/watcher/java/ack-watch.asciidoc index f24f0b89a0e1c..7cef48d6e3373 100644 --- 
a/x-pack/docs/en/watcher/java/ack-watch.asciidoc +++ b/x-pack/docs/en/watcher/java/ack-watch.asciidoc @@ -1,13 +1,13 @@ [float] [[api-java-ack-watch]] -=== Ack Watch API +=== Ack watch API -<> a watch enables you to manually throttle +<> a watch enables you to manually throttle execution of the watch actions. The action's _acknowledgement state_ is stored in the `status.actions..ack.state` structure. The current status of the watch and the state of its actions are returned as part -of the <> response: +of the <> response: [source,java] -------------------------------------------------- diff --git a/x-pack/docs/en/watcher/java/activate-watch.asciidoc b/x-pack/docs/en/watcher/java/activate-watch.asciidoc index 63e88001a4be0..96ea3f5e23d8a 100644 --- a/x-pack/docs/en/watcher/java/activate-watch.asciidoc +++ b/x-pack/docs/en/watcher/java/activate-watch.asciidoc @@ -1,12 +1,12 @@ [float] [[api-java-activate-watch]] -=== Activate Watch API +=== Activate watch API -A watch can be either <>. This API +A watch can be either <>. This API enables you to activate a currently inactive watch. The status of an inactive watch is returned with the watch definition -when you call the <>: +when you call the <>: [source,java] -------------------------------------------------- diff --git a/x-pack/docs/en/watcher/java/deactivate-watch.asciidoc b/x-pack/docs/en/watcher/java/deactivate-watch.asciidoc index 325f37bf32587..98c4220e68c88 100644 --- a/x-pack/docs/en/watcher/java/deactivate-watch.asciidoc +++ b/x-pack/docs/en/watcher/java/deactivate-watch.asciidoc @@ -1,12 +1,12 @@ [float] [[api-java-deactivate-watch]] -=== Deactivate Watch API +=== Deactivate watch API -A watch can be either <>. This API +A watch can be either <>. This API enables you to deactivate a currently active watch. The status of an active watch is returned with the watch definition -when you call the <>: +when you call the <>: [source,java] -------------------------------------------------- diff --git a/x-pack/docs/en/watcher/java/delete-watch.asciidoc b/x-pack/docs/en/watcher/java/delete-watch.asciidoc index 4d37b910fd179..a019db933748c 100644 --- a/x-pack/docs/en/watcher/java/delete-watch.asciidoc +++ b/x-pack/docs/en/watcher/java/delete-watch.asciidoc @@ -1,8 +1,8 @@ [float] [[api-java-delete-watch]] -=== Delete Watch API +=== Delete watch API -The DELETE watch API removes a watch (identified by its `id`) from {watcher}. +The delete watch API removes a watch (identified by its `id`) from {watcher}. Once removed, the document representing the watch in the `.watches` index is gone and it will never be executed again. diff --git a/x-pack/docs/en/watcher/java/execute-watch.asciidoc b/x-pack/docs/en/watcher/java/execute-watch.asciidoc index 34f2b8aa1e767..6379c09ed23d6 100644 --- a/x-pack/docs/en/watcher/java/execute-watch.asciidoc +++ b/x-pack/docs/en/watcher/java/execute-watch.asciidoc @@ -1,6 +1,6 @@ [float] [[api-java-execute-watch]] -=== Execute Watch API +=== Execute watch API This API enables on-demand execution of a watch stored in the `.watches` index. It can be used to test a watch without executing all its actions or by ignoring diff --git a/x-pack/docs/en/watcher/java/get-watch.asciidoc b/x-pack/docs/en/watcher/java/get-watch.asciidoc index e4fcd86d85c85..f7a8c92fc20c2 100644 --- a/x-pack/docs/en/watcher/java/get-watch.asciidoc +++ b/x-pack/docs/en/watcher/java/get-watch.asciidoc @@ -1,6 +1,6 @@ [float] [[api-java-get-watch]] -=== Get Watch API +=== Get watch API This API retrieves a watch by its id. 
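For readers working against the REST layer rather than the Java client, the equivalent lookup is a plain GET on the watch id (the id below is a placeholder):

[source,js]
--------------------------------------------------
GET _watcher/watch/my_watch
--------------------------------------------------
// NOTCONSOLE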
diff --git a/x-pack/docs/en/watcher/java/put-watch.asciidoc b/x-pack/docs/en/watcher/java/put-watch.asciidoc index 682504187139b..7e584efaf038d 100644 --- a/x-pack/docs/en/watcher/java/put-watch.asciidoc +++ b/x-pack/docs/en/watcher/java/put-watch.asciidoc @@ -1,8 +1,8 @@ [float] [[api-java-put-watch]] -=== PUT Watch API +=== Put watch API -The PUT watch API either registers a new watch in {watcher} or update an +The put watch API either registers a new watch in {watcher} or update an existing one. Once registered, a new document will be added to the `.watches` index, representing the watch, and the watch trigger will immediately be registered with the relevant trigger engine (typically the scheduler, for the diff --git a/x-pack/docs/en/watcher/limitations.asciidoc b/x-pack/docs/en/watcher/limitations.asciidoc new file mode 100644 index 0000000000000..1f00d7f1b3e0f --- /dev/null +++ b/x-pack/docs/en/watcher/limitations.asciidoc @@ -0,0 +1,29 @@ +[role="xpack"] +[[watcher-limitations]] +== Watcher limitations + +[float] +=== Watches are not updated when file based scripts change + +When you refer to a file script in a watch, the watch itself is not updated +if you change the script on the filesystem. + +Currently, the only way to reload a file script in a watch is to delete +the watch and recreate it. + +[float] +=== Watcher UI + +When you create a new watch or edit an existing watch, if you navigate away +from the page without saving your changes they will be lost without warning. +Make sure to save your changes before leaving the page. + +image::images/watcher-ui-edit-watch.png[Editing a watch in Kibana] + +[float] +=== Security integration + +When the {security-features} are enabled, a watch stores information about what +the user who stored the watch is allowed to execute **at that time**. This means, +if those permissions change over time, the watch will still be able to execute +with the permissions that existed when the watch was created. diff --git a/x-pack/docs/en/watcher/managing-watches.asciidoc b/x-pack/docs/en/watcher/managing-watches.asciidoc index a155132d5e4b1..cfab593dfd938 100644 --- a/x-pack/docs/en/watcher/managing-watches.asciidoc +++ b/x-pack/docs/en/watcher/managing-watches.asciidoc @@ -1,18 +1,19 @@ +[role="xpack"] [[managing-watches]] -== Managing Watches +== Managing watches {watcher} provides as set of APIs you can use to manage your watches: -* Use the {ref}/watcher-api-put-watch.html[Put Watch API] to add or update watches -* Use the {ref}/watcher-api-get-watch.html[Get Watch API] to retrieve watches -* Use the {ref}/watcher-api-delete-watch.html[Delete Watch API] to delete watches -* Use the {ref}/watcher-api-activate-watch.html[Activate Watch API] to activate watches -* Use the {ref}/watcher-api-deactivate-watch.html[Deactivate Watch API] to deactivate watches -* Use the {ref}/watcher-api-ack-watch.html[Ack Watch API] to acknowledge watches +* Use the <> to add or update watches +* Use the <> to retrieve watches +* Use the <> to delete watches +* Use the <> to activate watches +* Use the <> to deactivate watches +* Use the <> to acknowledge watches [float] [[listing-watches]] -=== Listing Watches +=== Listing watches Currently there is not dedicated API for listing the stored watches. 
However, since {watcher} stores its watches in the `.watches` index, you can list them diff --git a/x-pack/docs/en/watcher/release-notes.asciidoc b/x-pack/docs/en/watcher/release-notes.asciidoc index 5875458a15433..d9410f69acb32 100644 --- a/x-pack/docs/en/watcher/release-notes.asciidoc +++ b/x-pack/docs/en/watcher/release-notes.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[watcher-release-notes]] == Watcher Release Notes (Pre-5.0) @@ -119,8 +120,8 @@ March 30, 2016 {ref}/watcher-api-execute-watch.html[Execute Watch API] .New Features -* Added <> -* Added support for adding <> +* Added <> +* Added support for adding <> via HTTP requests and superseding and deprecating the usage of `attach_data` in order to use this feature @@ -157,7 +158,7 @@ December 17, 2015 November 24, 2015 .New Features -* Adds support for <> +* Adds support for <> .Enhancements * Adds support for Elasticsearch 2.1.0. @@ -204,13 +205,13 @@ October 28, 2015 use the following index name ``. .New Features -* Added new <> -* Added new <> -* Watches now have an <>. In addition, a new +* Added new <> +* Added new <> +* Watches now have an <>. In addition, a new API was added to {ref}/watcher-api-activate-watch.html[activate] /{ref}watcher-api-deactivate-watch.html[deactivate] registered watches. -* Added new <>, that can compare an array - of values in the <> +* Added new <>, that can compare an array + of values in the <> to a given value. .Enhancements @@ -273,40 +274,40 @@ June 25, 2015 June 19, 2015 .New Features -* Added <> support to the Execute API +* Added <> support to the Execute API .Enhancements -* Added execution context <> support. -* Email html body sanitization is now <>. +* Added execution context <> support. +* Email html body sanitization is now <>. * It is now possible to configure timeouts for http requests in - <> and <>. + <> and <>. [float] ==== 1.0.0-Beta2 June 10, 2015 .New Features -* <> are now applied at the action +* <> are now applied at the action level rather than the watch level. -* Added support for <> +* Added support for <> indexing to the index action. -* Added a queued watches metric that's accessible via the <>. +* Added a queued watches metric that's accessible via the <>. * Added a currently-executing watches metric that's accessible via the - <>. + <>. .Enhancements -* The <> result now includes the value of +* The <> result now includes the value of each field that was referenced in the comparison. -* The <> now supports a default trigger +* The <> now supports a default trigger event (**breaking change**). * The `watch_record` document structure in the `.watch_history-*` indices has changed significantly (**breaking change**). * A new internal index was introduced - `.triggered_watches` -* Added support for headers in the <> result - and the <> result. -* Add plain text response body support for the <>. +* Added support for headers in the <> result + and the <> result. +* Add plain text response body support for the <>. .Bug Fixes -* Disallow negative time value settings for <> -* Added support for separate keystore and truststore in <> - and <>. +* Disallow negative time value settings for <> +* Added support for separate keystore and truststore in <> + and <>. 
diff --git a/x-pack/docs/en/watcher/transform.asciidoc b/x-pack/docs/en/watcher/transform.asciidoc index 8241d7b0cb442..320bc0b30e432 100644 --- a/x-pack/docs/en/watcher/transform.asciidoc +++ b/x-pack/docs/en/watcher/transform.asciidoc @@ -1,10 +1,11 @@ +[role="xpack"] [[transform]] == Transforms -A _Transform_ processes and changes the payload in the watch execution context +A _transform_ processes and changes the payload in the watch execution context to prepare it for the watch actions. {watcher} supports three types of -transforms: <>, -<> and <>. +transforms: <>, +<> and <>. NOTE: Transforms are optional. When none are defined, the actions have access to @@ -56,11 +57,8 @@ part of the definition of the `my_webhook` action. <1> A watch level `transform` <2> An action level `transform` -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform/search.asciidoc include::transform/search.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform/script.asciidoc include::transform/script.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform/chain.asciidoc include::transform/chain.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/watcher/transform/chain.asciidoc b/x-pack/docs/en/watcher/transform/chain.asciidoc index 4f7fad37256de..a00e86bac0765 100644 --- a/x-pack/docs/en/watcher/transform/chain.asciidoc +++ b/x-pack/docs/en/watcher/transform/chain.asciidoc @@ -1,15 +1,16 @@ +[role="xpack"] [[transform-chain]] -=== Chain Transform +=== Chain transform -A <> that executes an ordered list of configured transforms +A <> that executes an ordered list of configured transforms in a chain, where the output of one transform serves as the input of the next transform in the chain. The payload that is accepted by this transform serves as the input of the first transform in the chain and the output of the last transform in the chain is the output of the `chain` transform as a whole. You can use chain transforms to build more complex transforms out of the other -available transforms. For example, you can combine a <> -transform and a <> transform, as shown in the +available transforms. For example, you can combine a <> +transform and a <> transform, as shown in the following snippet: [source,js] diff --git a/x-pack/docs/en/watcher/transform/script.asciidoc b/x-pack/docs/en/watcher/transform/script.asciidoc index f1a46d482d9e6..24e1ae6bb34b4 100644 --- a/x-pack/docs/en/watcher/transform/script.asciidoc +++ b/x-pack/docs/en/watcher/transform/script.asciidoc @@ -1,14 +1,15 @@ +[role="xpack"] [[transform-script]] -=== Script Transform +=== Script transform -A <> that executes a script on the current payload in the +A <> that executes a script on the current payload in the watch execution context and replaces it with a newly generated one. The following snippet shows how a simple script transform can be defined on the watch level: TIP: The `script` transform is often useful when used in combination with the - <> transform, where the script can extract only + <> transform, where the script can extract only the significant data from a search result, and by that, keep the payload - minimal. This can be achieved with the <> + minimal. This can be achieved with the <> transform. 
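A rough sketch of that combination: a `chain` transform that first runs a `search` transform and then a `script` transform to reduce the response to a single value. The index name and the extracted field are illustrative.

[source,js]
--------------------------------------------------
"transform": {
  "chain": [
    {
      "search": {
        "request": {
          "indices": [ "events" ],
          "body": {
            "size": 0,
            "query": { "match_all": {} }
          }
        }
      }
    },
    {
      "script": "return [ 'total_events' : ctx.payload.hits.total ]"
    }
  ]
}
--------------------------------------------------
// NOTCONSOLE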
@@ -32,33 +33,32 @@ NOTE: The executed script may either return a valid model that is the equivalent The `script` attribute may hold a string value in which case it will be treated as an inline script and the default elasticsearch script languages will be assumed -(as described in {ref}/modules-scripting.html#modules-scripting[here]). You can +(as described in <>). You can use the other scripting languages supported by Elasticsearch. For this, you need to set the `script` field to an object describing the script and its language. The following table lists the possible settings that can be configured: [[transform-script-settings]] -.Script Transform Settings -[options="header,footer"] +.Script transform settings +[options="header"] |====== | Name |Required | Default | Description -| `inline` | yes* | - | When using an inline script, this field holds +| `inline` | yes | - | When using an inline script, this field holds the script itself. -| `id` | yes* | - | When referring to a stored script, this +| `id` | yes | - | When referring to a stored script, this field holds the id of the script. | `lang` | no | `painless` | The script language | `params` | no | - | Additional parameters/variables that are accessible by the script - |====== When using the object notation of the script, one (and only one) of `inline`, -or `id` fields must be defined +or `id` fields must be defined. NOTE: In addition to the provided `params`, the scripts also have access to the - <>. + <>. diff --git a/x-pack/docs/en/watcher/transform/search.asciidoc b/x-pack/docs/en/watcher/transform/search.asciidoc index d7f468f183182..83a7ce553d660 100644 --- a/x-pack/docs/en/watcher/transform/search.asciidoc +++ b/x-pack/docs/en/watcher/transform/search.asciidoc @@ -1,7 +1,8 @@ +[role="xpack"] [[transform-search]] -=== Search Transform +=== Search transform -A <> that executes a search on the cluster and replaces +A <> that executes a search on the cluster and replaces the current payload in the watch execution context with the returned search response. The following snippet shows how a simple search transform can be defined on the watch level: @@ -47,35 +48,35 @@ execute a search over all events indices, matching events with `error` priority: The following table lists all available settings for the search transform: [[transform-search-settings]] -.Search Transform Settings +.Search transform settings [cols=",^,,", options="header"] |====== | Name |Required | Default | Description -| `request.search_type` | no | query_then_fetch | The search {ref}/search-request-search-type.html[type]. +| `request.search_type` | no | query_then_fetch | The search <>. | `request.indices` | no | all indices | One or more indices to search on. | `request.body` | no | `match_all` query | The body of the request. The - {ref}/search-request-body.html[request body] follows + <> follows the same structure you normally send in the body of a REST `_search` request. The body can be static text - or include `mustache` <>. + or include `mustache` <>. | `request.indices_options.expand_wildcards` | no | `open` | Determines how to expand indices wildcards. 
Can be one of `open`, `closed`, `none` or `all` - (see {ref}/multi-index.html[multi-index support]) + (see <>) | `request.indices_options.ignore_unavailable` | no | `true` | A boolean value that determines whether the search should leniently ignore unavailable indices - (see {ref}/multi-index.html[multi-index support]) + (see <>) | `request.indices_options.allow_no_indices` | no | `true` | A boolean value that determines whether the search should leniently return no results when no indices - are resolved (see {ref}/multi-index.html[multi-index support]) + are resolved (see <>) | `request.template` | no | - | The body of the search template. See - <> for more information. + <> for more information. | `timeout` | no | 30s | The timeout for waiting for the search api call to return. If no response is returned within this time, @@ -84,11 +85,11 @@ The following table lists all available settings for the search transform: |====== [[transform-search-template]] -==== Template Support +==== Template support The search transform support mustache <>. This can either be as part of the body definition, or alternatively, point to an existing -template (either defined in a file or {ref}/search-template.html#pre-registered-templates[registered] +template (either defined in a file or <> as a script in Elasticsearch). For example, the following snippet shows a search that refers to the scheduled @@ -129,7 +130,7 @@ time of the watch: // NOTCONSOLE The model of the template is a union between the provided `template.params` -settings and the <>. +settings and the <>. The following is an example of using templates that refer to provided parameters: diff --git a/x-pack/docs/en/watcher/trigger.asciidoc b/x-pack/docs/en/watcher/trigger.asciidoc index af830e829a45e..a0852b0470e9a 100644 --- a/x-pack/docs/en/watcher/trigger.asciidoc +++ b/x-pack/docs/en/watcher/trigger.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[trigger]] == Triggers @@ -7,7 +8,6 @@ appropriate _Trigger Engine_. The trigger engine is responsible for evaluating the trigger and triggering the watch when needed. {watcher} is designed to support different types of triggers, but only time-based -<> triggers are currently available. +<> triggers are currently available. -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/trigger/schedule.asciidoc include::trigger/schedule.asciidoc[] diff --git a/x-pack/docs/en/watcher/trigger/schedule.asciidoc b/x-pack/docs/en/watcher/trigger/schedule.asciidoc index abbc3f5cfe8e5..0c9bffb9359c3 100644 --- a/x-pack/docs/en/watcher/trigger/schedule.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule.asciidoc @@ -1,7 +1,8 @@ +[role="xpack"] [[trigger-schedule]] -=== Schedule Trigger +=== Schedule trigger -Schedule <> define when the watch execution should start based +Schedule <> define when the watch execution should start based on date and time. All times are specified in UTC time. {watcher} uses the system clock to determine the current time. To ensure schedules @@ -14,7 +15,7 @@ that's more frequent than the throttle period, the throttle period overrides the schedule. For example, if you set the throttle period to one minute (60000 ms) and set the schedule to every 10 seconds, the watch is executed no more than once per minute. For more information about throttling, see -<>. +<>. {watcher} provides several types of schedule triggers: @@ -26,23 +27,16 @@ once per minute. 
For more information about throttling, see * <> * <> -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/hourly.asciidoc include::schedule/hourly.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/daily.asciidoc include::schedule/daily.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/weekly.asciidoc include::schedule/weekly.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/monthly.asciidoc include::schedule/monthly.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/yearly.asciidoc include::schedule/yearly.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/cron.asciidoc include::schedule/cron.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/interval.asciidoc include::schedule/interval.asciidoc[] diff --git a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc index 57a6ebdfd92ef..578e20bec3891 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc @@ -1,7 +1,8 @@ +[role="xpack"] [[schedule-cron]] ==== `cron` Schedule -A <> trigger that enables you to use a +A <> trigger that enables you to use a https://en.wikipedia.org/wiki/Cron[cron] style expression to specify when you want the scheduler to start the watch execution. {watcher} uses the cron parser from the http://www.quartz-scheduler.org[Quartz Job Scheduler]. For more @@ -12,10 +13,10 @@ WARNING: While `cron` triggers are super powerful, we recommend using one of the other schedule types if you can, as they are much more straightforward to configure. If you use `cron`, construct your `cron` expressions with care to be sure you are actually setting the schedule - you want. You can use the <> tool to validate + you want. You can use the <> tool to validate your cron expressions and see what the resulting trigger times will be. -===== Cron Expressions +===== Cron expressions A cron expression is a string of the following form: @@ -28,7 +29,7 @@ All elements are required except for `year`. <> shows the valid values for each element in a cron expression. [[schedule-cron-elements]] -.Cron Expression Elements +.Cron expression elements [cols=",^,,", options="header"] |====== | Name | Required | Valid Values | Valid Special Characters @@ -49,7 +50,7 @@ NOTE: Currently, you must specify `?` for either the `day_of_week` or `day_of_month`. Explicitly specifying both values is not supported. [[schedule-cron-special-characters]] -.Cron Special Characters +.Cron special characters [options="header"] |====== | Special Character | Description @@ -116,7 +117,7 @@ NOTE: Currently, you must specify `?` for either the `day_of_week` or |====== -.Setting Daily Triggers +.Setting daily triggers [options="header"] |====== | Cron Expression | Description @@ -124,7 +125,7 @@ NOTE: Currently, you must specify `?` for either the `day_of_week` or | `0 5 9 * * ? 2015` | Trigger at 9:05 AM every day during the year 2015. 
|====== -.Restricting Triggers to a Range of Days or Times +.Restricting triggers to a range of days or times [options="header"] |====== | Cron Expression | Description @@ -133,7 +134,7 @@ NOTE: Currently, you must specify `?` for either the `day_of_week` or at 9:05 AM every day. |====== -.Setting Interval Triggers +.Setting interval triggers [options="header"] |====== | Cron Expression | Description @@ -143,7 +144,7 @@ NOTE: Currently, you must specify `?` for either the `day_of_week` or on the first day of the month. |====== -.Setting Schedules that Trigger on a Particular Day +.Setting schedules that trigger on a particular day [options="header"] |====== | Cron Expression | Description @@ -156,7 +157,7 @@ NOTE: Currently, you must specify `?` for either the `day_of_week` or | `0 5 9 ? * 6#1` | Trigger at 9:05 AM on the first Friday of every month. |====== -.Setting Triggers Using Last +.Setting triggers using last [options="header"] |====== | Cron Expression | Description @@ -166,7 +167,7 @@ NOTE: Currently, you must specify `?` for either the `day_of_week` or |====== -===== Configuring a Cron Schedule +===== Configuring a cron schedule To configure a `cron` schedule, you simply specify the cron expression as a string value. For example, the following snippet configures a `cron` schedule @@ -186,7 +187,7 @@ that triggers every day at noon: -------------------------------------------------- // NOTCONSOLE -===== Configuring a Multiple Times Cron Schedule +===== Configuring a multiple times cron schedule To configure a `cron` schedule that triggers multiple times, you can specify an array of cron expressions. For example, the following `cron` diff --git a/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc index e729335d59b29..6fb91d2a71253 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc @@ -1,18 +1,19 @@ +[role="xpack"] [[schedule-daily]] -==== Daily Schedule +==== Daily schedule -A <> that triggers at a particular time +A <> that triggers at a particular time every day. To use the `daily` schedule, you specify the time of day (or times) when you want the scheduler to start the watch execution with the `at` attribute. Times are specified in the form `HH:mm` on a 24-hour clock. You can also use the reserved values `midnight` and `noon` for `00:00` and `12:00`, and -<>. +<>. NOTE: If you don't specify the `at` attribute for a `daily` schedule, it defaults to firing once daily at midnight, `00:00`. -===== Configuring a Daily Schedule +===== Configuring a daily schedule To configure a once a day schedule, you specify a single time with the `at` attribute. For example, the following `daily` schedule triggers once every @@ -30,7 +31,7 @@ day at 5:00 PM: -------------------------------------------------- // NOTCONSOLE -===== Configuring a Multiple Times Daily Schedule +===== Configuring a multiple times daily schedule To configure a `daily` schedule that triggers at multiple times during the day, you specify an array of times. For example, the following `daily` schedule @@ -49,7 +50,7 @@ triggers at `00:00`, `12:00`, and `17:00` every day. // NOTCONSOLE [[specifying-times-using-objects]] -===== Specifying Times Using Objects +===== Specifying times using objects In addition to using the `HH:mm` string syntax to specify times, you can specify a time as an object that has `hour` and `minute` attributes. 
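Under that object notation, a daily trigger at 17:30 might be sketched as:

[source,js]
--------------------------------------------------
{
  "trigger": {
    "schedule": {
      "daily": {
        "at": {
          "hour": 17,
          "minute": 30
        }
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE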
diff --git a/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc index 9ec750eebcd2b..17939096a461f 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc @@ -1,7 +1,8 @@ +[role="xpack"] [[schedule-hourly]] -==== Hourly Schedule +==== Hourly schedule -A <> that triggers at a particular minute every +A <> that triggers at a particular minute every hour of the day. To use the `hourly` schedule, you specify the minute (or minutes) when you want the scheduler to start the watch execution with the `minute` attribute. @@ -10,7 +11,7 @@ NOTE: If you don't specify the `minute` attribute for an `hourly` schedule, it defaults to `0` and the schedule triggers on the hour every hour--`12:00`, `13:00`, `14:00`, and so on. -===== Configuring a Once an Hour Schedule +===== Configuring a once an hour schedule To configure a once an hour schedule, you specify a single time with the `minute` attribute. @@ -30,7 +31,7 @@ For example, the following `hourly` schedule triggers at minute 30 every hour-- -------------------------------------------------- // NOTCONSOLE -===== Configuring a Multiple Times Hourly Schedule +===== Configuring a multiple times hourly schedule To configure an `hourly` schedule that triggers at multiple times during the hour, you specify an array of minutes. For example, the following schedule diff --git a/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc index e534181ec0c2f..f4f8b4efd5a6f 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc @@ -1,7 +1,8 @@ +[role="xpack"] [[schedule-interval]] -==== Interval Schedule +==== Interval schedule -A <> that triggers at a fixed time interval. The +A <> that triggers at a fixed time interval. The interval can be set in seconds, minutes, hours, days, or weeks: * `"Xs"` - trigger every `X` seconds. For example, `"30s"` means every 30 seconds. @@ -16,7 +17,7 @@ NOTE: The interval value differs from the standard _time value_ used in Elasticsearch. You cannot configure intervals in milliseconds or nanoseconds. -===== Configuring an Interval Schedule +===== Configuring an interval schedule To configure an `interval` schedule, you specify a string value that represents the interval. If you omit the unit of time (`s`,`m`, `h`, `d`, or `w`), it diff --git a/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc index d2cfe409992a7..b8703ccdd2929 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc @@ -1,7 +1,8 @@ +[role="xpack"] [[schedule-monthly]] -==== Monthly Schedule +==== Monthly schedule -A <> that triggers at a specific day and time +A <> that triggers at a specific day and time every month. To use the `monthly` schedule, you specify the day of the month and time (or days and times) when you want the scheduler to start the watch execution with the `on` and `at` attributes. @@ -10,7 +11,7 @@ You specify the day of month as a numeric value between `1` and `31` (inclusive) Times are specified in the form `HH:mm` on a 24-hour clock. You can also use the reserved values `midnight` and `noon` for `00:00` and `12:00`. 
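Looping back to the `interval` schedule described above, the value is simply a number plus a unit suffix; for example, a trigger that fires every 45 minutes might be sketched as:

[source,js]
--------------------------------------------------
{
  "trigger": {
    "schedule": {
      "interval": "45m"
    }
  }
}
--------------------------------------------------
// NOTCONSOLE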
-===== Configuring a Monthly Schedule +===== Configuring a monthly schedule To configure a once a month schedule, you specify a single day and time with the `on` and `at` attributes. For example, the following `monthly` schedule triggers @@ -31,7 +32,7 @@ on the 10th of each month at noon: NOTE: You can also specify the day and time with the `day` and `time` attributes, they are interchangeable with `on` and `at`. -===== Configuring a Multiple Times Monthly Schedule +===== Configuring a multiple times monthly schedule To configure a `monthly` schedule that triggers multiple times a month, you can specify an array of day and time values. For example, the following `monthly` diff --git a/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc index d6a403cb125c6..1408489d8d4b8 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc @@ -1,7 +1,8 @@ +[role="xpack"] [[schedule-weekly]] -==== Weekly Schedule +==== Weekly schedule -A <> that triggers at a specific day and time +A <> that triggers at a specific day and time every week. To use the `weekly` schedule, you specify the day and time (or days and times) when you want the scheduler to start the watch execution with the `on` and `at` attributes. @@ -16,7 +17,7 @@ being the first day of the week): Times are specified in the form `HH:mm` on a 24-hour clock. You can also use the reserved values `midnight` and `noon` for `00:00` and `12:00`. -===== Configuring a Weekly Schedule +===== Configuring a weekly schedule To configure a once a week schedule, you specify the day with the `on` attribute and the time with the `at` attribute. For example, the following `weekly` schedule @@ -37,7 +38,7 @@ triggers once a week on Friday at 5:00 PM: NOTE: You can also specify the day and time with the `day` and `time` attributes, they are interchangeable with `on` and `at`. -===== Configuring a Multiple Times Weekly Schedule +===== Configuring a multiple times weekly schedule To configure a `weekly` schedule that triggers multiple times a week, you can specify an array of day and time values. For example, the following `weekly` diff --git a/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc index d11cc5d072787..e25d5561c86ff 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc @@ -1,7 +1,8 @@ +[role="xpack"] [[schedule-yearly]] -==== Yearly Schedule +==== Yearly schedule -A <> that triggers at a specific day and time +A <> that triggers at a specific day and time every year. To use the `yearly` schedule, you specify the month, day, and time (or months, days, and times) when you want the scheduler to start the watch execution with the `in`, `on`, and `at` attributes. @@ -20,7 +21,7 @@ You specify the day of month as a numeric value between `1` and `31` (inclusive) The Times are specified in the form `HH:mm` on a 24-hour clock. You can also use the reserved values `midnight` and `noon` for `00:00` and `12:00`. -===== Configuring a Yearly Schedule +===== Configuring a yearly schedule To configure a once a year schedule, you specify the month with the `in` attribute, the day with the `on` attribute, and the time with the `at` attribute. 
For @@ -42,7 +43,7 @@ example, the following `yearly` schedule triggers once a year at noon on January NOTE: You can also specify the month, day, and time with the `month`, `day`, and `time` attributes, they are interchangeable with `in`, `on`, and `at`. -===== Configuring a Multiple Times Yearly Schedule +===== Configuring a multiple times yearly schedule To configure a `yearly` schedule that triggers multiple times a year, you can specify an array of month, day, and time values. For example, the following diff --git a/x-pack/docs/en/watcher/troubleshooting.asciidoc b/x-pack/docs/en/watcher/troubleshooting.asciidoc new file mode 100644 index 0000000000000..37f0f6ad3618a --- /dev/null +++ b/x-pack/docs/en/watcher/troubleshooting.asciidoc @@ -0,0 +1,67 @@ +[role="xpack"] +[testenv="gold"] +[[watcher-troubleshooting]] +== Troubleshooting {watcher} +[subs="attributes"] +++++ +{watcher} +++++ + +[float] +=== Dynamic mapping error when trying to add a watch + +If you get the _Dynamic Mapping is Disabled_ error when you try to add a watch, +verify that the index mappings for the `.watches` index are available. You can +do that by submitting the following request: + +[source,js] +-------------------------------------------------- +GET .watches/_mapping +-------------------------------------------------- +// CONSOLE +// TEST[setup:my_active_watch] + +If the index mappings are missing, follow these steps to restore the correct +mappings: + +. Stop the Elasticsearch node. +. Add `xpack.watcher.index.rest.direct_access : true` to `elasticsearch.yml`. +. Restart the Elasticsearch node. +. Delete the `.watches` index: ++ +-- +[source,js] +-------------------------------------------------- +DELETE .watches +-------------------------------------------------- +// CONSOLE +// TEST[skip:index deletion] +-- +. Disable direct access to the `.watches` index: +.. Stop the Elasticsearch node. +.. Remove `xpack.watcher.index.rest.direct_access : true` from `elasticsearch.yml`. +.. Restart the Elasticsearch node. + +[float] +=== Unable to send email + +If you get an authentication error indicating that you need to continue the +sign-in process from a web browser when Watcher attempts to send email, you need +to configure Gmail to +https://support.google.com/accounts/answer/6010255?hl=en[Allow Less Secure Apps to access your account]. + +If you have two-step verification enabled for your email account, you must +generate and use an App Specific password to send email from {watcher}. For more +information, see: + +- Gmail: https://support.google.com/accounts/answer/185833?hl=en[Sign in using App Passwords] +- Outlook.com: http://windows.microsoft.com/en-us/windows/app-passwords-two-step-verification[App passwords and two-step verification] + +[float] +=== {watcher} not responsive + +Keep in mind that there's no built-in validation of scripts that you add to a +watch. Buggy or deliberately malicious scripts can negatively impact {watcher} +performance. For example, if you add multiple watches with buggy script +conditions in a short period of time, {watcher} might be temporarily unable to +process watches until the bad watches time out. 
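As a minimal, hedged companion to the troubleshooting steps above (assuming a configured low-level `RestClient` and the unprefixed 7.x `_watcher` endpoints; the class and method names are hypothetical), the mapping check and the current watcher state can also be queried programmatically:

[source,java]
--------------------------------------------------
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.io.IOException;

public class WatcherTroubleshootingChecks {

    // Fetches the .watches mapping; an empty mapping body indicates the
    // "Dynamic Mapping is Disabled" situation described above, where the
    // index mappings need to be restored.
    static String watchesMapping(RestClient client) throws IOException {
        Response response = client.performRequest(new Request("GET", "/.watches/_mapping"));
        return EntityUtils.toString(response.getEntity());
    }

    // Reports the current watcher state (for example "started" or "stopped"),
    // useful when investigating an unresponsive Watcher instance.
    static String watcherStats(RestClient client) throws IOException {
        Response response = client.performRequest(new Request("GET", "/_watcher/stats"));
        return EntityUtils.toString(response.getEntity());
    }
}
--------------------------------------------------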
diff --git a/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java b/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java index e57f57174a883..ba3516d4f2e8d 100644 --- a/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java +++ b/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java @@ -79,13 +79,13 @@ public void reenableWatcher() throws Exception { if (isWatcherTest()) { assertBusy(() -> { ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); switch (state) { case "stopped": ClientYamlTestResponse startResponse = - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); throw new AssertionError("waiting until stopped state reached started state"); diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index b8ed9f55932cc..f4f75ab802080 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -1,5 +1,3 @@ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask - evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' @@ -20,15 +18,15 @@ compileTestJava.options.compilerArgs << "-Xlint:-try" // Instead we create a separate task to run the // tests based on ESIntegTestCase -task internalClusterTest(type: RandomizedTestingTask, - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Java fantasy integration tests', - dependsOn: unitTest.dependsOn) { +task internalClusterTest(type: Test) { + description = 'Java fantasy integration tests' + mustRunAfter test + include '**/*IT.class' systemProperty 'es.set.netty.runtime.available.processors', 'false' } + check.dependsOn internalClusterTest -internalClusterTest.mustRunAfter test // add all sub-projects of the qa sub-project gradle.projectsEvaluated { diff --git a/x-pack/plugin/ccr/qa/build.gradle b/x-pack/plugin/ccr/qa/build.gradle index a70f1cbd0a738..d3e95d997c3fb 100644 --- a/x-pack/plugin/ccr/qa/build.gradle +++ b/x-pack/plugin/ccr/qa/build.gradle @@ -1,7 +1,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.build' -unitTest.enabled = false +test.enabled = false dependencies { compile project(':test:framework') diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index bba9709087a56..cbf30b54d5fdb 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -53,11 +53,11 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/" + + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'log', 
"${-> followClusterTest.getNodes().get(0).homeDir}/logs/" + "${-> followClusterTest.getNodes().get(0).clusterName}_server.json" finalizedBy 'leaderClusterTestCluster#stop' } check.dependsOn followClusterTest -unitTest.enabled = false // no unit tests for multi-cluster-search, only the rest integration test +test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index 5680eb41f3877..7c9c581c5be19 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -36,7 +36,7 @@ middleClusterTestCluster { middleClusterTestRunner { systemProperty 'tests.target_cluster', 'middle' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" } task followClusterTest(type: RestIntegTestTask) {} @@ -54,11 +54,11 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'tests.middle_host', "${-> middleClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.middle_host', "${-> middleClusterTest.nodes.get(0).httpUri()}" finalizedBy 'leaderClusterTestCluster#stop' finalizedBy 'middleClusterTestCluster#stop' } check.dependsOn followClusterTest -unitTest.enabled = false // no unit tests for multi-cluster-search, only the rest integration test +test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/XPackUsageIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/XPackUsageIT.java index 84271ce0acaf1..b429657fe57cf 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/XPackUsageIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/XPackUsageIT.java @@ -9,6 +9,7 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ObjectPath; import java.io.IOException; import java.util.Map; @@ -41,6 +42,9 @@ public void testXPackCcrUsage() throws Exception { assertThat(ccrUsage.get("follower_indices_count"), equalTo(previousFollowerIndicesCount + 2)); assertThat(ccrUsage.get("auto_follow_patterns_count"), equalTo(previousAutoFollowPatternsCount + 1)); assertThat((Integer) ccrUsage.get("last_follow_time_in_millis"), greaterThanOrEqualTo(0)); + // We need to wait until index following is active for auto followed indices: + // (otherwise pause follow may fail, if there are no shard follow tasks, in case this test gets executed too quickly) + assertIndexFollowingActive("messages-20200101"); }); deleteAutoFollowPattern("my_pattern"); @@ -82,4 +86,13 @@ private void createLeaderIndex(String indexName) throws IOException { return (Map) response.get("ccr"); } + private void assertIndexFollowingActive(String expectedFollowerIndex) throws IOException { + Request statsRequest = new Request("GET", "/" + expectedFollowerIndex + "/_ccr/info"); + Map response = 
toMap(client().performRequest(statsRequest)); + String actualFollowerIndex = ObjectPath.eval("follower_indices.0.follower_index", response); + assertThat(actualFollowerIndex, equalTo(expectedFollowerIndex)); + String followStatus = ObjectPath.eval("follower_indices.0.status", response); + assertThat(followStatus, equalTo("active")); + } + } diff --git a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle index d3044f760faf7..6d294c4075595 100644 --- a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle +++ b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle @@ -33,9 +33,9 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" finalizedBy 'leaderClusterTestCluster#stop' } check.dependsOn followClusterTest -unitTest.enabled = false +test.enabled = false diff --git a/x-pack/plugin/ccr/qa/rest/build.gradle b/x-pack/plugin/ccr/qa/rest/build.gradle index c890064504b51..b06535a17c096 100644 --- a/x-pack/plugin/ccr/qa/rest/build.gradle +++ b/x-pack/plugin/ccr/qa/rest/build.gradle @@ -37,4 +37,4 @@ restTestCluster { } check.dependsOn restTest -unitTest.enabled = false +test.enabled = false diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/forget_follower.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/forget_follower.yml new file mode 100644 index 0000000000000..08475a0026aef --- /dev/null +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/forget_follower.yml @@ -0,0 +1,80 @@ +--- +"Test forget follower": + - do: + cluster.state: {} + + - set: {master_node: master} + + - do: + nodes.info: {} + + - set: {nodes.$master.transport_address: local_ip} + + - do: + cluster.put_settings: + body: + transient: + cluster.remote.remote_cluster.seeds: $local_ip + flat_settings: true + + - match: {transient: {cluster.remote.remote_cluster.seeds: $local_ip}} + + - do: + indices.create: + index: leader_index + body: + settings: + index: + number_of_shards: 1 + soft_deletes: + enabled: true + - is_true: acknowledged + + - do: + ccr.follow: + index: follower_index + wait_for_active_shards: 1 + body: + remote_cluster: remote_cluster + leader_index: leader_index + - is_true: follow_index_created + - is_true: follow_index_shards_acked + - is_true: index_following_started + + - do: + info: {} + + - set: {cluster_name: cluster_name} + + - do: + indices.stats: {index: follower_index} + + - set: {indices.follower_index.uuid: follower_index_uuid} + + - do: + ccr.forget_follower: + index: leader_index + body: + follower_cluster: $cluster_name + follower_index: follower_index + follower_index_uuid: $follower_index_uuid + leader_remote_cluster: remote_cluster + - match: { _shards.total: 1 } + - match: { _shards.successful: 1} + - match: { _shards.failed: 0} + - is_false: _shards.failure + + - do: + ccr.pause_follow: + index: follower_index + - is_true: acknowledged + + - do: + indices.close: + index: follower_index + - is_true: acknowledged + + - do: + ccr.unfollow: + index: follower_index + - is_true: acknowledged diff --git a/x-pack/plugin/ccr/qa/restart/build.gradle b/x-pack/plugin/ccr/qa/restart/build.gradle new file mode 100644 index 0000000000000..8501de714fae6 --- /dev/null +++ b/x-pack/plugin/ccr/qa/restart/build.gradle @@ -0,0 +1,61 
@@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(':x-pack:plugin:ccr:qa') +} + +task leaderClusterTest(type: RestIntegTestTask) { + mustRunAfter(precommit) +} + +leaderClusterTestCluster { + numNodes = 1 + clusterName = 'leader-cluster' + setting 'xpack.license.self_generated.type', 'trial' + setting 'node.name', 'leader' +} + +leaderClusterTestRunner { + systemProperty 'tests.target_cluster', 'leader' +} + +task followClusterTest(type: RestIntegTestTask) {} + +followClusterTestCluster { + dependsOn leaderClusterTestRunner + numNodes = 1 + clusterName = 'follow-cluster' + setting 'xpack.monitoring.collection.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" + setting 'node.name', 'follow' +} + +followClusterTestRunner { + systemProperty 'tests.target_cluster', 'follow' + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" +} + +task followClusterRestartTest(type: RestIntegTestTask) {} + +followClusterRestartTestCluster { + dependsOn followClusterTestRunner + numNodes = 1 + clusterName = 'follow-cluster' + dataDir = { nodeNumber -> followClusterTest.nodes[0].dataDir } + setting 'xpack.monitoring.collection.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" + setting 'node.name', 'follow' +} + +followClusterRestartTestRunner { + systemProperty 'tests.target_cluster', 'follow-restart' + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + finalizedBy 'leaderClusterTestCluster#stop' +} + +check.dependsOn followClusterRestartTest +test.enabled = false diff --git a/x-pack/plugin/ccr/qa/restart/src/test/java/org/elasticsearch/xpack/ccr/RestartIT.java b/x-pack/plugin/ccr/qa/restart/src/test/java/org/elasticsearch/xpack/ccr/RestartIT.java new file mode 100644 index 0000000000000..cbbc6945034c0 --- /dev/null +++ b/x-pack/plugin/ccr/qa/restart/src/test/java/org/elasticsearch/xpack/ccr/RestartIT.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; + +import java.io.IOException; + +public class RestartIT extends ESCCRRestTestCase { + + public void testRestart() throws Exception { + final int numberOfDocuments = 128; + final String testsTargetCluster = System.getProperty("tests.target_cluster"); + switch (testsTargetCluster) { + case "leader": { + // create a single index "leader" on the leader + createIndexAndIndexDocuments("leader", numberOfDocuments, client()); + break; + } + case "follow": { + // follow "leader" with "follow-leader" on the follower + followIndex("leader", "follow-leader"); + verifyFollower("follow-leader", numberOfDocuments, client()); + + // now create an auto-follow pattern for "leader-*" + final Request putPatternRequest = new Request("PUT", "/_ccr/auto_follow/leader_cluster_pattern"); + putPatternRequest.setJsonEntity("{" + + "\"leader_index_patterns\": [\"leader-*\"]," + + "\"remote_cluster\": \"leader_cluster\"," + + "\"follow_index_pattern\":\"follow-{{leader_index}}\"}"); + assertOK(client().performRequest(putPatternRequest)); + try (RestClient leaderClient = buildLeaderClient()) { + // create "leader-1" on the leader, which should be replicated to "follow-leader-1" on the follower + createIndexAndIndexDocuments("leader-1", numberOfDocuments, leaderClient); + // the follower should catch up + verifyFollower("follow-leader-1", numberOfDocuments, client()); + } + break; + } + case "follow-restart": { + try (RestClient leaderClient = buildLeaderClient()) { + // create "leader-2" on the leader, and index some additional documents into existing indices + createIndexAndIndexDocuments("leader-2", numberOfDocuments, leaderClient); + for (final String index : new String[]{"leader", "leader-1", "leader-2"}) { + indexDocuments(index, numberOfDocuments, numberOfDocuments, leaderClient); + } + // the followers should catch up + for (final String index : new String[]{"follow-leader", "follow-leader-1", "follow-leader-2"}) { + logger.info("verifying {} using {}", index, client().getNodes()); + verifyFollower(index, 2 * numberOfDocuments, client()); + } + // one more index "leader-3" on the follower + createIndexAndIndexDocuments("leader-3", 2 * numberOfDocuments, leaderClient); + // the follower should catch up + verifyFollower("follow-leader-3", 2 * numberOfDocuments, client()); + } + break; + } + default: { + throw new IllegalArgumentException("unexpected value [" + testsTargetCluster + "] for tests.target_cluster"); + } + } + } + + private void createIndexAndIndexDocuments(final String index, final int numberOfDocuments, final RestClient client) throws IOException { + final Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build(); + final Request createIndexRequest = new Request("PUT", "/" + index); + createIndexRequest.setJsonEntity("{\"settings\":" + Strings.toString(settings) + "}"); + assertOK(client.performRequest(createIndexRequest)); + indexDocuments(index, numberOfDocuments, 0, client); + } + + private void indexDocuments( + final String index, + final int numberOfDocuments, + final int initial, + final RestClient client) throws IOException { + for (int i = 0, j = initial; i < numberOfDocuments; i++, j++) { + index(client, index, Integer.toString(j), "field", j); + } + 
assertOK(client.performRequest(new Request("POST", "/" + index + "/_refresh"))); + } + + private void verifyFollower(final String index, final int numberOfDocuments, final RestClient client) throws Exception { + assertBusy(() -> { + ensureYellow(index, client); + verifyDocuments(index, numberOfDocuments, "*:*", client); + }); + } + +} diff --git a/x-pack/plugin/ccr/qa/security/build.gradle b/x-pack/plugin/ccr/qa/security/build.gradle index af4238c20075e..0e082f51d71a1 100644 --- a/x-pack/plugin/ccr/qa/security/build.gradle +++ b/x-pack/plugin/ccr/qa/security/build.gradle @@ -22,7 +22,7 @@ leaderClusterTestCluster { setupCommand 'setupTestAdmin', 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" setupCommand 'setupCcrUser', - 'bin/elasticsearch-users', 'useradd', "test_ccr", '-p', 'x-pack-test-password', '-r', "manage_ccr" + 'bin/elasticsearch-users', 'useradd', "test_ccr", '-p', 'x-pack-test-password', '-r', "ccruser" waitCondition = { node, ant -> File tmpFile = new File(node.cwd, 'wait.success') ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", @@ -68,9 +68,9 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" finalizedBy 'leaderClusterTestCluster#stop' } check.dependsOn followClusterTest -unitTest.enabled = false // no unit tests for multi-cluster-search, only the rest integration test +test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/security/follower-roles.yml b/x-pack/plugin/ccr/qa/security/follower-roles.yml index be3e6cf5e1755..4a91c072043bb 100644 --- a/x-pack/plugin/ccr/qa/security/follower-roles.yml +++ b/x-pack/plugin/ccr/qa/security/follower-roles.yml @@ -2,7 +2,7 @@ ccruser: cluster: - manage_ccr indices: - - names: [ 'allowed-index', 'logs-eu-*' ] + - names: [ 'allowed-index', 'forget-follower', 'logs-eu-*' ] privileges: - monitor - read diff --git a/x-pack/plugin/ccr/qa/security/leader-roles.yml b/x-pack/plugin/ccr/qa/security/leader-roles.yml index 99fa62cbe832b..944af38b92ce5 100644 --- a/x-pack/plugin/ccr/qa/security/leader-roles.yml +++ b/x-pack/plugin/ccr/qa/security/leader-roles.yml @@ -2,7 +2,8 @@ ccruser: cluster: - read_ccr indices: - - names: [ 'allowed-index', 'logs-eu-*' ] + - names: [ 'allowed-index', 'forget-leader', 'logs-eu-*' ] privileges: - monitor - read + - manage_leader_index diff --git a/x-pack/plugin/ccr/qa/security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java b/x-pack/plugin/ccr/qa/security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java index 91b94f1c4b57d..cb54248ee3dbc 100644 --- a/x-pack/plugin/ccr/qa/security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java +++ b/x-pack/plugin/ccr/qa/security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ccr; import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; @@ -13,14 +14,19 @@ import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; public class FollowIndexSecurityIT extends ESCCRRestTestCase { @@ -176,4 +182,55 @@ public void testAutoFollowPatterns() throws Exception { pauseFollow(client(), allowedIndex); } + public void testForgetFollower() throws IOException { + final String forgetLeader = "forget-leader"; + final String forgetFollower = "forget-follower"; + if ("leader".equals(targetCluster)) { + logger.info("running against leader cluster"); + final Settings indexSettings = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 1) + .put("index.soft_deletes.enabled", true) + .build(); + createIndex(forgetLeader, indexSettings); + } else { + logger.info("running against follower cluster"); + followIndex(client(), "leader_cluster", forgetLeader, forgetFollower); + + final Response response = client().performRequest(new Request("GET", "/" + forgetFollower + "/_stats")); + final String followerIndexUUID = ObjectPath.createFromResponse(response).evaluate("indices." + forgetFollower + ".uuid"); + + assertOK(client().performRequest(new Request("POST", "/" + forgetFollower + "/_ccr/pause_follow"))); + + try (RestClient leaderClient = buildLeaderClient(restClientSettings())) { + final Request request = new Request("POST", "/" + forgetLeader + "/_ccr/forget_follower"); + final String requestBody = "{" + + "\"follower_cluster\":\"follow-cluster\"," + + "\"follower_index\":\"" + forgetFollower + "\"," + + "\"follower_index_uuid\":\"" + followerIndexUUID + "\"," + + "\"leader_remote_cluster\":\"leader_cluster\"" + + "}"; + request.setJsonEntity(requestBody); + final Response forgetFollowerResponse = leaderClient.performRequest(request); + assertOK(forgetFollowerResponse); + final Map shards = ObjectPath.createFromResponse(forgetFollowerResponse).evaluate("_shards"); + assertNull(shards.get("failures")); + assertThat(shards.get("total"), equalTo(1)); + assertThat(shards.get("successful"), equalTo(1)); + assertThat(shards.get("failed"), equalTo(0)); + + final Request retentionLeasesRequest = new Request("GET", "/" + forgetLeader + "/_stats"); + retentionLeasesRequest.addParameter("level", "shards"); + final Response retentionLeasesResponse = leaderClient.performRequest(retentionLeasesRequest); + final ArrayList shardsStats = + ObjectPath.createFromResponse(retentionLeasesResponse).evaluate("indices." 
+ forgetLeader + ".shards.0"); + assertThat(shardsStats, hasSize(1)); + final Map shardStatsAsMap = (Map) shardsStats.get(0); + final Map retentionLeasesStats = (Map) shardStatsAsMap.get("retention_leases"); + final List leases = (List) retentionLeasesStats.get("leases"); + assertThat(leases, empty()); + } + } + } + } diff --git a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java index 656328d5ead9e..33e7c2f2bf177 100644 --- a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java +++ b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java @@ -28,6 +28,7 @@ import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class ESCCRRestTestCase extends ESRestTestCase { @@ -119,13 +120,13 @@ protected static void verifyDocuments(final String index, Map response = toMap(client.performRequest(request)); int numDocs = (int) XContentMapValues.extractValue("hits.total", response); - assertThat(numDocs, equalTo(expectedNumDocs)); + assertThat(index, numDocs, equalTo(expectedNumDocs)); List hits = (List) XContentMapValues.extractValue("hits.hits", response); assertThat(hits.size(), equalTo(expectedNumDocs)); for (int i = 0; i < expectedNumDocs; i++) { int value = (int) XContentMapValues.extractValue("_source.field", (Map) hits.get(i)); - assertThat(i, equalTo(value)); + assertThat(index, i, equalTo(value)); } } @@ -139,8 +140,9 @@ protected static void verifyCcrMonitoring(final String expectedLeaderIndex, fina throw new AssertionError("error while searching", e); } - int numberOfOperationsReceived = 0; - int numberOfOperationsIndexed = 0; + int followerMaxSeqNo = 0; + int followerMappingVersion = 0; + int followerSettingsVersion = 0; List hits = (List) XContentMapValues.extractValue("hits.hits", response); assertThat(hits.size(), greaterThanOrEqualTo(1)); @@ -153,16 +155,20 @@ protected static void verifyCcrMonitoring(final String expectedLeaderIndex, fina final String followerIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.follower_index", hit); assertThat(followerIndex, equalTo(expectedFollowerIndex)); - int foundNumberOfOperationsReceived = - (int) XContentMapValues.extractValue("_source.ccr_stats.operations_read", hit); - numberOfOperationsReceived = Math.max(numberOfOperationsReceived, foundNumberOfOperationsReceived); - int foundNumberOfOperationsIndexed = - (int) XContentMapValues.extractValue("_source.ccr_stats.operations_written", hit); - numberOfOperationsIndexed = Math.max(numberOfOperationsIndexed, foundNumberOfOperationsIndexed); + int foundFollowerMaxSeqNo = + (int) XContentMapValues.extractValue("_source.ccr_stats.follower_max_seq_no", hit); + followerMaxSeqNo = Math.max(followerMaxSeqNo, foundFollowerMaxSeqNo); + int foundFollowerMappingVersion = + (int) XContentMapValues.extractValue("_source.ccr_stats.follower_mapping_version", hit); + followerMappingVersion = Math.max(followerMappingVersion, foundFollowerMappingVersion); + int foundFollowerSettingsVersion = + (int) XContentMapValues.extractValue("_source.ccr_stats.follower_settings_version", hit); + followerSettingsVersion = Math.max(followerSettingsVersion, foundFollowerSettingsVersion); } - 
assertThat(numberOfOperationsReceived, greaterThanOrEqualTo(1)); - assertThat(numberOfOperationsIndexed, greaterThanOrEqualTo(1)); + assertThat(followerMaxSeqNo, greaterThan(0)); + assertThat(followerMappingVersion, greaterThan(0)); + assertThat(followerSettingsVersion, greaterThan(0)); } protected static void verifyAutoFollowMonitoring() throws IOException { @@ -199,15 +205,19 @@ protected static Map toMap(String response) { return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false); } - protected static void ensureYellow(String index) throws IOException { - Request request = new Request("GET", "/_cluster/health/" + index); + protected static void ensureYellow(final String index) throws IOException { + ensureYellow(index, adminClient()); + } + + protected static void ensureYellow(final String index, final RestClient client) throws IOException { + final Request request = new Request("GET", "/_cluster/health/" + index); request.addParameter("wait_for_status", "yellow"); request.addParameter("wait_for_active_shards", "1"); request.addParameter("wait_for_no_relocating_shards", "true"); request.addParameter("wait_for_no_initializing_shards", "true"); - request.addParameter("timeout", "70s"); + request.addParameter("timeout", "5s"); request.addParameter("level", "shards"); - adminClient().performRequest(request); + client.performRequest(request); } protected int countCcrNodeTasks() throws IOException { @@ -249,16 +259,25 @@ protected RestClient buildLeaderClient() throws IOException { return buildClient(System.getProperty("tests.leader_host")); } + protected RestClient buildLeaderClient(final Settings settings) throws IOException { + assert "leader".equals(targetCluster) == false; + return buildClient(System.getProperty("tests.leader_host"), settings); + } + protected RestClient buildMiddleClient() throws IOException { assert "middle".equals(targetCluster) == false; return buildClient(System.getProperty("tests.middle_host")); } private RestClient buildClient(final String url) throws IOException { + return buildClient(url, restAdminSettings()); + } + + private RestClient buildClient(final String url, final Settings settings) throws IOException { int portSeparator = url.lastIndexOf(':'); HttpHost httpHost = new HttpHost(url.substring(0, portSeparator), - Integer.parseInt(url.substring(portSeparator + 1)), getProtocol()); - return buildClient(restAdminSettings(), new HttpHost[]{httpHost}); + Integer.parseInt(url.substring(portSeparator + 1)), getProtocol()); + return buildClient(settings, new HttpHost[]{httpHost}); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index a7fa69e7abd39..7fa7f37f4b71d 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.indices.mapping.put.MappingRequestValidator; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseField; @@ -55,6 +56,7 @@ import org.elasticsearch.xpack.ccr.action.TransportDeleteAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.action.TransportFollowInfoAction; import 
org.elasticsearch.xpack.ccr.action.TransportFollowStatsAction; +import org.elasticsearch.xpack.ccr.action.TransportForgetFollowerAction; import org.elasticsearch.xpack.ccr.action.TransportGetAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.action.TransportPauseFollowAction; import org.elasticsearch.xpack.ccr.action.TransportPutAutoFollowPatternAction; @@ -75,6 +77,7 @@ import org.elasticsearch.xpack.ccr.rest.RestDeleteAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.rest.RestFollowInfoAction; import org.elasticsearch.xpack.ccr.rest.RestFollowStatsAction; +import org.elasticsearch.xpack.ccr.rest.RestForgetFollowerAction; import org.elasticsearch.xpack.ccr.rest.RestGetAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.rest.RestPauseFollowAction; import org.elasticsearch.xpack.ccr.rest.RestPutAutoFollowPatternAction; @@ -82,12 +85,14 @@ import org.elasticsearch.xpack.ccr.rest.RestResumeFollowAction; import org.elasticsearch.xpack.ccr.rest.RestUnfollowAction; import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.CCRFeatureSet; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; +import org.elasticsearch.xpack.core.ccr.action.ForgetFollowerAction; import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction; import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction; import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; @@ -127,6 +132,7 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E private final CcrLicenseChecker ccrLicenseChecker; private final SetOnce restoreSourceService = new SetOnce<>(); private final SetOnce ccrSettings = new SetOnce<>(); + private final SetOnce threadPool = new SetOnce<>(); private Client client; private final boolean transportClientMode; @@ -171,15 +177,20 @@ public Collection createComponents( CcrSettings ccrSettings = new CcrSettings(settings, clusterService.getClusterSettings()); this.ccrSettings.set(ccrSettings); + this.threadPool.set(threadPool); CcrRestoreSourceService restoreSourceService = new CcrRestoreSourceService(threadPool, ccrSettings); this.restoreSourceService.set(restoreSourceService); return Arrays.asList( - ccrLicenseChecker, - restoreSourceService, - new CcrRepositoryManager(settings, clusterService, client), - new AutoFollowCoordinator(settings, client, clusterService, ccrLicenseChecker, - threadPool::relativeTimeInMillis, threadPool::absoluteTimeInMillis) - ); + ccrLicenseChecker, + restoreSourceService, + new CcrRepositoryManager(settings, clusterService, client), + new AutoFollowCoordinator( + settings, + client, + clusterService, + ccrLicenseChecker, + threadPool::relativeTimeInMillis, + threadPool::absoluteTimeInMillis)); } @Override @@ -221,7 +232,9 @@ public List> getPersistentTasksExecutor(ClusterServic // auto-follow actions new ActionHandler<>(DeleteAutoFollowPatternAction.INSTANCE, TransportDeleteAutoFollowPatternAction.class), new ActionHandler<>(PutAutoFollowPatternAction.INSTANCE, TransportPutAutoFollowPatternAction.class), - new ActionHandler<>(GetAutoFollowPatternAction.INSTANCE, TransportGetAutoFollowPatternAction.class)); + new 
ActionHandler<>(GetAutoFollowPatternAction.INSTANCE, TransportGetAutoFollowPatternAction.class), + // forget follower action + new ActionHandler<>(ForgetFollowerAction.INSTANCE, TransportForgetFollowerAction.class)); } public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, @@ -245,14 +258,16 @@ public List getRestHandlers(Settings settings, RestController restC // auto-follow APIs new RestDeleteAutoFollowPatternAction(settings, restController), new RestPutAutoFollowPatternAction(settings, restController), - new RestGetAutoFollowPatternAction(settings, restController)); + new RestGetAutoFollowPatternAction(settings, restController), + // forget follower API + new RestForgetFollowerAction(settings, restController)); } public List getNamedWriteables() { return Arrays.asList( // Persistent action requests new NamedWriteableRegistry.Entry(PersistentTaskParams.class, ShardFollowTask.NAME, - ShardFollowTask::new), + ShardFollowTask::readFrom), // Task statuses new NamedWriteableRegistry.Entry(Task.Status.class, ShardFollowNodeTaskStatus.STATUS_PARSER_NAME, @@ -262,11 +277,17 @@ public List getNamedWriteables() { public List getNamedXContent() { return Arrays.asList( - // Persistent action requests - new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(ShardFollowTask.NAME), + // auto-follow metadata, persisted into the cluster state as XContent + new NamedXContentRegistry.Entry( + MetaData.Custom.class, + new ParseField(AutoFollowMetadata.TYPE), + AutoFollowMetadata::fromXContent), + // persistent action requests + new NamedXContentRegistry.Entry( + PersistentTaskParams.class, + new ParseField(ShardFollowTask.NAME), ShardFollowTask::fromXContent), - - // Task statuses + // task statuses new NamedXContentRegistry.Entry( ShardFollowNodeTaskStatus.class, new ParseField(ShardFollowNodeTaskStatus.STATUS_PARSER_NAME), @@ -307,7 +328,7 @@ public List> getExecutorBuilders(Settings settings) { @Override public Map getInternalRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { Repository.Factory repositoryFactory = - (metadata) -> new CcrRepository(metadata, client, ccrLicenseChecker, settings, ccrSettings.get()); + (metadata) -> new CcrRepository(metadata, client, ccrLicenseChecker, settings, ccrSettings.get(), threadPool.get()); return Collections.singletonMap(CcrRepository.TYPE, repositoryFactory); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java index c1a28b72cf8fe..c241c7a9aa070 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xpack.ccr.action.repositories.DeleteInternalCcrRepositoryAction; import org.elasticsearch.xpack.ccr.action.repositories.DeleteInternalCcrRepositoryRequest; @@ -75,7 +76,8 @@ void init() { } @Override - protected void updateRemoteCluster(String clusterAlias, List addresses, String proxy) { + protected void updateRemoteCluster(String clusterAlias, List addresses, String proxy, boolean 
compressionEnabled, + TimeValue pingSchedule) { String repositoryName = CcrRepository.NAME_PREFIX + clusterAlias; if (addresses.isEmpty()) { deleteRepository(repositoryName); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRetentionLeases.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRetentionLeases.java new file mode 100644 index 0000000000000..7f165a8cf5d56 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRetentionLeases.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.seqno.RetentionLeaseActions; +import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; +import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; +import org.elasticsearch.index.shard.ShardId; + +import java.util.Locale; +import java.util.Optional; +import java.util.concurrent.TimeUnit; + +public class CcrRetentionLeases { + + // this setting is intentionally not registered, it is only used in tests + public static final Setting RETENTION_LEASE_RENEW_INTERVAL_SETTING = + Setting.timeSetting( + "index.ccr.retention_lease.renew_interval", + new TimeValue(30, TimeUnit.SECONDS), + new TimeValue(0, TimeUnit.MILLISECONDS), + Setting.Property.NodeScope); + + /** + * The retention lease ID used by followers. + * + * @param localClusterName the local cluster name + * @param followerIndex the follower index + * @param remoteClusterAlias the remote cluster alias + * @param leaderIndex the leader index + * @return the retention lease ID + */ + public static String retentionLeaseId( + final String localClusterName, + final Index followerIndex, + final String remoteClusterAlias, + final Index leaderIndex) { + return String.format( + Locale.ROOT, + "%s/%s/%s-following-%s/%s/%s", + localClusterName, + followerIndex.getName(), + followerIndex.getUUID(), + remoteClusterAlias, + leaderIndex.getName(), + leaderIndex.getUUID()); + } + + /** + * Synchronously requests to add a retention lease with the specified retention lease ID on the specified leader shard using the given + * remote client. Note that this method will block up to the specified timeout. 
+ * + * @param leaderShardId the leader shard ID + * @param retentionLeaseId the retention lease ID + * @param retainingSequenceNumber the retaining sequence number + * @param remoteClient the remote client on which to execute this request + * @param timeout the timeout + * @return an optional exception indicating whether or not the retention lease already exists + */ + public static Optional syncAddRetentionLease( + final ShardId leaderShardId, + final String retentionLeaseId, + final long retainingSequenceNumber, + final Client remoteClient, + final TimeValue timeout) { + try { + final PlainActionFuture response = new PlainActionFuture<>(); + asyncAddRetentionLease(leaderShardId, retentionLeaseId, retainingSequenceNumber, remoteClient, response); + response.actionGet(timeout); + return Optional.empty(); + } catch (final RetentionLeaseAlreadyExistsException e) { + return Optional.of(e); + } + } + + /** + * Asynchronously requests to add a retention lease with the specified retention lease ID on the specified leader shard using the given + * remote client. Note that this method will return immediately, with the specified listener callback invoked to indicate a response + * or failure. + * + * @param leaderShardId the leader shard ID + * @param retentionLeaseId the retention lease ID + * @param retainingSequenceNumber the retaining sequence number + * @param remoteClient the remote client on which to execute this request + * @param listener the listener + */ + public static void asyncAddRetentionLease( + final ShardId leaderShardId, + final String retentionLeaseId, + final long retainingSequenceNumber, + final Client remoteClient, + final ActionListener listener) { + final RetentionLeaseActions.AddRequest request = + new RetentionLeaseActions.AddRequest(leaderShardId, retentionLeaseId, retainingSequenceNumber, "ccr"); + remoteClient.execute(RetentionLeaseActions.Add.INSTANCE, request, listener); + } + + /** + * Synchronously requests to renew a retention lease with the specified retention lease ID on the specified leader shard using the given + * remote client. Note that this method will block up to the specified timeout. + * + * @param leaderShardId the leader shard ID + * @param retentionLeaseId the retention lease ID + * @param retainingSequenceNumber the retaining sequence number + * @param remoteClient the remote client on which to execute this request + * @param timeout the timeout + * @return an optional exception indicating whether or not the retention lease already exists + */ + public static Optional syncRenewRetentionLease( + final ShardId leaderShardId, + final String retentionLeaseId, + final long retainingSequenceNumber, + final Client remoteClient, + final TimeValue timeout) { + try { + final PlainActionFuture response = new PlainActionFuture<>(); + asyncRenewRetentionLease(leaderShardId, retentionLeaseId, retainingSequenceNumber, remoteClient, response); + response.actionGet(timeout); + return Optional.empty(); + } catch (final RetentionLeaseNotFoundException e) { + return Optional.of(e); + } + } + + /** + * Asynchronously requests to renew a retention lease with the specified retention lease ID on the specified leader shard using the + * given remote client. Note that this method will return immediately, with the specified listener callback invoked to indicate a + * response or failure. 
+ * + * @param leaderShardId the leader shard ID + * @param retentionLeaseId the retention lease ID + * @param retainingSequenceNumber the retaining sequence number + * @param remoteClient the remote client on which to execute this request + * @param listener the listener + */ + public static void asyncRenewRetentionLease( + final ShardId leaderShardId, + final String retentionLeaseId, + final long retainingSequenceNumber, + final Client remoteClient, + final ActionListener listener) { + final RetentionLeaseActions.RenewRequest request = + new RetentionLeaseActions.RenewRequest(leaderShardId, retentionLeaseId, retainingSequenceNumber, "ccr"); + remoteClient.execute(RetentionLeaseActions.Renew.INSTANCE, request, listener); + } + + /** + * Asynchronously requests to remove a retention lease with the specified retention lease ID on the specified leader shard using the + * given remote client. Note that this method will return immediately, with the specified listener callback invoked to indicate a + * response or failure. + * + * @param leaderShardId the leader shard ID + * @param retentionLeaseId the retention lease ID + * @param remoteClient the remote client on which to execute this request + * @param listener the listener + */ + public static void asyncRemoveRetentionLease( + final ShardId leaderShardId, + final String retentionLeaseId, + final Client remoteClient, + final ActionListener listener) { + final RetentionLeaseActions.RemoveRequest request = new RetentionLeaseActions.RemoveRequest(leaderShardId, retentionLeaseId); + remoteClient.execute(RetentionLeaseActions.Remove.INSTANCE, request, listener); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java index 0e147f66d6ebc..9abcfb86e2b7c 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java @@ -57,6 +57,12 @@ public final class CcrSettings { new ByteSizeValue(1, ByteSizeUnit.KB), new ByteSizeValue(1, ByteSizeUnit.GB), Setting.Property.Dynamic, Setting.Property.NodeScope); + /** + * Controls the maximum number of file chunk requests that are sent concurrently per recovery to the leader. + */ + public static final Setting INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING = + Setting.intSetting("ccr.indices.recovery.max_concurrent_file_chunks", 5, 1, 10, Property.Dynamic, Property.NodeScope); + /** * The leader must open resources for a ccr recovery. If there is no activity for this interval of time, * the leader will close the restore session. 
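The surrounding hunks register `ccr.indices.recovery.max_concurrent_file_chunks` as a dynamic, node-scoped setting, so it can be adjusted at runtime through the cluster settings API rather than requiring a restart. A minimal sketch under that assumption (hypothetical class name; the target value of 8 is arbitrary within the registered 1 to 10 bounds), using the same low-level `Request`/`RestClient` API seen in the tests above:

[source,java]
--------------------------------------------------
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.io.IOException;

public class CcrRecoverySettingsExample {

    // Raises the per-recovery limit on concurrent file chunk requests from the
    // default of 5 to 8; the setting is dynamic, so no node restart is needed.
    static void setMaxConcurrentFileChunks(RestClient client) throws IOException {
        Request request = new Request("PUT", "/_cluster/settings");
        request.setJsonEntity(
            "{\"persistent\":{\"ccr.indices.recovery.max_concurrent_file_chunks\":8}}");
        Response response = client.performRequest(request);
        assert response.getStatusLine().getStatusCode() == 200;
    }
}
--------------------------------------------------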
@@ -77,7 +83,7 @@ public final class CcrSettings { * * @return the settings */ - static List> getSettings() { + public static List> getSettings() { return Arrays.asList( XPackSettings.CCR_ENABLED_SETTING, CCR_FOLLOWING_INDEX_SETTING, @@ -86,6 +92,7 @@ static List> getSettings() { INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT, RECOVERY_CHUNK_SIZE, + INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, CCR_WAIT_FOR_METADATA_TIMEOUT); } @@ -93,14 +100,17 @@ static List> getSettings() { private volatile TimeValue recoveryActivityTimeout; private volatile TimeValue recoveryActionTimeout; private volatile ByteSizeValue chunkSize; + private volatile int maxConcurrentFileChunks; public CcrSettings(Settings settings, ClusterSettings clusterSettings) { this.recoveryActivityTimeout = INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.get(settings); this.recoveryActionTimeout = INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.get(settings); this.ccrRateLimiter = new CombinedRateLimiter(RECOVERY_MAX_BYTES_PER_SECOND.get(settings)); this.chunkSize = RECOVERY_MAX_BYTES_PER_SECOND.get(settings); + this.maxConcurrentFileChunks = INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(RECOVERY_MAX_BYTES_PER_SECOND, this::setMaxBytesPerSec); clusterSettings.addSettingsUpdateConsumer(RECOVERY_CHUNK_SIZE, this::setChunkSize); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, this::setMaxConcurrentFileChunks); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setRecoveryActivityTimeout); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTION_TIMEOUT_SETTING, this::setRecoveryActionTimeout); } @@ -109,6 +119,10 @@ private void setChunkSize(ByteSizeValue chunkSize) { this.chunkSize = chunkSize; } + private void setMaxConcurrentFileChunks(int maxConcurrentFileChunks) { + this.maxConcurrentFileChunks = maxConcurrentFileChunks; + } + private void setMaxBytesPerSec(ByteSizeValue maxBytesPerSec) { ccrRateLimiter.setMBPerSec(maxBytesPerSec); } @@ -125,6 +139,10 @@ public ByteSizeValue getChunkSize() { return chunkSize; } + public int getMaxConcurrentFileChunks() { + return maxConcurrentFileChunks; + } + public CombinedRateLimiter getRateLimiter() { return ccrRateLimiter; } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 03e936ca8c2ea..dade0b3d9d74c 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -26,6 +25,8 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.CopyOnWriteHashMap; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.component.Lifecycle; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -66,7 +67,7 @@ * A component that runs only on the elected master node and follows leader indices automatically * if they match with a auto follow pattern that is defined in {@link AutoFollowMetadata}. */ -public class AutoFollowCoordinator implements ClusterStateListener { +public class AutoFollowCoordinator extends AbstractLifecycleComponent implements ClusterStateListener { private static final Logger LOGGER = LogManager.getLogger(AutoFollowCoordinator.class); private static final int MAX_AUTO_FOLLOW_ERRORS = 256; @@ -99,7 +100,6 @@ public AutoFollowCoordinator( this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker, "ccrLicenseChecker"); this.relativeMillisTimeProvider = relativeMillisTimeProvider; this.absoluteMillisTimeProvider = absoluteMillisTimeProvider; - clusterService.addListener(this); this.recentAutoFollowErrors = new LinkedHashMap>() { @Override protected boolean removeEldestEntry(final Map.Entry> eldest) { @@ -117,6 +117,27 @@ protected boolean removeEldestEntry(final Map.Entry autoFollowers = this.autoFollowers; final TreeMap timesSinceLastAutoFollowPerRemoteCluster = new TreeMap<>(); @@ -246,8 +267,10 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS }; newAutoFollowers.put(remoteCluster, autoFollower); - LOGGER.info("starting auto follower for remote cluster [{}]", remoteCluster); - autoFollower.start(); + LOGGER.info("starting auto-follower for remote cluster [{}]", remoteCluster); + if (lifecycleState() == Lifecycle.State.STARTED) { + autoFollower.start(); + } } List removedRemoteClusters = new ArrayList<>(); @@ -257,13 +280,15 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS boolean exist = autoFollowMetadata.getPatterns().values().stream() .anyMatch(pattern -> pattern.getRemoteCluster().equals(remoteCluster)); if (exist == false) { - LOGGER.info("removing auto follower for remote cluster [{}]", remoteCluster); + LOGGER.info("removing auto-follower for remote cluster [{}]", remoteCluster); autoFollower.removed = true; removedRemoteClusters.add(remoteCluster); } else if (autoFollower.remoteClusterConnectionMissing) { - LOGGER.info("retrying auto follower [{}] after remote cluster connection was missing", remoteCluster); + LOGGER.info("retrying auto-follower for remote cluster [{}] after remote cluster connection was missing", remoteCluster); autoFollower.remoteClusterConnectionMissing = false; - autoFollower.start(); + if (lifecycleState() == Lifecycle.State.STARTED) { + autoFollower.start(); + } } } assert assertNoOtherActiveAutoFollower(newAutoFollowers); @@ -313,6 +338,7 @@ abstract static class AutoFollower { volatile boolean removed = false; private volatile CountDown autoFollowPatternsCountDown; private volatile AtomicArray autoFollowResults; + private volatile boolean stop; AutoFollower(final String remoteCluster, final Consumer> statsUpdater, @@ -325,6 +351,10 @@ abstract static class AutoFollower { } void start() { + if (stop) { + LOGGER.trace("auto-follower is stopped for remote cluster [{}]", remoteCluster); + return; + } if (removed) { // This check exists to avoid two AutoFollower instances a single remote cluster. 
// (If an auto follow pattern is deleted and then added back quickly enough then @@ -366,6 +396,7 @@ void start() { if (remoteClusterStateResponse != null) { assert remoteError == null; if (remoteClusterStateResponse.isWaitForTimedOut()) { + LOGGER.trace("auto-follow coordinator timed out getting remote cluster state from [{}]", remoteCluster); start(); return; } @@ -388,6 +419,11 @@ void start() { }); } + void stop() { + LOGGER.trace("stopping auto-follower for remote cluster [{}]", remoteCluster); + stop = true; + } + private void autoFollowIndices(final AutoFollowMetadata autoFollowMetadata, final ClusterState clusterState, final ClusterState remoteClusterState, @@ -448,9 +484,7 @@ private void checkAutoFollowPattern(String autoFollowPattenName, } } else { final Settings leaderIndexSettings = remoteMetadata.getIndexSafe(indexToFollow).getSettings(); - if (leaderIndexSettings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), - IndexMetaData.SETTING_INDEX_VERSION_CREATED.get(leaderIndexSettings).onOrAfter(Version.V_7_0_0)) == false) { - + if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(leaderIndexSettings) == false) { String message = String.format(Locale.ROOT, "index [%s] cannot be followed, because soft deletes are not enabled", indexToFollow.getName()); LOGGER.warn(message); @@ -526,7 +560,7 @@ private void followLeaderIndex(String autoFollowPattenName, request.getParameters().setMaxWriteBufferCount(pattern.getMaxWriteBufferCount()); request.getParameters().setMaxWriteBufferSize(pattern.getMaxWriteBufferSize()); request.getParameters().setMaxRetryDelay(pattern.getMaxRetryDelay()); - request.getParameters().setReadPollTimeout(pattern.getPollTimeout()); + request.getParameters().setReadPollTimeout(pattern.getReadPollTimeout()); // Execute if the create and follow api call succeeds: Runnable successHandler = () -> { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java index f039810ed940c..02ee7d1f138c9 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrRequests.java @@ -57,22 +57,29 @@ public static void getIndexMetadata(Client client, Index index, long mappingVers } client.admin().cluster().state(request, ActionListener.wrap( response -> { - if (response.getState() == null) { + if (response.getState() == null) { // timeout on wait_for_metadata_version assert metadataVersion > 0 : metadataVersion; - throw new IllegalStateException("timeout to get cluster state with" + - " metadata version [" + metadataVersion + "], mapping version [" + mappingVersion + "]"); + if (timeoutSupplier.get().nanos() < 0) { + listener.onFailure(new IllegalStateException("timeout to get cluster state with" + + " metadata version [" + metadataVersion + "], mapping version [" + mappingVersion + "]")); + } else { + getIndexMetadata(client, index, mappingVersion, metadataVersion, timeoutSupplier, listener); + } + } else { + final MetaData metaData = response.getState().metaData(); + final IndexMetaData indexMetaData = metaData.getIndexSafe(index); + if (indexMetaData.getMappingVersion() >= mappingVersion) { + listener.onResponse(indexMetaData); + return; + } + if (timeoutSupplier.get().nanos() < 0) { + listener.onFailure(new IllegalStateException( + "timeout to get cluster state with mapping version [" + mappingVersion + "]")); + } else { + // ask for the 
next version. + getIndexMetadata(client, index, mappingVersion, metaData.version() + 1, timeoutSupplier, listener); + } } - final MetaData metaData = response.getState().metaData(); - final IndexMetaData indexMetaData = metaData.getIndexSafe(index); - if (indexMetaData.getMappingVersion() >= mappingVersion) { - listener.onResponse(indexMetaData); - return; - } - if (timeoutSupplier.get().nanos() < 0) { - throw new IllegalStateException("timeout to get cluster state with mapping version [" + mappingVersion + "]"); - } - // ask for the next version. - getIndexMetadata(client, index, mappingVersion, metaData.version() + 1, timeoutSupplier, listener); }, listener::onFailure )); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index 2a4fb7bb402ee..20b13474afa82 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.MissingHistoryOperationsException; @@ -418,6 +419,11 @@ private void globalCheckpointAdvancementFailure( if (e instanceof TimeoutException) { try { final IndexMetaData indexMetaData = clusterService.state().metaData().index(shardId.getIndex()); + if (indexMetaData == null) { + listener.onFailure(new IndexNotFoundException(shardId.getIndex())); + return; + } + final long mappingVersion = indexMetaData.getMappingVersion(); final long settingsVersion = indexMetaData.getSettingsVersion(); final SeqNoStats latestSeqNoStats = indexShard.seqNoStats(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index a4f02707bc40f..0ee86a6058c63 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -30,9 +30,10 @@ import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.transport.NoSuchRemoteClusterException; +import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; @@ -94,6 +95,12 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { private volatile ElasticsearchException fatalException; + private Scheduler.Cancellable renewable; + + synchronized Scheduler.Cancellable getRenewable() { + return renewable; + } + ShardFollowNodeTask(long id, String type, String action, String description, TaskId parentTask, Map headers, ShardFollowTask params, BiConsumer scheduler, final LongSupplier relativeTimeProvider) { super(id, type, action, description, 
parentTask, headers); @@ -121,7 +128,8 @@ void start( final long followerMaxSeqNo) { /* * While this should only ever be called once and before any other threads can touch these fields, we use synchronization here to - * avoid the need to declare these fields as volatile. That is, we are ensuring thesefields are always accessed under the same lock. + * avoid the need to declare these fields as volatile. That is, we are ensuring these fields are always accessed under the same + * lock. */ synchronized (this) { this.followerHistoryUUID = followerHistoryUUID; @@ -130,6 +138,11 @@ void start( this.followerGlobalCheckpoint = followerGlobalCheckpoint; this.followerMaxSeqNo = followerMaxSeqNo; this.lastRequestedSeqNo = followerGlobalCheckpoint; + renewable = scheduleBackgroundRetentionLeaseRenewal(() -> { + synchronized (ShardFollowNodeTask.this) { + return this.followerGlobalCheckpoint; + } + }); } // updates follower mapping, this gets us the leader mapping version and makes sure that leader and follower mapping are identical @@ -452,11 +465,15 @@ private void handleFailure(Exception e, AtomicInteger retryCounter, Runnable tas scheduler.accept(TimeValue.timeValueMillis(delay), task); } } else { - fatalException = ExceptionsHelper.convertToElastic(e); - LOGGER.warn("shard follow task encounter non-retryable error", e); + setFatalException(e); } } + void setFatalException(Exception e) { + fatalException = ExceptionsHelper.convertToElastic(e); + LOGGER.warn("shard follow task encounter non-retryable error", e); + } + static long computeDelay(int currentRetry, long maxRetryDelayInMillis) { // Cap currentRetry to avoid overflow when computing n variable int maxCurrentRetry = Math.min(currentRetry, 24); @@ -503,8 +520,16 @@ protected abstract void innerSendBulkShardOperationsRequest(String followerHisto protected abstract void innerSendShardChangesRequest(long from, int maxOperationCount, Consumer handler, Consumer errorHandler); + protected abstract Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(LongSupplier followerGlobalCheckpoint); + @Override protected void onCancelled() { + synchronized (this) { + if (renewable != null) { + renewable.cancel(); + renewable = null; + } + } markAsCompleted(); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java index dc7194aa4e0e5..8852e81607c16 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java @@ -14,11 +14,11 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.ccr.action.ImmutableFollowParameters; import java.io.IOException; import java.util.Arrays; @@ -28,7 +28,7 @@ import java.util.Objects; import java.util.Set; -public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { +public class ShardFollowTask extends ImmutableFollowParameters implements XPackPlugin.XPackPersistentTaskParams { public static final String NAME = 
"xpack/ccr/shard_follow_task"; @@ -36,31 +36,21 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { public static final Set HEADER_FILTERS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList("es-security-runas-user", "_xpack_security_authentication"))); - static final ParseField REMOTE_CLUSTER_FIELD = new ParseField("remote_cluster"); - static final ParseField FOLLOW_SHARD_INDEX_FIELD = new ParseField("follow_shard_index"); - static final ParseField FOLLOW_SHARD_INDEX_UUID_FIELD = new ParseField("follow_shard_index_uuid"); - static final ParseField FOLLOW_SHARD_SHARDID_FIELD = new ParseField("follow_shard_shard"); - static final ParseField LEADER_SHARD_INDEX_FIELD = new ParseField("leader_shard_index"); - static final ParseField LEADER_SHARD_INDEX_UUID_FIELD = new ParseField("leader_shard_index_uuid"); - static final ParseField LEADER_SHARD_SHARDID_FIELD = new ParseField("leader_shard_shard"); - static final ParseField HEADERS = new ParseField("headers"); - public static final ParseField MAX_READ_REQUEST_OPERATION_COUNT = new ParseField("max_read_request_operation_count"); - public static final ParseField MAX_READ_REQUEST_SIZE = new ParseField("max_read_request_size"); - public static final ParseField MAX_OUTSTANDING_READ_REQUESTS = new ParseField("max_outstanding_read_requests"); - public static final ParseField MAX_WRITE_REQUEST_OPERATION_COUNT = new ParseField("max_write_request_operation_count"); - public static final ParseField MAX_WRITE_REQUEST_SIZE = new ParseField("max_write_request_size"); - public static final ParseField MAX_OUTSTANDING_WRITE_REQUESTS = new ParseField("max_outstanding_write_requests"); - public static final ParseField MAX_WRITE_BUFFER_COUNT = new ParseField("max_write_buffer_count"); - public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); - public static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); - public static final ParseField READ_POLL_TIMEOUT = new ParseField("read_poll_timeout"); + private static final ParseField REMOTE_CLUSTER_FIELD = new ParseField("remote_cluster"); + private static final ParseField FOLLOW_SHARD_INDEX_FIELD = new ParseField("follow_shard_index"); + private static final ParseField FOLLOW_SHARD_INDEX_UUID_FIELD = new ParseField("follow_shard_index_uuid"); + private static final ParseField FOLLOW_SHARD_SHARDID_FIELD = new ParseField("follow_shard_shard"); + private static final ParseField LEADER_SHARD_INDEX_FIELD = new ParseField("leader_shard_index"); + private static final ParseField LEADER_SHARD_INDEX_UUID_FIELD = new ParseField("leader_shard_index_uuid"); + private static final ParseField LEADER_SHARD_SHARDID_FIELD = new ParseField("leader_shard_shard"); + private static final ParseField HEADERS = new ParseField("headers"); @SuppressWarnings("unchecked") private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, (a) -> new ShardFollowTask((String) a[0], new ShardId((String) a[1], (String) a[2], (int) a[3]), new ShardId((String) a[4], (String) a[5], (int) a[6]), - (int) a[7], (ByteSizeValue) a[8], (int) a[9], (int) a[10], (ByteSizeValue) a[11], (int) a[12], - (int) a[13], (ByteSizeValue) a[14], (TimeValue) a[15], (TimeValue) a[16], (Map) a[17])); + (Integer) a[7], (Integer) a[8], (Integer) a[9], (Integer) a[10], (ByteSizeValue) a[11], (ByteSizeValue) a[12], + (Integer) a[13], (ByteSizeValue) a[14], (TimeValue) a[15], (TimeValue) a[16], (Map) a[17])); static { 
PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), REMOTE_CLUSTER_FIELD); @@ -70,48 +60,13 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_SHARD_INDEX_FIELD); PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_SHARD_INDEX_UUID_FIELD); PARSER.declareInt(ConstructingObjectParser.constructorArg(), LEADER_SHARD_SHARDID_FIELD); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_READ_REQUEST_OPERATION_COUNT); - PARSER.declareField( - ConstructingObjectParser.constructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()), - MAX_READ_REQUEST_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_OUTSTANDING_READ_REQUESTS); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_REQUEST_OPERATION_COUNT); - PARSER.declareField( - ConstructingObjectParser.constructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_REQUEST_SIZE.getPreferredName()), - MAX_WRITE_REQUEST_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_OUTSTANDING_WRITE_REQUESTS); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_BUFFER_COUNT); - PARSER.declareField( - ConstructingObjectParser.constructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_BUFFER_SIZE.getPreferredName()), - MAX_WRITE_BUFFER_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareField(ConstructingObjectParser.constructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()), - MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); - PARSER.declareField(ConstructingObjectParser.constructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), READ_POLL_TIMEOUT.getPreferredName()), - READ_POLL_TIMEOUT, ObjectParser.ValueType.STRING); + ImmutableFollowParameters.initParser(PARSER); PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.mapStrings(), HEADERS); } private final String remoteCluster; private final ShardId followShardId; private final ShardId leaderShardId; - private final int maxReadRequestOperationCount; - private final ByteSizeValue maxReadRequestSize; - private final int maxOutstandingReadRequests; - private final int maxWriteRequestOperationCount; - private final ByteSizeValue maxWriteRequestSize; - private final int maxOutstandingWriteRequests; - private final int maxWriteBufferCount; - private final ByteSizeValue maxWriteBufferSize; - private final TimeValue maxRetryDelay; - private final TimeValue readPollTimeout; private final Map headers; ShardFollowTask( @@ -119,46 +74,36 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { final ShardId followShardId, final ShardId leaderShardId, final int maxReadRequestOperationCount, - final ByteSizeValue maxReadRequestSize, - final int maxOutstandingReadRequests, final int maxWriteRequestOperationCount, - final ByteSizeValue maxWriteRequestSize, + final int maxOutstandingReadRequests, final int maxOutstandingWriteRequests, + final ByteSizeValue maxReadRequestSize, + final ByteSizeValue maxWriteRequestSize, final int maxWriteBufferCount, final ByteSizeValue maxWriteBufferSize, final TimeValue maxRetryDelay, final TimeValue readPollTimeout, final Map headers) { + super(maxReadRequestOperationCount, 
maxWriteRequestOperationCount, maxOutstandingReadRequests, maxOutstandingWriteRequests, + maxReadRequestSize, maxWriteRequestSize, maxWriteBufferCount, maxWriteBufferSize, maxRetryDelay, readPollTimeout); this.remoteCluster = remoteCluster; this.followShardId = followShardId; this.leaderShardId = leaderShardId; - this.maxReadRequestOperationCount = maxReadRequestOperationCount; - this.maxReadRequestSize = maxReadRequestSize; - this.maxOutstandingReadRequests = maxOutstandingReadRequests; - this.maxWriteRequestOperationCount = maxWriteRequestOperationCount; - this.maxWriteRequestSize = maxWriteRequestSize; - this.maxOutstandingWriteRequests = maxOutstandingWriteRequests; - this.maxWriteBufferCount = maxWriteBufferCount; - this.maxWriteBufferSize = maxWriteBufferSize; - this.maxRetryDelay = maxRetryDelay; - this.readPollTimeout = readPollTimeout; this.headers = headers != null ? Collections.unmodifiableMap(headers) : Collections.emptyMap(); } - public ShardFollowTask(StreamInput in) throws IOException { - this.remoteCluster = in.readString(); - this.followShardId = ShardId.readShardId(in); - this.leaderShardId = ShardId.readShardId(in); - this.maxReadRequestOperationCount = in.readVInt(); - this.maxReadRequestSize = new ByteSizeValue(in); - this.maxOutstandingReadRequests = in.readVInt(); - this.maxWriteRequestOperationCount = in.readVInt(); - this.maxWriteRequestSize = new ByteSizeValue(in); - this.maxOutstandingWriteRequests = in.readVInt(); - this.maxWriteBufferCount = in.readVInt(); - this.maxWriteBufferSize = new ByteSizeValue(in); - this.maxRetryDelay = in.readTimeValue(); - this.readPollTimeout = in.readTimeValue(); + public static ShardFollowTask readFrom(StreamInput in) throws IOException { + String remoteCluster = in.readString(); + ShardId followShardId = ShardId.readShardId(in); + ShardId leaderShardId = ShardId.readShardId(in); + return new ShardFollowTask(remoteCluster, followShardId, leaderShardId, in); + } + + private ShardFollowTask(String remoteCluster, ShardId followShardId, ShardId leaderShardId, StreamInput in) throws IOException { + super(in); + this.remoteCluster = remoteCluster; + this.followShardId = followShardId; + this.leaderShardId = leaderShardId; this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); } @@ -174,50 +119,6 @@ public ShardId getLeaderShardId() { return leaderShardId; } - public int getMaxReadRequestOperationCount() { - return maxReadRequestOperationCount; - } - - public int getMaxOutstandingReadRequests() { - return maxOutstandingReadRequests; - } - - public int getMaxWriteRequestOperationCount() { - return maxWriteRequestOperationCount; - } - - public ByteSizeValue getMaxWriteRequestSize() { - return maxWriteRequestSize; - } - - public int getMaxOutstandingWriteRequests() { - return maxOutstandingWriteRequests; - } - - public int getMaxWriteBufferCount() { - return maxWriteBufferCount; - } - - public ByteSizeValue getMaxWriteBufferSize() { - return maxWriteBufferSize; - } - - public ByteSizeValue getMaxReadRequestSize() { - return maxReadRequestSize; - } - - public TimeValue getMaxRetryDelay() { - return maxRetryDelay; - } - - public TimeValue getReadPollTimeout() { - return readPollTimeout; - } - - public String getTaskId() { - return followShardId.getIndex().getUUID() + "-" + followShardId.getId(); - } - public Map getHeaders() { return headers; } @@ -232,16 +133,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(remoteCluster); followShardId.writeTo(out); 
leaderShardId.writeTo(out); - out.writeVLong(maxReadRequestOperationCount); - maxReadRequestSize.writeTo(out); - out.writeVInt(maxOutstandingReadRequests); - out.writeVLong(maxWriteRequestOperationCount); - maxWriteRequestSize.writeTo(out); - out.writeVInt(maxOutstandingWriteRequests); - out.writeVInt(maxWriteBufferCount); - maxWriteBufferSize.writeTo(out); - out.writeTimeValue(maxRetryDelay); - out.writeTimeValue(readPollTimeout); + super.writeTo(out); out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); } @@ -259,16 +151,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(LEADER_SHARD_INDEX_FIELD.getPreferredName(), leaderShardId.getIndex().getName()); builder.field(LEADER_SHARD_INDEX_UUID_FIELD.getPreferredName(), leaderShardId.getIndex().getUUID()); builder.field(LEADER_SHARD_SHARDID_FIELD.getPreferredName(), leaderShardId.id()); - builder.field(MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); - builder.field(MAX_READ_REQUEST_SIZE.getPreferredName(), maxReadRequestSize.getStringRep()); - builder.field(MAX_OUTSTANDING_READ_REQUESTS.getPreferredName(), maxOutstandingReadRequests); - builder.field(MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName(), maxWriteRequestOperationCount); - builder.field(MAX_WRITE_REQUEST_SIZE.getPreferredName(), maxWriteRequestSize.getStringRep()); - builder.field(MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName(), maxOutstandingWriteRequests); - builder.field(MAX_WRITE_BUFFER_COUNT.getPreferredName(), maxWriteBufferCount); - builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize.getStringRep()); - builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); - builder.field(READ_POLL_TIMEOUT.getPreferredName(), readPollTimeout.getStringRep()); + toXContentFragment(builder); builder.field(HEADERS.getPreferredName(), headers); return builder.endObject(); } @@ -277,39 +160,21 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; ShardFollowTask that = (ShardFollowTask) o; return Objects.equals(remoteCluster, that.remoteCluster) && Objects.equals(followShardId, that.followShardId) && Objects.equals(leaderShardId, that.leaderShardId) && - maxReadRequestOperationCount == that.maxReadRequestOperationCount && - maxReadRequestSize.equals(that.maxReadRequestSize) && - maxOutstandingReadRequests == that.maxOutstandingReadRequests && - maxWriteRequestOperationCount == that.maxWriteRequestOperationCount && - maxWriteRequestSize.equals(that.maxWriteRequestSize) && - maxOutstandingWriteRequests == that.maxOutstandingWriteRequests && - maxWriteBufferCount == that.maxWriteBufferCount && - maxWriteBufferSize.equals(that.maxWriteBufferSize) && - Objects.equals(maxRetryDelay, that.maxRetryDelay) && - Objects.equals(readPollTimeout, that.readPollTimeout) && Objects.equals(headers, that.headers); } @Override public int hashCode() { return Objects.hash( + super.hashCode(), remoteCluster, followShardId, leaderShardId, - maxReadRequestOperationCount, - maxReadRequestSize, - maxOutstandingReadRequests, - maxWriteRequestOperationCount, - maxWriteRequestSize, - maxOutstandingWriteRequests, - maxWriteBufferCount, - maxWriteBufferSize, - maxRetryDelay, - readPollTimeout, headers ); } diff --git 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index c0e2d7f54b318..81d8750d07c6d 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -8,6 +8,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -31,10 +34,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.seqno.RetentionLeaseActions; +import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; +import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; @@ -44,8 +51,11 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.NoSuchRemoteClusterException; import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.ccr.CcrRetentionLeases; import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsAction; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsRequest; @@ -58,6 +68,7 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.LongConsumer; +import java.util.function.LongSupplier; import java.util.function.Supplier; import static org.elasticsearch.xpack.ccr.CcrLicenseChecker.wrapClient; @@ -71,6 +82,7 @@ public class ShardFollowTasksExecutor extends PersistentTasksExecutor this.waitForMetadataTimeOut = newVal); @@ -113,7 +126,16 @@ protected void innerUpdateMapping(long minRequiredMappingVersion, LongConsumer h final Index followerIndex = params.getFollowShardId().getIndex(); final Index leaderIndex = params.getLeaderShardId().getIndex(); final Supplier timeout = () -> isStopped() ? 
TimeValue.MINUS_ONE : waitForMetadataTimeOut; - CcrRequests.getIndexMetadata(remoteClient(params), leaderIndex, minRequiredMappingVersion, 0L, timeout, ActionListener.wrap( + + final Client remoteClient; + try { + remoteClient = remoteClient(params); + } catch (NoSuchRemoteClusterException e) { + errorHandler.accept(e); + return; + } + + CcrRequests.getIndexMetadata(remoteClient, leaderIndex, minRequiredMappingVersion, 0L, timeout, ActionListener.wrap( indexMetaData -> { if (indexMetaData.getMappings().isEmpty()) { assert indexMetaData.getMappingVersion() == 1; @@ -172,7 +194,7 @@ protected void innerUpdateSettings(final LongConsumer finalHandler, final Consum }; try { remoteClient(params).admin().cluster().state(clusterStateRequest, ActionListener.wrap(onResponse, errorHandler)); - } catch (Exception e) { + } catch (NoSuchRemoteClusterException e) { errorHandler.accept(e); } } @@ -230,10 +252,100 @@ protected void innerSendShardChangesRequest(long from, int maxOperationCount, Co request.setPollTimeout(params.getReadPollTimeout()); try { remoteClient(params).execute(ShardChangesAction.INSTANCE, request, ActionListener.wrap(handler::accept, errorHandler)); - } catch (Exception e) { + } catch (NoSuchRemoteClusterException e) { errorHandler.accept(e); } } + + @Override + protected Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(final LongSupplier followerGlobalCheckpoint) { + final String retentionLeaseId = CcrRetentionLeases.retentionLeaseId( + clusterService.getClusterName().value(), + params.getFollowShardId().getIndex(), + params.getRemoteCluster(), + params.getLeaderShardId().getIndex()); + + /* + * We are going to attempt to renew the retention lease. If this fails it is either because the retention lease does not + * exist, or something else happened. If the retention lease does not exist, we will attempt to add the retention lease + * again. If that fails, it had better not be because the retention lease already exists. Either way, we will attempt to + * renew again on the next scheduled execution. + */ + final ActionListener listener = ActionListener.wrap( + r -> {}, + e -> { + /* + * We have to guard against the possibility that the shard follow node task has been stopped and the retention + * lease deliberately removed via the act of unfollowing. Note that the order of operations is important in + * TransportUnfollowAction. There, we first stop the shard follow node task, and then remove the retention + * leases on the leader. This means that if we end up here with the retention lease not existing because of an + * unfollow action, then we know that the unfollow action has already stopped the shard follow node task and + * there is no race condition with the unfollow action. 
+ */ + if (isCancelled() || isCompleted()) { + return; + } + final Throwable cause = ExceptionsHelper.unwrapCause(e); + logRetentionLeaseFailure(retentionLeaseId, cause); + // noinspection StatementWithEmptyBody + if (cause instanceof RetentionLeaseNotFoundException) { + // note that we do not need to mark as system context here as that is restored from the original renew + logger.trace( + "{} background adding retention lease [{}] while following", + params.getFollowShardId(), + retentionLeaseId); + CcrRetentionLeases.asyncAddRetentionLease( + params.getLeaderShardId(), + retentionLeaseId, + followerGlobalCheckpoint.getAsLong(), + remoteClient(params), + ActionListener.wrap( + r -> {}, + inner -> { + /* + * If this fails because the retention lease already exists, something highly unusual is + * going on. Log it, and renew again after another renew interval has passed. + */ + final Throwable innerCause = ExceptionsHelper.unwrapCause(inner); + assert innerCause instanceof RetentionLeaseAlreadyExistsException == false; + logRetentionLeaseFailure(retentionLeaseId, innerCause); + })); + } else { + // if something else happened, we will attempt to renew again after another renew interval has passed + } + }); + + return threadPool.scheduleWithFixedDelay( + () -> { + final ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we have to execute under the system context so that if security is enabled the management is authorized + threadContext.markAsSystemContext(); + logger.trace( + "{} background renewing retention lease [{}] while following", + params.getFollowShardId(), + retentionLeaseId); + CcrRetentionLeases.asyncRenewRetentionLease( + params.getLeaderShardId(), + retentionLeaseId, + followerGlobalCheckpoint.getAsLong(), + remoteClient(params), + listener); + } + }, + retentionLeaseRenewInterval, + Ccr.CCR_THREAD_POOL_NAME); + } + + private void logRetentionLeaseFailure(final String retentionLeaseId, final Throwable cause) { + assert cause instanceof ElasticsearchSecurityException == false : cause; + logger.warn(new ParameterizedMessage( + "{} background management of retention lease [{}] failed while following", + params.getFollowShardId(), + retentionLeaseId), + cause); + } + }; } @@ -271,7 +383,7 @@ protected void nodeOperation(final AllocatedPersistentTask task, final ShardFoll shardFollowNodeTask), e); threadPool.schedule(() -> nodeOperation(task, params, state), params.getMaxRetryDelay(), Ccr.CCR_THREAD_POOL_NAME); } else { - shardFollowNodeTask.markAsFailed(e); + shardFollowNodeTask.setFatalException(e); } }; @@ -302,9 +414,21 @@ private void fetchFollowerShardInfo( if (filteredShardStats.isPresent()) { final ShardStats shardStats = filteredShardStats.get(); final CommitStats commitStats = shardStats.getCommitStats(); - final String historyUUID = commitStats.getUserData().get(Engine.HISTORY_UUID_KEY); - + if (commitStats == null) { + // If commitStats is null then AlreadyClosedException has been thrown: TransportIndicesStatsAction#shardOperation(...) + // AlreadyClosedException will be retried by ShardFollowNodeTask.shouldRetry(...) + errorHandler.accept(new AlreadyClosedException(shardId + " commit_stats are missing")); + return; + } final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); + if (seqNoStats == null) { + // If seqNoStats is null then AlreadyClosedException has been thrown at TransportIndicesStatsAction#shardOperation(...) 
+ // AlreadyClosedException will be retried by ShardFollowNodeTask.shouldRetry(...) + errorHandler.accept(new AlreadyClosedException(shardId + " seq_no_stats are missing")); + return; + } + + final String historyUUID = commitStats.getUserData().get(Engine.HISTORY_UUID_KEY); final long globalCheckpoint = seqNoStats.getGlobalCheckpoint(); final long maxSeqNo = seqNoStats.getMaxSeqNo(); handler.accept(historyUUID, globalCheckpoint, maxSeqNo); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportForgetFollowerAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportForgetFollowerAction.java new file mode 100644 index 0000000000000..5656450f4c696 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportForgetFollowerAction.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.Assertions; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.PlainShardsIterator; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.CcrRetentionLeases; +import org.elasticsearch.xpack.core.ccr.action.ForgetFollowerAction; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.ExecutionException; + +public class TransportForgetFollowerAction extends TransportBroadcastByNodeAction< + ForgetFollowerAction.Request, + BroadcastResponse, + TransportBroadcastByNodeAction.EmptyResult> { + + private final ClusterService clusterService; + private final IndicesService indicesService; + + @Inject + public TransportForgetFollowerAction( + final ClusterService clusterService, + final TransportService transportService, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final IndicesService indicesService) { + super( + ForgetFollowerAction.NAME, + Objects.requireNonNull(clusterService), + Objects.requireNonNull(transportService), + 
Objects.requireNonNull(actionFilters), + Objects.requireNonNull(indexNameExpressionResolver), + ForgetFollowerAction.Request::new, + ThreadPool.Names.MANAGEMENT); + this.clusterService = clusterService; + this.indicesService = Objects.requireNonNull(indicesService); + } + + @Override + protected EmptyResult readShardResult(final StreamInput in) { + return EmptyResult.readEmptyResultFrom(in); + } + + @Override + protected BroadcastResponse newResponse( + final ForgetFollowerAction.Request request, + final int totalShards, + final int successfulShards, + final int failedShards, List emptyResults, + final List shardFailures, + final ClusterState clusterState) { + return new BroadcastResponse(totalShards, successfulShards, failedShards, shardFailures); + } + + @Override + protected ForgetFollowerAction.Request readRequestFrom(final StreamInput in) throws IOException { + return new ForgetFollowerAction.Request(in); + } + + @Override + protected EmptyResult shardOperation(final ForgetFollowerAction.Request request, final ShardRouting shardRouting) { + final Index followerIndex = new Index(request.followerIndex(), request.followerIndexUUID()); + final Index leaderIndex = clusterService.state().metaData().index(request.leaderIndex()).getIndex(); + final String id = CcrRetentionLeases.retentionLeaseId( + request.followerCluster(), + followerIndex, + request.leaderRemoteCluster(), + leaderIndex); + + final IndexShard indexShard = indicesService.indexServiceSafe(leaderIndex).getShard(shardRouting.shardId().id()); + + final PlainActionFuture permit = new PlainActionFuture<>(); + indexShard.acquirePrimaryOperationPermit(permit, ThreadPool.Names.SAME, request); + try (Releasable ignored = permit.get()) { + final PlainActionFuture future = new PlainActionFuture<>(); + indexShard.removeRetentionLease(id, future); + future.get(); + } catch (final ExecutionException | InterruptedException e) { + throw new RuntimeException(e); + } + + return EmptyResult.INSTANCE; + } + + @Override + protected ShardsIterator shards( + final ClusterState clusterState, + final ForgetFollowerAction.Request request, + final String[] concreteIndices) { + final GroupShardsIterator activePrimaryShards = + clusterState.routingTable().activePrimaryShardsGrouped(concreteIndices, false); + final List shardRoutings = new ArrayList<>(); + final Iterator it = activePrimaryShards.iterator(); + while (it.hasNext()) { + final ShardIterator shardIterator = it.next(); + final ShardRouting primaryShard = shardIterator.nextOrNull(); + assert primaryShard != null; + shardRoutings.add(primaryShard); + if (Assertions.ENABLED) { + final ShardRouting maybeNextPrimaryShard = shardIterator.nextOrNull(); + assert maybeNextPrimaryShard == null : maybeNextPrimaryShard; + } + } + return new PlainShardsIterator(shardRoutings); + } + + @Override + protected ClusterBlockException checkGlobalBlock(final ClusterState state, final ForgetFollowerAction.Request request) { + return null; + } + + @Override + protected ClusterBlockException checkRequestBlock( + final ClusterState state, + final ForgetFollowerAction.Request request, + final String[] concreteIndices) { + return null; + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java index d5127cbb74d4b..a218ec2dcaa7c 100644 --- 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java @@ -159,11 +159,11 @@ static ClusterState innerPut(PutAutoFollowPatternAction.Request request, request.getLeaderIndexPatterns(), request.getFollowIndexNamePattern(), request.getParameters().getMaxReadRequestOperationCount(), - request.getParameters().getMaxReadRequestSize(), - request.getParameters().getMaxOutstandingReadRequests(), request.getParameters().getMaxWriteRequestOperationCount(), - request.getParameters().getMaxWriteRequestSize(), + request.getParameters().getMaxOutstandingReadRequests(), request.getParameters().getMaxOutstandingWriteRequests(), + request.getParameters().getMaxReadRequestSize(), + request.getParameters().getMaxWriteRequestSize(), request.getParameters().getMaxWriteBufferCount(), request.getParameters().getMaxWriteBufferSize(), request.getParameters().getMaxRetryDelay(), diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index aa94071ac1d35..3f7f361d4b375 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -9,7 +9,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreClusterStateListener; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; @@ -124,9 +123,7 @@ private void createFollowerIndex( listener.onFailure(new IllegalArgumentException("leader index [" + request.getLeaderIndex() + "] does not exist")); return; } - // soft deletes are enabled by default on indices created on 7.0.0 or later - if (leaderIndexMetaData.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), - IndexMetaData.SETTING_INDEX_VERSION_CREATED.get(leaderIndexMetaData.getSettings()).onOrAfter(Version.V_7_0_0)) == false) { + if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(leaderIndexMetaData.getSettings()) == false) { listener.onFailure(new IllegalArgumentException("leader index [" + request.getLeaderIndex() + "] does not have soft deletes enabled")); return; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index 150e1df7a3bae..1d595e7c95b8c 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ccr.action; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -214,9 +213,7 @@ static void validate( "] as history uuid"); } } - // soft deletes are enabled by default on indices created on 7.0.0 or later - if 
(leaderIndex.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), - IndexMetaData.SETTING_INDEX_VERSION_CREATED.get(leaderIndex.getSettings()).onOrAfter(Version.V_7_0_0)) == false) { + if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(leaderIndex.getSettings()) == false) { throw new IllegalArgumentException("leader index [" + leaderIndex.getIndex().getName() + "] does not have soft deletes enabled"); } @@ -243,7 +240,8 @@ static void validate( Settings leaderSettings = filter(leaderIndex.getSettings()); Settings followerSettings = filter(followIndex.getSettings()); if (leaderSettings.equals(followerSettings) == false) { - throw new IllegalArgumentException("the leader and follower index settings must be identical"); + throw new IllegalArgumentException("the leader index settings [" + leaderSettings + "] and follower index settings [" + + followerSettings + "] must be identical"); } // Validates if the current follower mapping is mergable with the leader mapping. @@ -323,11 +321,11 @@ private static ShardFollowTask createShardFollowTask( new ShardId(followIndexMetadata.getIndex(), shardId), new ShardId(leaderIndexMetadata.getIndex(), shardId), maxReadRequestOperationCount, - maxReadRequestSize, - maxOutstandingReadRequests, maxWriteRequestOperationCount, - maxWriteRequestSize, + maxOutstandingReadRequests, maxOutstandingWriteRequests, + maxReadRequestSize, + maxWriteRequestSize, maxWriteBufferCount, maxWriteBufferSize, maxRetryDelay, @@ -388,7 +386,7 @@ static String[] extractLeaderShardHistoryUUIDs(Map ccrIndexMetaD nonReplicatedSettings.add(IndexSettings.ALLOW_UNMAPPED); nonReplicatedSettings.add(IndexSettings.INDEX_SEARCH_IDLE_AFTER); nonReplicatedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); - nonReplicatedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING); + nonReplicatedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING); nonReplicatedSettings.add(IndexSettings.MAX_SCRIPT_FIELDS_SETTING); nonReplicatedSettings.add(IndexSettings.MAX_REGEX_LENGTH_SETTING); nonReplicatedSettings.add(IndexSettings.MAX_TERMS_COUNT_SETTING); @@ -456,6 +454,11 @@ static Settings filter(Settings originalSettings) { settings.remove(IndexMetaData.SETTING_INDEX_PROVIDED_NAME); settings.remove(IndexMetaData.SETTING_CREATION_DATE); + // Follower index may be upgraded, while the leader index hasn't been upgraded, so it is expected + // that these settings are different: + settings.remove(IndexMetaData.SETTING_VERSION_UPGRADED); + settings.remove(IndexMetaData.SETTING_VERSION_UPGRADED_STRING); + Iterator iterator = settings.keys().iterator(); while (iterator.hasNext()) { String key = iterator.next(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java index 3a158aceddb2d..0e6b0cccefffb 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java @@ -6,10 +6,16 @@ package org.elasticsearch.xpack.ccr.action; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; 
+import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -20,22 +26,46 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.seqno.RetentionLeaseActions; +import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.ccr.CcrRetentionLeases; import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.core.ccr.action.UnfollowAction; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + public class TransportUnfollowAction extends TransportMasterNodeAction { + private final Client client; + @Inject - public TransportUnfollowAction(TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver) { - super(UnfollowAction.NAME, transportService, clusterService, threadPool, actionFilters, - UnfollowAction.Request::new, indexNameExpressionResolver); + public TransportUnfollowAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Client client) { + super( + UnfollowAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + UnfollowAction.Request::new, + indexNameExpressionResolver); + this.client = Objects.requireNonNull(client); } @Override @@ -49,26 +79,128 @@ protected AcknowledgedResponse newResponse() { } @Override - protected void masterOperation(UnfollowAction.Request request, - ClusterState state, - ActionListener listener) throws Exception { + protected void masterOperation( + final UnfollowAction.Request request, + final ClusterState state, + final ActionListener listener) { clusterService.submitStateUpdateTask("unfollow_action", new ClusterStateUpdateTask() { @Override - public ClusterState execute(ClusterState current) throws Exception { + public ClusterState execute(final ClusterState current) { String followerIndex = request.getFollowerIndex(); return unfollow(followerIndex, current); } @Override - public void onFailure(String source, Exception e) { + public void onFailure(final String source, final Exception e) { listener.onFailure(e); } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - listener.onResponse(new AcknowledgedResponse(true)); + public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { + final IndexMetaData indexMetaData = 
oldState.metaData().index(request.getFollowerIndex()); + final Map ccrCustomMetaData = indexMetaData.getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); + final String remoteClusterName = ccrCustomMetaData.get(Ccr.CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY); + final Client remoteClient = client.getRemoteClusterClient(remoteClusterName); + final String leaderIndexName = ccrCustomMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY); + final String leaderIndexUuid = ccrCustomMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY); + final Index leaderIndex = new Index(leaderIndexName, leaderIndexUuid); + final String retentionLeaseId = CcrRetentionLeases.retentionLeaseId( + oldState.getClusterName().value(), + indexMetaData.getIndex(), + remoteClusterName, + leaderIndex); + final int numberOfShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexMetaData.getSettings()); + + final GroupedActionListener groupListener = new GroupedActionListener<>( + new ActionListener>() { + + @Override + public void onResponse(final Collection responses) { + logger.trace( + "[{}] removed retention lease [{}] on all leader primary shards", + indexMetaData.getIndex(), + retentionLeaseId); + listener.onResponse(new AcknowledgedResponse(true)); + } + + @Override + public void onFailure(final Exception e) { + logger.warn(new ParameterizedMessage( + "[{}] failure while removing retention lease [{}] on leader primary shards", + indexMetaData.getIndex(), + retentionLeaseId), + e); + final ElasticsearchException wrapper = new ElasticsearchException(e); + wrapper.addMetadata("es.failed_to_remove_retention_leases", retentionLeaseId); + listener.onFailure(wrapper); + } + + }, + numberOfShards, + Collections.emptyList()); + for (int i = 0; i < numberOfShards; i++) { + final ShardId followerShardId = new ShardId(indexMetaData.getIndex(), i); + final ShardId leaderShardId = new ShardId(leaderIndex, i); + removeRetentionLeaseForShard( + followerShardId, + leaderShardId, + retentionLeaseId, + remoteClient, + ActionListener.wrap( + groupListener::onResponse, + e -> handleException( + followerShardId, + retentionLeaseId, + leaderShardId, + groupListener, + e))); + } } + + private void removeRetentionLeaseForShard( + final ShardId followerShardId, + final ShardId leaderShardId, + final String retentionLeaseId, + final Client remoteClient, + final ActionListener listener) { + logger.trace("{} removing retention lease [{}] while unfollowing leader index", followerShardId, retentionLeaseId); + final ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) { + // we have to execute under the system context so that if security is enabled the removal is authorized + threadContext.markAsSystemContext(); + CcrRetentionLeases.asyncRemoveRetentionLease(leaderShardId, retentionLeaseId, remoteClient, listener); + } + } + + private void handleException( + final ShardId followerShardId, + final String retentionLeaseId, + final ShardId leaderShardId, + final ActionListener listener, + final Exception e) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + assert cause instanceof ElasticsearchSecurityException == false : e; + if (cause instanceof RetentionLeaseNotFoundException) { + // treat as success + logger.trace(new ParameterizedMessage( + "{} retention lease [{}] not found on {} while unfollowing", + followerShardId, + retentionLeaseId, + leaderShardId), + e); + listener.onResponse(new RetentionLeaseActions.Response()); + } 
else { + logger.warn(new ParameterizedMessage( + "{} failed to remove retention lease [{}] on {} while unfollowing", + followerShardId, + retentionLeaseId, + leaderShardId), + e); + listener.onFailure(e); + } + } + }); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java index 9afc57309cccc..7c3b2da32e88f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java @@ -70,7 +70,7 @@ protected WritePrimaryResult { public static final ClearCcrRestoreSessionAction INSTANCE = new ClearCcrRestoreSessionAction(); - private static final String NAME = "internal:admin/ccr/restore/session/clear"; + public static final String NAME = "internal:admin/ccr/restore/session/clear"; private ClearCcrRestoreSessionAction() { super(NAME); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java index cf8d2e5c55f48..37dfc84f46a01 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java @@ -51,7 +51,6 @@ public static class TransportGetCcrRestoreFileChunkAction extends HandledTransportAction { private final CcrRestoreSourceService restoreSourceService; - private final ThreadPool threadPool; private final BigArrays bigArrays; @Inject @@ -59,7 +58,6 @@ public TransportGetCcrRestoreFileChunkAction(BigArrays bigArrays, TransportServi CcrRestoreSourceService restoreSourceService) { super(NAME, transportService, actionFilters, GetCcrRestoreFileChunkRequest::new, ThreadPool.Names.GENERIC); TransportActionProxy.registerProxyAction(transportService, NAME, GetCcrRestoreFileChunkResponse::new); - this.threadPool = transportService.getThreadPool(); this.restoreSourceService = restoreSourceService; this.bigArrays = bigArrays; } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java index eceacc1d926d8..393548225a0c2 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutCcrRestoreSessionAction.java @@ -33,7 +33,7 @@ public class PutCcrRestoreSessionAction extends Action { public static final PutCcrRestoreSessionAction INSTANCE = new PutCcrRestoreSessionAction(); - private static final String NAME = "internal:admin/ccr/restore/session/put"; + public static final String NAME = "internal:admin/ccr/restore/session/put"; private PutCcrRestoreSessionAction() { super(NAME); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java index 23157c177816f..acffacd4051d5 100644 --- 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java @@ -68,6 +68,7 @@ private void preFlight(final Operation operation) { @Override protected InternalEngine.IndexingStrategy indexingStrategyForOperation(final Index index) throws IOException { preFlight(index); + markSeqNoAsSeen(index.seqNo()); // NOTES: refer Engine#getMaxSeqNoOfUpdatesOrDeletes for the explanation of the optimization using sequence numbers. final long maxSeqNoOfUpdatesOrDeletes = getMaxSeqNoOfUpdatesOrDeletes(); assert maxSeqNoOfUpdatesOrDeletes != SequenceNumbers.UNASSIGNED_SEQ_NO : "max_seq_no_of_updates is not initialized"; @@ -103,6 +104,7 @@ protected InternalEngine.IndexingStrategy indexingStrategyForOperation(final Ind @Override protected InternalEngine.DeletionStrategy deletionStrategyForOperation(final Delete delete) throws IOException { preFlight(delete); + markSeqNoAsSeen(delete.seqNo()); if (delete.origin() == Operation.Origin.PRIMARY && hasBeenProcessedBefore(delete)) { // See the comment in #indexingStrategyForOperation for the explanation why we can safely skip this operation. final AlreadyProcessedFollowingEngineException error = new AlreadyProcessedFollowingEngineException( @@ -149,32 +151,33 @@ protected boolean assertPrimaryCanOptimizeAddDocument(final Index index) { } private OptionalLong lookupPrimaryTerm(final long seqNo) throws IOException { + // Don't need to look up term for operations before the global checkpoint for they were processed on every copies already. + if (seqNo <= engineConfig.getGlobalCheckpointSupplier().getAsLong()) { + return OptionalLong.empty(); + } refreshIfNeeded("lookup_primary_term", seqNo); try (Searcher engineSearcher = acquireSearcher("lookup_primary_term", SearcherScope.INTERNAL)) { - // We have to acquire a searcher before execute this check to ensure that the requesting seq_no is always found in the else - // branch. If the operation is at most the global checkpoint, we should not look up its term as we may have merged away the - // operation. Moreover, we won't need to replicate this operation to replicas since it was processed on every copies already. + final DirectoryReader reader = Lucene.wrapAllDocsLive(engineSearcher.getDirectoryReader()); + final IndexSearcher searcher = new IndexSearcher(reader); + searcher.setQueryCache(null); + final Query query = new BooleanQuery.Builder() + .add(LongPoint.newExactQuery(SeqNoFieldMapper.NAME, seqNo), BooleanClause.Occur.FILTER) + // excludes the non-root nested documents which don't have primary_term. + .add(new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME), BooleanClause.Occur.FILTER) + .build(); + final TopDocs topDocs = searcher.search(query, 1); + if (topDocs.scoreDocs.length == 1) { + final int docId = topDocs.scoreDocs[0].doc; + final LeafReaderContext leaf = reader.leaves().get(ReaderUtil.subIndex(docId, reader.leaves())); + final NumericDocValues primaryTermDV = leaf.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); + if (primaryTermDV != null && primaryTermDV.advanceExact(docId - leaf.docBase)) { + assert primaryTermDV.longValue() > 0 : "invalid term [" + primaryTermDV.longValue() + "]"; + return OptionalLong.of(primaryTermDV.longValue()); + } + } if (seqNo <= engineConfig.getGlobalCheckpointSupplier().getAsLong()) { - return OptionalLong.empty(); + return OptionalLong.empty(); // we have merged away the looking up operation. 
} else { - final DirectoryReader reader = Lucene.wrapAllDocsLive(engineSearcher.getDirectoryReader()); - final IndexSearcher searcher = new IndexSearcher(reader); - searcher.setQueryCache(null); - final Query query = new BooleanQuery.Builder() - .add(LongPoint.newExactQuery(SeqNoFieldMapper.NAME, seqNo), BooleanClause.Occur.FILTER) - // excludes the non-root nested documents which don't have primary_term. - .add(new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME), BooleanClause.Occur.FILTER) - .build(); - final TopDocs topDocs = searcher.search(query, 1); - if (topDocs.scoreDocs.length == 1) { - final int docId = topDocs.scoreDocs[0].doc; - final LeafReaderContext leaf = reader.leaves().get(ReaderUtil.subIndex(docId, reader.leaves())); - final NumericDocValues primaryTermDV = leaf.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); - if (primaryTermDV != null && primaryTermDV.advanceExact(docId - leaf.docBase)) { - assert primaryTermDV.longValue() > 0 : "invalid term [" + primaryTermDV.longValue() + "]"; - return OptionalLong.of(primaryTermDV.longValue()); - } - } assert false : "seq_no[" + seqNo + "] does not have primary_term, total_hits=[" + topDocs.totalHits + "]"; throw new IllegalStateException("seq_no[" + seqNo + "] does not have primary_term (total_hits=" + topDocs.totalHits + ")"); } @@ -195,4 +198,12 @@ private OptionalLong lookupPrimaryTerm(final long seqNo) throws IOException { public long getNumberOfOptimizedIndexing() { return numOfOptimizedIndexing.count(); } + + @Override + public void verifyEngineBeforeIndexClosing() throws IllegalStateException { + // the value of the global checkpoint is not verified when the following engine is closed, + // allowing it to be closed even in the case where all operations have not been fetched and + // processed from the leader and the operations history has gaps. This way the following + // engine can be closed and reopened in order to bootstrap the follower index again. 
+ } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 833cd474450ff..0b445a3eb01ef 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -8,13 +8,22 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.IndexCommit; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.support.ListenerTimeouts; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -22,26 +31,30 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.CombinedRateLimiter; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.index.seqno.LocalCheckpointTracker; +import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; +import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardRecoveryException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; -import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; import org.elasticsearch.index.snapshots.blobstore.SnapshotFiles; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; +import org.elasticsearch.indices.recovery.MultiFileWriter; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.IndexId; import 
org.elasticsearch.repositories.Repository; @@ -51,8 +64,11 @@ import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotShardFailure; import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.threadpool.Scheduler; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrRetentionLeases; import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.action.CcrRequests; import org.elasticsearch.xpack.ccr.action.repositories.ClearCcrRestoreSessionAction; @@ -70,10 +86,18 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongConsumer; import java.util.function.Supplier; +import static org.elasticsearch.index.seqno.RetentionLeaseActions.RETAIN_ALL; +import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; +import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.retentionLeaseId; +import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.syncAddRetentionLease; +import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.syncRenewRetentionLease; + /** * This repository relies on a remote cluster for Ccr restores. It is read-only so it can only be used to @@ -81,6 +105,8 @@ */ public class CcrRepository extends AbstractLifecycleComponent implements Repository { + private static final Logger logger = LogManager.getLogger(CcrRepository.class); + public static final String LATEST = "_latest_"; public static final String TYPE = "_ccr_"; public static final String NAME_PREFIX = "_ccr_"; @@ -89,20 +115,24 @@ public class CcrRepository extends AbstractLifecycleComponent implements Reposit private final RepositoryMetaData metadata; private final CcrSettings ccrSettings; + private final String localClusterName; private final String remoteClusterAlias; private final Client client; private final CcrLicenseChecker ccrLicenseChecker; + private final ThreadPool threadPool; private final CounterMetric throttledTime = new CounterMetric(); - + public CcrRepository(RepositoryMetaData metadata, Client client, CcrLicenseChecker ccrLicenseChecker, Settings settings, - CcrSettings ccrSettings) { + CcrSettings ccrSettings, ThreadPool threadPool) { this.metadata = metadata; this.ccrSettings = ccrSettings; + this.localClusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings).value(); assert metadata.name().startsWith(NAME_PREFIX) : "CcrRepository metadata.name() must start with: " + NAME_PREFIX; this.remoteClusterAlias = Strings.split(metadata.name(), NAME_PREFIX)[1]; this.ccrLicenseChecker = ccrLicenseChecker; this.client = client; + this.threadPool = threadPool; } @Override @@ -125,10 +155,14 @@ public RepositoryMetaData getMetadata() { return metadata; } + private Client getRemoteClusterClient() { + return client.getRemoteClusterClient(remoteClusterAlias); + } + @Override public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) { assert SNAPSHOT_ID.equals(snapshotId) : "RemoteClusterRepository only supports " + SNAPSHOT_ID + " as the SnapshotId"; - Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); + Client remoteClient = getRemoteClusterClient(); ClusterStateResponse response = remoteClient.admin().cluster().prepareState().clear().setMetaData(true).setNodes(true) .get(ccrSettings.getRecoveryActionTimeout()); 
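Returning briefly to the unfollow change earlier in this diff: the new transport code fans out one retention-lease removal per leader primary and funnels the per-shard callbacks through a single GroupedActionListener. Below is a minimal, hypothetical sketch of that fan-out/fan-in pattern; only the GroupedActionListener constructor shape mirrors the diff, and every other class and method name is invented for illustration.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.GroupedActionListener;

import java.util.Collection;
import java.util.Collections;

final class LeaseRemovalSketch {

    void removeFromAllShards(final int numberOfShards, final ActionListener<Void> caller) {
        final GroupedActionListener<Void> grouped = new GroupedActionListener<>(
            new ActionListener<Collection<Void>>() {
                @Override
                public void onResponse(final Collection<Void> responses) {
                    caller.onResponse(null); // every shard acknowledged the removal
                }

                @Override
                public void onFailure(final Exception e) {
                    caller.onFailure(e);     // a per-shard failure fails the whole group
                }
            },
            numberOfShards,
            Collections.emptyList());

        for (int shard = 0; shard < numberOfShards; shard++) {
            // removeLeaseForShard is a stand-in for the per-shard remote call in the diff
            removeLeaseForShard(shard, grouped);
        }
    }

    void removeLeaseForShard(final int shard, final ActionListener<Void> listener) {
        listener.onResponse(null); // placeholder: a real implementation would call the remote cluster
    }
}

Note that the group size must equal the number of asynchronous calls actually issued, otherwise the delegate listener never fires; the diff satisfies this by sizing the group to the follower index's number of shards.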
ImmutableOpenMap indicesMap = response.getState().metaData().indices(); @@ -141,7 +175,7 @@ public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) { @Override public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) { assert SNAPSHOT_ID.equals(snapshotId) : "RemoteClusterRepository only supports " + SNAPSHOT_ID + " as the SnapshotId"; - Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); + Client remoteClient = getRemoteClusterClient(); // We set a single dummy index name to avoid fetching all the index data ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest("dummy_index_name"); ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest) @@ -153,7 +187,7 @@ public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) { public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId index) throws IOException { assert SNAPSHOT_ID.equals(snapshotId) : "RemoteClusterRepository only supports " + SNAPSHOT_ID + " as the SnapshotId"; String leaderIndex = index.getName(); - Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); + Client remoteClient = getRemoteClusterClient(); ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest(leaderIndex); ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest) @@ -192,7 +226,7 @@ public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId ind @Override public RepositoryData getRepositoryData() { - Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); + Client remoteClient = getRemoteClusterClient(); ClusterStateResponse response = remoteClient.admin().cluster().prepareState().clear().setMetaData(true) .get(ccrSettings.getRecoveryActionTimeout()); MetaData remoteMetaData = response.getState().getMetaData(); @@ -269,31 +303,110 @@ public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, public void restoreShard(IndexShard indexShard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId, RecoveryState recoveryState) { // TODO: Add timeouts to network calls / the restore process. 
+ createEmptyStore(indexShard, shardId); + + final Map ccrMetaData = indexShard.indexSettings().getIndexMetaData().getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); + final String leaderIndexName = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY); + final String leaderUUID = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY); + final Index leaderIndex = new Index(leaderIndexName, leaderUUID); + final ShardId leaderShardId = new ShardId(leaderIndex, shardId.getId()); + + final Client remoteClient = getRemoteClusterClient(); + + final String retentionLeaseId = + retentionLeaseId(localClusterName, indexShard.shardId().getIndex(), remoteClusterAlias, leaderIndex); + + acquireRetentionLeaseOnLeader(shardId, retentionLeaseId, leaderShardId, remoteClient); + + // schedule renewals to run during the restore + final Scheduler.Cancellable renewable = threadPool.scheduleWithFixedDelay( + () -> { + logger.trace("{} background renewal of retention lease [{}] during restore", indexShard.shardId(), retentionLeaseId); + final ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we have to execute under the system context so that if security is enabled the renewal is authorized + threadContext.markAsSystemContext(); + CcrRetentionLeases.asyncRenewRetentionLease( + leaderShardId, + retentionLeaseId, + RETAIN_ALL, + remoteClient, + ActionListener.wrap( + r -> {}, + e -> { + assert e instanceof ElasticsearchSecurityException == false : e; + logger.warn(new ParameterizedMessage( + "{} background renewal of retention lease [{}] failed during restore", + indexShard.shardId(), + retentionLeaseId), + e); + })); + } + }, + CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(indexShard.indexSettings().getNodeSettings()), + Ccr.CCR_THREAD_POOL_NAME); + + // TODO: There should be some local timeout. And if the remote cluster returns an unknown session + // response, we should be able to retry by creating a new session. 
+ try (RestoreSession restoreSession = openSession(metadata.name(), remoteClient, leaderShardId, indexShard, recoveryState)) { + restoreSession.restoreFiles(); + updateMappings(remoteClient, leaderIndex, restoreSession.mappingVersion, client, indexShard.routingEntry().index()); + } catch (Exception e) { + throw new IndexShardRestoreFailedException(indexShard.shardId(), "failed to restore snapshot [" + snapshotId + "]", e); + } finally { + logger.trace("{} canceling background renewal of retention lease [{}] at the end of restore", shardId, retentionLeaseId); + renewable.cancel(); + } + } + + private void createEmptyStore(final IndexShard indexShard, final ShardId shardId) { final Store store = indexShard.store(); store.incRef(); try { store.createEmpty(indexShard.indexSettings().getIndexMetaData().getCreationVersion().luceneVersion); - } catch (EngineException | IOException e) { + } catch (final EngineException | IOException e) { throw new IndexShardRecoveryException(shardId, "failed to create empty store", e); } finally { store.decRef(); } + } - Map ccrMetaData = indexShard.indexSettings().getIndexMetaData().getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY); - String leaderUUID = ccrMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY); - Index leaderIndex = new Index(shardId.getIndexName(), leaderUUID); - ShardId leaderShardId = new ShardId(leaderIndex, shardId.getId()); - - Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); - // TODO: There should be some local timeout. And if the remote cluster returns an unknown session - // response, we should be able to retry by creating a new session. - String name = metadata.name(); - try (RestoreSession restoreSession = openSession(name, remoteClient, leaderShardId, indexShard, recoveryState)) { - restoreSession.restoreFiles(); - updateMappings(remoteClient, leaderIndex, restoreSession.mappingVersion, client, indexShard.routingEntry().index()); - } catch (Exception e) { - throw new IndexShardRestoreFailedException(indexShard.shardId(), "failed to restore snapshot [" + snapshotId + "]", e); - } + void acquireRetentionLeaseOnLeader( + final ShardId shardId, + final String retentionLeaseId, + final ShardId leaderShardId, + final Client remoteClient) { + logger.trace( + () -> new ParameterizedMessage("{} requesting leader to add retention lease [{}]", shardId, retentionLeaseId)); + final TimeValue timeout = ccrSettings.getRecoveryActionTimeout(); + final Optional maybeAddAlready = + syncAddRetentionLease(leaderShardId, retentionLeaseId, RETAIN_ALL, remoteClient, timeout); + maybeAddAlready.ifPresent(addAlready -> { + logger.trace(() -> new ParameterizedMessage( + "{} retention lease [{}] already exists, requesting a renewal", + shardId, + retentionLeaseId), + addAlready); + final Optional maybeRenewNotFound = + syncRenewRetentionLease(leaderShardId, retentionLeaseId, RETAIN_ALL, remoteClient, timeout); + maybeRenewNotFound.ifPresent(renewNotFound -> { + logger.trace(() -> new ParameterizedMessage( + "{} retention lease [{}] not found while attempting to renew, requesting a final add", + shardId, + retentionLeaseId), + renewNotFound); + final Optional maybeFallbackAddAlready = + syncAddRetentionLease(leaderShardId, retentionLeaseId, RETAIN_ALL, remoteClient, timeout); + maybeFallbackAddAlready.ifPresent(fallbackAddAlready -> { + /* + * At this point we tried to add the lease and the retention lease already existed. By the time we tried to renew the + * lease, it expired or was removed. 
We tried to add the lease again and it already exists? Bail. + */ + assert false : fallbackAddAlready; + throw fallbackAddAlready; + }); + }); + }); } @Override @@ -319,13 +432,13 @@ private void updateMappings(Client leaderClient, Index leaderIndex, long leaderM } } - private RestoreSession openSession(String repositoryName, Client remoteClient, ShardId leaderShardId, IndexShard indexShard, + RestoreSession openSession(String repositoryName, Client remoteClient, ShardId leaderShardId, IndexShard indexShard, RecoveryState recoveryState) { String sessionUUID = UUIDs.randomBase64UUID(); PutCcrRestoreSessionAction.PutCcrRestoreSessionResponse response = remoteClient.execute(PutCcrRestoreSessionAction.INSTANCE, new PutCcrRestoreSessionRequest(sessionUUID, leaderShardId)).actionGet(ccrSettings.getRecoveryActionTimeout()); return new RestoreSession(repositoryName, remoteClient, sessionUUID, response.getNode(), indexShard, recoveryState, - response.getStoreFileMetaData(), response.getMappingVersion(), ccrSettings, throttledTime::inc); + response.getStoreFileMetaData(), response.getMappingVersion(), threadPool, ccrSettings, throttledTime::inc); } private static class RestoreSession extends FileRestoreContext implements Closeable { @@ -337,107 +450,136 @@ private static class RestoreSession extends FileRestoreContext implements Closea private final long mappingVersion; private final CcrSettings ccrSettings; private final LongConsumer throttleListener; + private final ThreadPool threadPool; RestoreSession(String repositoryName, Client remoteClient, String sessionUUID, DiscoveryNode node, IndexShard indexShard, RecoveryState recoveryState, Store.MetadataSnapshot sourceMetaData, long mappingVersion, - CcrSettings ccrSettings, LongConsumer throttleListener) { + ThreadPool threadPool, CcrSettings ccrSettings, LongConsumer throttleListener) { super(repositoryName, indexShard, SNAPSHOT_ID, recoveryState, Math.toIntExact(ccrSettings.getChunkSize().getBytes())); this.remoteClient = remoteClient; this.sessionUUID = sessionUUID; this.node = node; this.sourceMetaData = sourceMetaData; this.mappingVersion = mappingVersion; + this.threadPool = threadPool; this.ccrSettings = ccrSettings; this.throttleListener = throttleListener; } void restoreFiles() throws IOException { - ArrayList fileInfos = new ArrayList<>(); + ArrayList fileInfos = new ArrayList<>(); for (StoreFileMetaData fileMetaData : sourceMetaData) { ByteSizeValue fileSize = new ByteSizeValue(fileMetaData.length()); - fileInfos.add(new BlobStoreIndexShardSnapshot.FileInfo(fileMetaData.name(), fileMetaData, fileSize)); + fileInfos.add(new FileInfo(fileMetaData.name(), fileMetaData, fileSize)); } SnapshotFiles snapshotFiles = new SnapshotFiles(LATEST, fileInfos); restore(snapshotFiles); } @Override - protected InputStream fileInputStream(BlobStoreIndexShardSnapshot.FileInfo fileInfo) { - return new RestoreFileInputStream(remoteClient, sessionUUID, node, fileInfo.metadata(), ccrSettings, throttleListener); - } + protected void restoreFiles(List filesToRecover, Store store) throws IOException { + logger.trace("[{}] starting CCR restore of {} files", shardId, filesToRecover); + + try (MultiFileWriter multiFileWriter = new MultiFileWriter(store, recoveryState.getIndex(), "", logger, () -> { + })) { + final LocalCheckpointTracker requestSeqIdTracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); + final AtomicReference> error = new AtomicReference<>(); + + for (FileInfo fileInfo : filesToRecover) { + final long fileLength = 
fileInfo.length(); + long offset = 0; + while (offset < fileLength && error.get() == null) { + final long requestSeqId = requestSeqIdTracker.generateSeqNo(); + try { + requestSeqIdTracker.waitForOpsToComplete(requestSeqId - ccrSettings.getMaxConcurrentFileChunks()); + + if (error.get() != null) { + requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + break; + } + + final int bytesRequested = Math.toIntExact( + Math.min(ccrSettings.getChunkSize().getBytes(), fileLength - offset)); + offset += bytesRequested; + + final GetCcrRestoreFileChunkRequest request = + new GetCcrRestoreFileChunkRequest(node, sessionUUID, fileInfo.name(), bytesRequested); + logger.trace("[{}] [{}] fetching chunk for file [{}], expected offset: {}, size: {}", shardId, snapshotId, + fileInfo.name(), offset, bytesRequested); + + TimeValue timeout = ccrSettings.getRecoveryActionTimeout(); + ActionListener listener = + ListenerTimeouts.wrapWithTimeout(threadPool, ActionListener.wrap( + r -> threadPool.generic().execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); + requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + } + + @Override + protected void doRun() throws Exception { + final int actualChunkSize = r.getChunk().length(); + logger.trace("[{}] [{}] got response for file [{}], offset: {}, length: {}", shardId, + snapshotId, fileInfo.name(), r.getOffset(), actualChunkSize); + final long nanosPaused = ccrSettings.getRateLimiter().maybePause(actualChunkSize); + throttleListener.accept(nanosPaused); + final boolean lastChunk = r.getOffset() + actualChunkSize >= fileLength; + multiFileWriter.writeFileChunk(fileInfo.metadata(), r.getOffset(), r.getChunk(), lastChunk); + requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + } + }), + e -> { + error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); + requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + } + ), timeout, ThreadPool.Names.GENERIC, GetCcrRestoreFileChunkAction.NAME); + remoteClient.execute(GetCcrRestoreFileChunkAction.INSTANCE, request, listener); + } catch (Exception e) { + error.compareAndSet(null, Tuple.tuple(fileInfo.metadata(), e)); + requestSeqIdTracker.markSeqNoAsCompleted(requestSeqId); + } + } + } + + try { + requestSeqIdTracker.waitForOpsToComplete(requestSeqIdTracker.getMaxSeqNo()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new ElasticsearchException(e); + } + if (error.get() != null) { + handleError(store, error.get().v2()); + } + } - @Override - public void close() { - ClearCcrRestoreSessionRequest clearRequest = new ClearCcrRestoreSessionRequest(sessionUUID, node); - ClearCcrRestoreSessionAction.ClearCcrRestoreSessionResponse response = - remoteClient.execute(ClearCcrRestoreSessionAction.INSTANCE, clearRequest).actionGet(ccrSettings.getRecoveryActionTimeout()); + logger.trace("[{}] completed CCR restore", shardId); } - } - - private static class RestoreFileInputStream extends InputStream { - - private final Client remoteClient; - private final String sessionUUID; - private final DiscoveryNode node; - private final StoreFileMetaData fileToRecover; - private final CombinedRateLimiter rateLimiter; - private final CcrSettings ccrSettings; - private final LongConsumer throttleListener; - private long pos = 0; - - private RestoreFileInputStream(Client remoteClient, String sessionUUID, DiscoveryNode node, StoreFileMetaData fileToRecover, - CcrSettings ccrSettings, LongConsumer throttleListener) { 
- this.remoteClient = remoteClient; - this.sessionUUID = sessionUUID; - this.node = node; - this.fileToRecover = fileToRecover; - this.ccrSettings = ccrSettings; - this.rateLimiter = ccrSettings.getRateLimiter(); - this.throttleListener = throttleListener; + private void handleError(Store store, Exception e) throws IOException { + final IOException corruptIndexException; + if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) { + try { + store.markStoreCorrupted(corruptIndexException); + } catch (IOException ioe) { + logger.warn("store cannot be marked as corrupted", e); + } + throw corruptIndexException; + } else { + ExceptionsHelper.reThrowIfNotNull(e); + } } - @Override - public int read() throws IOException { + protected InputStream fileInputStream(FileInfo fileInfo) { throw new UnsupportedOperationException(); } @Override - public int read(byte[] bytes, int off, int len) throws IOException { - long remainingBytes = fileToRecover.length() - pos; - if (remainingBytes <= 0) { - return 0; - } - - int bytesRequested = (int) Math.min(remainingBytes, len); - - long nanosPaused = rateLimiter.maybePause(bytesRequested); - throttleListener.accept(nanosPaused); - - String fileName = fileToRecover.name(); - GetCcrRestoreFileChunkRequest request = new GetCcrRestoreFileChunkRequest(node, sessionUUID, fileName, bytesRequested); - GetCcrRestoreFileChunkAction.GetCcrRestoreFileChunkResponse response = - remoteClient.execute(GetCcrRestoreFileChunkAction.INSTANCE, request).actionGet(ccrSettings.getRecoveryActionTimeout()); - BytesReference fileChunk = response.getChunk(); - - int bytesReceived = fileChunk.length(); - if (bytesReceived > bytesRequested) { - throw new IOException("More bytes [" + bytesReceived + "] received than requested [" + bytesRequested + "]"); - } - - long leaderOffset = response.getOffset(); - assert pos == leaderOffset : "Position [" + pos + "] should be equal to the leader file offset [" + leaderOffset + "]."; - - try (StreamInput streamInput = fileChunk.streamInput()) { - int bytesRead = streamInput.read(bytes, 0, bytesReceived); - assert bytesRead == bytesReceived : "Did not read the correct number of bytes"; - } - - pos += bytesReceived; - - return bytesReceived; + public void close() { + ClearCcrRestoreSessionRequest clearRequest = new ClearCcrRestoreSessionRequest(sessionUUID, node); + ClearCcrRestoreSessionAction.ClearCcrRestoreSessionResponse response = + remoteClient.execute(ClearCcrRestoreSessionAction.INSTANCE, clearRequest).actionGet(ccrSettings.getRecoveryActionTimeout()); } - } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java index f093143112d3d..1c3ab60dd27cb 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java @@ -42,8 +42,6 @@ import java.util.HashSet; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.function.Consumer; import java.util.function.LongConsumer; public class CcrRestoreSourceService extends AbstractLifecycleComponent implements IndexEventListener { @@ -52,7 +50,6 @@ public class CcrRestoreSourceService extends AbstractLifecycleComponent implemen private final Map onGoingRestores = 
ConcurrentCollections.newConcurrentMap(); private final Map> sessionsForShard = new HashMap<>(); - private final CopyOnWriteArrayList> closeSessionListeners = new CopyOnWriteArrayList<>(); private final ThreadPool threadPool; private final CcrSettings ccrSettings; private final CounterMetric throttleTime = new CounterMetric(); @@ -93,12 +90,6 @@ protected synchronized void doClose() throws IOException { onGoingRestores.clear(); } - // TODO: The listeners are for testing. Once end-to-end file restore is implemented and can be tested, - // these should be removed. - public void addCloseSessionListener(Consumer listener) { - closeSessionListeners.add(listener); - } - public synchronized Store.MetadataSnapshot openSession(String sessionUUID, IndexShard indexShard) throws IOException { boolean success = false; RestoreSession restore = null; @@ -165,9 +156,7 @@ private void internalCloseSession(String sessionUUID, boolean throwIfSessionMiss } } } - closeSessionListeners.forEach(c -> c.accept(sessionUUID)); restore.decRef(); - } private Scheduler.Cancellable scheduleTimeout(String sessionUUID) { @@ -219,11 +208,7 @@ private Store.MetadataSnapshot getMetaData() throws IOException { } private long readFileBytes(String fileName, BytesReference reference) throws IOException { - Releasable lock = keyedLock.tryAcquire(fileName); - if (lock == null) { - throw new IllegalStateException("can't read from the same file on the same session concurrently"); - } - try (Releasable releasable = lock) { + try (Releasable ignored = keyedLock.acquire(fileName)) { final IndexInput indexInput = cachedInputs.computeIfAbsent(fileName, f -> { try { return commitRef.getIndexCommit().getDirectory().openInput(fileName, IOContext.READONCE); @@ -255,6 +240,7 @@ protected void closeInternal() { assert keyedLock.hasLockedKeys() == false : "Should not hold any file locks when closing"; timeoutTask.cancel(); IOUtils.closeWhileHandlingException(cachedInputs.values()); + IOUtils.closeWhileHandlingException(commitRef); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestForgetFollowerAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestForgetFollowerAction.java new file mode 100644 index 0000000000000..dc39aea372d81 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestForgetFollowerAction.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.ccr.action.ForgetFollowerAction; + +import java.io.IOException; +import java.util.Objects; + +public class RestForgetFollowerAction extends BaseRestHandler { + + public RestForgetFollowerAction(final Settings settings, final RestController restController) { + super(Objects.requireNonNull(settings)); + Objects.requireNonNull(restController); + restController.registerHandler(RestRequest.Method.POST, "/{index}/_ccr/forget_follower", this); + } + + @Override + public String getName() { + return "forget_follower_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) { + final String leaderIndex = restRequest.param("index"); + + return channel -> { + try (XContentParser parser = restRequest.contentOrSourceParamParser()) { + final ForgetFollowerAction.Request request = ForgetFollowerAction.Request.fromXContent(parser, leaderIndex); + client.execute(ForgetFollowerAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } catch (final IOException e) { + channel.sendResponse(new BytesRestResponse(channel, RestStatus.INTERNAL_SERVER_ERROR, e)); + } + }; + + } + +} diff --git a/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy index 45d92fd2b8aa1..16701ab74d8c9 100644 --- a/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy +++ b/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy @@ -1,50 +1,4 @@ grant { - // needed because of problems in unbound LDAP library - permission java.util.PropertyPermission "*", "read,write"; - - // required to configure the custom mailcap for watcher - permission java.lang.RuntimePermission "setFactory"; - - // needed when sending emails for javax.activation - // otherwise a classnotfound exception is thrown due to trying - // to load the class with the application class loader - permission java.lang.RuntimePermission "setContextClassLoader"; - permission java.lang.RuntimePermission "getClassLoader"; - // TODO: remove use of this jar as soon as possible!!!! 
- permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries"; - - // bouncy castle - permission java.security.SecurityPermission "putProviderProperty.BC"; - - // needed for x-pack security extension - permission java.security.SecurityPermission "createPolicy.JavaPolicy"; - permission java.security.SecurityPermission "getPolicy"; - permission java.security.SecurityPermission "setPolicy"; - // needed for multiple server implementations used in tests permission java.net.SocketPermission "*", "accept,connect"; - - // needed for Windows named pipes in machine learning - permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write"; }; - -grant codeBase "${codebase.netty-common}" { - // for reading the system-wide configuration for the backlog of established sockets - permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; -}; - -grant codeBase "${codebase.netty-transport}" { - // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 - // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! - permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; -}; - -grant codeBase "${codebase.elasticsearch-rest-client}" { - // rest client uses system properties which gets the default proxy - permission java.net.NetPermission "getProxySelector"; -}; - -grant codeBase "${codebase.httpasyncclient}" { - // rest client uses system properties which gets the default proxy - permission java.net.NetPermission "getProxySelector"; -}; \ No newline at end of file diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 11a14b45c5703..fd84725e4bd6e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack; import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; @@ -22,8 +23,11 @@ import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -34,15 +38,17 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentBuilder; import 
org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.seqno.SeqNoStats; @@ -58,10 +64,14 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.snapshots.RestoreInfo; +import org.elasticsearch.snapshots.RestoreService; +import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.NodeConfigurationSource; @@ -74,6 +84,7 @@ import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; @@ -99,10 +110,12 @@ import java.util.function.BooleanSupplier; import java.util.function.Function; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING; import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; +import static org.elasticsearch.snapshots.RestoreService.restoreInProgress; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.empty; @@ -114,6 +127,18 @@ public abstract class CcrIntegTestCase extends ESTestCase { private static ClusterGroup clusterGroup; + protected Collection> nodePlugins() { + return Collections.emptyList(); + } + + protected Settings leaderClusterSettings() { + return Settings.EMPTY; + } + + protected Settings followerClusterSettings() { + return Settings.EMPTY; + } + @Before public final void startClusters() throws Exception { if (clusterGroup != null && reuseClusters()) { @@ -125,10 +150,10 @@ public final void startClusters() throws Exception { stopClusters(); Collection> mockPlugins = Arrays.asList(ESIntegTestCase.TestSeedPlugin.class, MockHttpTransport.TestPlugin.class, MockTransportService.TestPlugin.class, - MockNioTransportPlugin.class); + MockNioTransportPlugin.class, InternalSettingsPlugin.class); InternalTestCluster leaderCluster = new InternalTestCluster(randomLong(), createTempDir(), true, true, numberOfNodesPerCluster(), - numberOfNodesPerCluster(), UUIDs.randomBase64UUID(random()), createNodeConfigurationSource(null), 0, "leader", mockPlugins, + numberOfNodesPerCluster(), "leader_cluster", createNodeConfigurationSource(null, true), 0, "leader", mockPlugins, Function.identity()); leaderCluster.beforeTest(random(), 0.0D); leaderCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster()); @@ -139,7 +164,7 
@@ public final void startClusters() throws Exception { String address = leaderCluster.getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); InternalTestCluster followerCluster = new InternalTestCluster(randomLong(), createTempDir(), true, true, numberOfNodesPerCluster(), - numberOfNodesPerCluster(), UUIDs.randomBase64UUID(random()), createNodeConfigurationSource(address), 0, "follower", + numberOfNodesPerCluster(), "follower_cluster", createNodeConfigurationSource(address, false), 0, "follower", mockPlugins, Function.identity()); clusterGroup = new ClusterGroup(leaderCluster, followerCluster); @@ -186,7 +211,7 @@ public void afterTest() throws Exception { } } - private NodeConfigurationSource createNodeConfigurationSource(String leaderSeedAddress) { + private NodeConfigurationSource createNodeConfigurationSource(final String leaderSeedAddress, final boolean leaderCluster) { Settings.Builder builder = Settings.builder(); builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE); // Default the watermarks to absurdly low to prevent the tests @@ -208,6 +233,11 @@ private NodeConfigurationSource createNodeConfigurationSource(String leaderSeedA builder.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial"); // Let cluster state api return quickly in order to speed up auto follow tests: builder.put(CcrSettings.CCR_WAIT_FOR_METADATA_TIMEOUT.getKey(), TimeValue.timeValueMillis(100)); + if (leaderCluster) { + builder.put(leaderClusterSettings()); + } else { + builder.put(followerClusterSettings()); + } if (configureRemoteClusterViaNodeSettings() && leaderSeedAddress != null) { builder.put("cluster.remote.leader_cluster.seeds", leaderSeedAddress); } @@ -224,7 +254,10 @@ public Path nodeConfigPath(int nodeOrdinal) { @Override public Collection> nodePlugins() { - return Arrays.asList(LocalStateCcr.class, CommonAnalysisPlugin.class); + return Stream.concat( + Stream.of(LocalStateCcr.class, CommonAnalysisPlugin.class), + CcrIntegTestCase.this.nodePlugins().stream()) + .collect(Collectors.toList()); } @Override @@ -357,13 +390,18 @@ protected void ensureEmptyWriteBuffers() throws Exception { protected void pauseFollow(String... 
indices) throws Exception { for (String index : indices) { final PauseFollowAction.Request unfollowRequest = new PauseFollowAction.Request(index); - followerClient().execute(PauseFollowAction.INSTANCE, unfollowRequest).get(); + assertAcked(followerClient().execute(PauseFollowAction.INSTANCE, unfollowRequest).actionGet()); } ensureNoCcrTasks(); } protected void ensureNoCcrTasks() throws Exception { assertBusy(() -> { + CcrStatsAction.Response statsResponse = + followerClient().execute(CcrStatsAction.INSTANCE, new CcrStatsAction.Request()).actionGet(); + assertThat("Follow stats not empty: " + Strings.toString(statsResponse.getFollowStats()), + statsResponse.getFollowStats().getStatsResponses(), empty()); + final ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); final PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); assertThat(tasks.tasks(), empty()); @@ -390,6 +428,7 @@ protected String getIndexSettings(final int numberOfShards, final int numberOfRe builder.startObject("settings"); { builder.field(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0); + builder.field(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s"); builder.field("index.number_of_shards", numberOfShards); builder.field("index.number_of_replicas", numberOfReplicas); for (final Map.Entry additionalSetting : additionalIndexSettings.entrySet()) { @@ -452,8 +491,18 @@ protected void assertIndexFullyReplicatedToFollower(String leaderIndex, String f logger.info("--> asserting <> between {} and {}", leaderIndex, followerIndex); assertBusy(() -> { Map> docsOnFollower = getDocIdAndSeqNos(clusterGroup.followerCluster, followerIndex); - logger.info("--> docs on the follower {}", docsOnFollower); - assertThat(docsOnFollower, equalTo(getDocIdAndSeqNos(clusterGroup.leaderCluster, leaderIndex))); + Map> docsOnLeader = getDocIdAndSeqNos(clusterGroup.leaderCluster, leaderIndex); + Map> mismatchedDocs = new HashMap<>(); + for (Map.Entry> fe : docsOnFollower.entrySet()) { + Set d1 = Sets.difference( + Sets.newHashSet(fe.getValue()), Sets.newHashSet(docsOnLeader.getOrDefault(fe.getKey(), Collections.emptyList()))); + Set d2 = Sets.difference( + Sets.newHashSet(docsOnLeader.getOrDefault(fe.getKey(), Collections.emptyList())), Sets.newHashSet(fe.getValue())); + if (d1.isEmpty() == false || d2.isEmpty() == false) { + mismatchedDocs.put(fe.getKey(), Sets.union(d1, d2)); + } + } + assertThat("mismatched documents [" + mismatchedDocs + "]", docsOnFollower, equalTo(docsOnLeader)); }, 120, TimeUnit.SECONDS); logger.info("--> asserting seq_no_stats between {} and {}", leaderIndex, followerIndex); @@ -482,14 +531,21 @@ private Map> getDocIdAndSeqNos(InternalTestClus Randomness.shuffle(shardRoutings); final Map> docs = new HashMap<>(); for (ShardRouting shardRouting : shardRoutings) { - if (shardRouting == null || shardRouting.assignedToNode() == false || docs.containsKey(shardRouting.shardId().id())) { + if (shardRouting == null || shardRouting.assignedToNode() == false) { continue; } IndexShard indexShard = cluster.getInstance(IndicesService.class, state.nodes().get(shardRouting.currentNodeId()).getName()) .indexServiceSafe(shardRouting.index()).getShard(shardRouting.id()); - docs.put(shardRouting.shardId().id(), IndexShardTestCase.getDocIdAndSeqNos(indexShard).stream() - .map(d -> new DocIdSeqNoAndTerm(d.getId(), d.getSeqNo(), 1L)) // normalize primary term as the follower use its own term - 
.collect(Collectors.toList())); + try { + final List docsOnShard = IndexShardTestCase.getDocIdAndSeqNos(indexShard); + logger.info("--> shard {} docs {} seq_no_stats {}", shardRouting, docsOnShard, indexShard.seqNoStats()); + docs.put(shardRouting.shardId().id(), docsOnShard.stream() + // normalize primary term as the follower use its own term + .map(d -> new DocIdSeqNoAndTerm(d.getId(), d.getSeqNo(), 1L)) + .collect(Collectors.toList())); + } catch (AlreadyClosedException e) { + // Ignore this exception and try getting List from other IndexShard instance. + } } return docs; } @@ -622,6 +678,61 @@ public long waitForDocs(final long numDocs, int maxWaitTime, TimeUnit maxWaitTim return lastKnownCount.get(); } + protected ActionListener waitForRestore( + final ClusterService clusterService, + final ActionListener listener) { + return new ActionListener() { + + @Override + public void onResponse(RestoreService.RestoreCompletionResponse restoreCompletionResponse) { + if (restoreCompletionResponse.getRestoreInfo() == null) { + final Snapshot snapshot = restoreCompletionResponse.getSnapshot(); + final String uuid = restoreCompletionResponse.getUuid(); + + final ClusterStateListener clusterStateListener = new ClusterStateListener() { + + @Override + public void clusterChanged(ClusterChangedEvent changedEvent) { + final RestoreInProgress.Entry prevEntry = restoreInProgress(changedEvent.previousState(), uuid); + final RestoreInProgress.Entry newEntry = restoreInProgress(changedEvent.state(), uuid); + if (prevEntry == null) { + /* + * When there is a master failure after a restore has been started, this listener might not be registered + * on the current master and as such it might miss some intermediary cluster states due to batching. + * Clean up the listener in that case and acknowledge completion of restore operation to client. 
+ */ + clusterService.removeListener(this); + listener.onResponse(null); + } else if (newEntry == null) { + clusterService.removeListener(this); + ImmutableOpenMap shards = prevEntry.shards(); + RestoreInfo ri = new RestoreInfo(prevEntry.snapshot().getSnapshotId().getName(), + prevEntry.indices(), + shards.size(), + shards.size() - RestoreService.failedShards(shards)); + logger.debug("restore of [{}] completed", snapshot); + listener.onResponse(ri); + } else { + // restore not completed yet, wait for next cluster state update + } + } + + }; + + clusterService.addListener(clusterStateListener); + } else { + listener.onResponse(restoreCompletionResponse.getRestoreInfo()); + } + } + + @Override + public void onFailure(Exception t) { + listener.onFailure(t); + } + + }; + } + static void removeCCRRelatedMetadataFromClusterState(ClusterService clusterService) throws Exception { CountDownLatch latch = new CountDownLatch(1); clusterService.submitStateUpdateTask("remove-ccr-related-metadata", new ClusterStateUpdateTask() { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java index c2760aa5efd6b..0316482571eb2 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java @@ -6,6 +6,8 @@ package org.elasticsearch.xpack; +import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction; +import org.elasticsearch.action.admin.cluster.remote.RemoteInfoRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.service.ClusterService; @@ -15,6 +17,7 @@ import org.elasticsearch.license.LicensesMetaData; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.transport.RemoteConnectionInfo; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.LocalStateCcr; @@ -30,6 +33,7 @@ import java.util.Collection; import java.util.Collections; +import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.CcrIntegTestCase.removeCCRRelatedMetadataFromClusterState; @@ -57,11 +61,17 @@ protected Collection> getPlugins() { } @Before - public void setupLocalRemote() { + public void setupLocalRemote() throws Exception { ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); String address = getInstanceFromNode(TransportService.class).boundAddress().publishAddress().toString(); updateSettingsRequest.transientSettings(Settings.builder().put("cluster.remote.local.seeds", address)); assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + assertBusy(() -> { + List infos = client().execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest()).get().getInfos(); + assertThat(infos.size(), equalTo(1)); + assertThat(infos.get(0).getNumNodesConnected(), equalTo(1)); + }); } @Before @@ -76,10 +86,15 @@ public void purgeCCRMetadata() throws Exception { } @After - public void removeLocalRemote() { + public void removeLocalRemote() throws Exception { ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); 
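The waitForRestore helper added above turns restore completion into an ActionListener callback by watching cluster-state updates until the RestoreInProgress entry disappears. A stripped-down, hypothetical sketch of that wait-for-cluster-state technique follows; the completion predicate is a placeholder.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.service.ClusterService;

import java.util.function.Predicate;

final class WaitForStateSketch {

    static void waitForState(
            final ClusterService clusterService,
            final Predicate<ClusterState> done,
            final ActionListener<Void> listener) {
        final ClusterStateListener stateListener = new ClusterStateListener() {
            @Override
            public void clusterChanged(final ClusterChangedEvent event) {
                if (done.test(event.state())) {
                    clusterService.removeListener(this); // deregister before notifying to avoid duplicate callbacks
                    listener.onResponse(null);
                }
                // otherwise keep waiting for the next cluster-state update
            }
        };
        clusterService.addListener(stateListener);
        // Like the helper in the diff, this sketch assumes the condition will eventually hold;
        // a production version would also handle the case where it is already true at registration time.
    }
}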
updateSettingsRequest.transientSettings(Settings.builder().put("cluster.remote.local.seeds", (String) null)); assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + assertBusy(() -> { + List infos = client().execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest()).get().getInfos(); + assertThat(infos.size(), equalTo(0)); + }); } protected AutoFollowStats getAutoFollowStats() { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index f12dcea4af9b5..0bcb3daac6284 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -31,11 +31,14 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class AutoFollowIT extends CcrIntegTestCase { @@ -131,47 +134,75 @@ public void testAutoFollowManyIndices() throws Exception { .build(); putAutoFollowPatterns("my-pattern", new String[] {"logs-*"}); - int numIndices = randomIntBetween(4, 32); + long numIndices = randomIntBetween(4, 8); for (int i = 0; i < numIndices; i++) { createLeaderIndex("logs-" + i, leaderIndexSettings); } - int expectedVal1 = numIndices; - assertBusy(() -> { - AutoFollowStats autoFollowStats = getAutoFollowStats(); - assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo((long) expectedVal1)); - }); + long expectedVal1 = numIndices; + MetaData[] metaData = new MetaData[1]; + AutoFollowStats[] autoFollowStats = new AutoFollowStats[1]; + try { + assertBusy(() -> { + metaData[0] = getFollowerCluster().clusterService().state().metaData(); + autoFollowStats[0] = getAutoFollowStats(); + + assertThat(metaData[0].indices().size(), equalTo((int) expectedVal1)); + AutoFollowMetadata autoFollowMetadata = metaData[0].custom(AutoFollowMetadata.TYPE); + assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), hasSize((int) expectedVal1)); + assertThat(autoFollowStats[0].getNumberOfSuccessfulFollowIndices(), equalTo(expectedVal1)); + }, 30, TimeUnit.SECONDS); + } catch (AssertionError ae) { + logger.warn("indices={}", Arrays.toString(metaData[0].indices().keys().toArray(String.class))); + logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0])); + throw ae; + } // Delete auto follow pattern and make sure that in the background the auto follower has stopped // then the leader index created after that should never be auto followed: deleteAutoFollowPatternSetting(); - assertBusy(() -> { - AutoFollowStats autoFollowStats = getAutoFollowStats(); - assertThat(autoFollowStats.getAutoFollowedClusters().size(), equalTo(0)); - }); + try { + assertBusy(() -> { + metaData[0] = getFollowerCluster().clusterService().state().metaData(); + autoFollowStats[0] = getAutoFollowStats(); + + assertThat(metaData[0].indices().size(), equalTo((int )expectedVal1)); + AutoFollowMetadata autoFollowMetadata = metaData[0].custom(AutoFollowMetadata.TYPE); + assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), nullValue()); + 
assertThat(autoFollowStats[0].getAutoFollowedClusters().size(), equalTo(0)); + }, 30, TimeUnit.SECONDS); + } catch (AssertionError ae) { + logger.warn("indices={}", Arrays.toString(metaData[0].indices().keys().toArray(String.class))); + logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0])); + throw ae; + } createLeaderIndex("logs-does-not-count", leaderIndexSettings); putAutoFollowPatterns("my-pattern", new String[] {"logs-*"}); - int i = numIndices; - numIndices = numIndices + randomIntBetween(4, 32); + long i = numIndices; + numIndices = numIndices + randomIntBetween(4, 8); for (; i < numIndices; i++) { createLeaderIndex("logs-" + i, leaderIndexSettings); } - int expectedVal2 = numIndices; + long expectedVal2 = numIndices; - MetaData[] metaData = new MetaData[1]; - AutoFollowStats[] autoFollowStats = new AutoFollowStats[1]; try { assertBusy(() -> { - metaData[0] = followerClient().admin().cluster().prepareState().get().getState().metaData(); + metaData[0] = getFollowerCluster().clusterService().state().metaData(); autoFollowStats[0] = getAutoFollowStats(); - int count = (int) Arrays.stream(metaData[0].getConcreteAllIndices()).filter(s -> s.startsWith("copy-")).count(); + + assertThat(metaData[0].indices().size(), equalTo((int) expectedVal2)); + AutoFollowMetadata autoFollowMetadata = metaData[0].custom(AutoFollowMetadata.TYPE); + // expectedVal2 + 1, because logs-does-not-count is also marked as auto followed. + // (This is because indices created before a pattern exists are not auto followed and are just marked as such.) + assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), hasSize((int) expectedVal2 + 1)); + long count = Arrays.stream(metaData[0].getConcreteAllIndices()).filter(s -> s.startsWith("copy-")).count(); assertThat(count, equalTo(expectedVal2)); // Ensure that there are no auto follow errors: // (added specifically to see that there are no leader indices auto followed multiple times) assertThat(autoFollowStats[0].getRecentAutoFollowErrors().size(), equalTo(0)); - }); + }, 30, TimeUnit.SECONDS); } catch (AssertionError ae) { - logger.warn("metadata={}", Strings.toString(metaData[0])); + logger.warn("indices={}", Arrays.toString(metaData[0].indices().keys().toArray(String.class))); logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0])); throw ae; } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java index 5dab22500a600..26182781233e2 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java @@ -45,11 +45,11 @@ protected AutoFollowMetadata createTestInstance() { leaderPatterns, randomAlphaOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), - new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), randomIntBetween(0, Integer.MAX_VALUE), - new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), new ByteSizeValue(randomNonNegativeLong()), TimeValue.timeValueMillis(500), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index 9f061b9c33099..14385bf4d6812 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -16,30 +16,26 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; -import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportActionProxy; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.ccr.action.repositories.GetCcrRestoreFileChunkAction; +import org.elasticsearch.xpack.ccr.action.repositories.PutCcrRestoreSessionAction; import org.elasticsearch.xpack.ccr.repository.CcrRepository; import org.elasticsearch.xpack.ccr.repository.CcrRestoreSourceService; @@ -48,19 +44,17 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.singletonMap; -import static org.elasticsearch.snapshots.RestoreService.restoreInProgress; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThan; -// TODO: Fold this integration test into a more expansive integration test as more bootstrap from remote work -// TODO: is completed. 
public class CcrRepositoryIT extends CcrIntegTestCase { private final IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); @@ -197,36 +191,6 @@ public void testDocsAreRecovered() throws Exception { leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get(); - AtomicBoolean isRunning = new AtomicBoolean(true); - - // Concurrently index new docs with mapping changes - Thread thread = new Thread(() -> { - char[] chars = "abcdeghijklmnopqrstuvwxyz".toCharArray(); - for (char c : chars) { - if (isRunning.get() == false) { - break; - } - final String source; - long l = randomLongBetween(0, 50000); - if (randomBoolean()) { - source = String.format(Locale.ROOT, "{\"%c\":%d}", c, l); - } else { - source = String.format(Locale.ROOT, "{\"%c\":\"%d\"}", c, l); - } - for (int i = 64; i < 150; i++) { - if (isRunning.get() == false) { - break; - } - leaderClient().prepareIndex("index1", "doc", Long.toString(i)).setSource(source, XContentType.JSON).get(); - if (rarely()) { - leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).get(); - } - } - leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get(); - } - }); - thread.start(); - Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); @@ -245,9 +209,6 @@ public void testDocsAreRecovered() throws Exception { assertExpectedDocument(followerIndex, i); } - isRunning.set(false); - thread.join(); - settingsRequest = new ClusterUpdateSettingsRequest(); ByteSizeValue defaultValue = CcrSettings.RECOVERY_CHUNK_SIZE.getDefault(Settings.EMPTY); settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), defaultValue)); @@ -325,7 +286,6 @@ public void testRateLimitingIsEmployed() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38027") public void testIndividualActionsTimeout() throws Exception { ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(); TimeValue timeValue = TimeValue.timeValueMillis(100); @@ -348,7 +308,8 @@ public void testIndividualActionsTimeout() throws Exception { MockTransportService mockTransportService = (MockTransportService) transportService; transportServices.add(mockTransportService); mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { - if (action.equals(GetCcrRestoreFileChunkAction.NAME) == false) { + if (action.equals(GetCcrRestoreFileChunkAction.NAME) == false && + action.equals(TransportActionProxy.getProxyAction(GetCcrRestoreFileChunkAction.NAME)) == false) { connection.sendRequest(requestId, action, request, options); } }); @@ -370,33 +331,39 @@ public void testIndividualActionsTimeout() throws Exception { .renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS)) .indexSettings(settingsBuilder); - final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); - final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); - PlainActionFuture future = PlainActionFuture.newFuture(); - restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); - - // Depending on when the timeout occurs this can fail in two ways. 
If it times-out when fetching - // metadata this will throw an exception. If it times-out when restoring a shard, the shard will - // be marked as failed. Either one is a success for the purpose of this test. try { - RestoreInfo restoreInfo = future.actionGet(); - assertThat(restoreInfo.failedShards(), greaterThan(0)); - assertThat(restoreInfo.successfulShards(), lessThan(restoreInfo.totalShards())); - assertEquals(numberOfPrimaryShards, restoreInfo.totalShards()); - } catch (Exception e) { - assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(ElasticsearchTimeoutException.class)); - } - + final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); + final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); + PlainActionFuture future = PlainActionFuture.newFuture(); + restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); + + // Depending on when the timeout occurs this can fail in two ways. If it times-out when fetching + // metadata this will throw an exception. If it times-out when restoring a shard, the shard will + // be marked as failed. Either one is a success for the purpose of this test. + try { + RestoreInfo restoreInfo = future.actionGet(); + assertThat(restoreInfo.failedShards(), greaterThan(0)); + assertThat(restoreInfo.successfulShards(), lessThan(restoreInfo.totalShards())); + assertEquals(numberOfPrimaryShards, restoreInfo.totalShards()); + } catch (Exception e) { + assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(ElasticsearchTimeoutException.class)); + } + } finally { + for (MockTransportService transportService : transportServices) { + transportService.clearAllRules(); + } - for (MockTransportService transportService : transportServices) { - transportService.clearAllRules(); + settingsRequest = new ClusterUpdateSettingsRequest(); + TimeValue defaultValue = CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getDefault(Settings.EMPTY); + settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getKey(), + defaultValue)); + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + // This test sets individual action timeouts low to attempt to replicated timeouts. Although the + // clear session action is not blocked, it is possible that it will still occasionally timeout. + // By wiping the leader index here, we ensure we do not trigger the index commit hanging around + // assertion because the commit is released when the index shard is closed. 
+ getLeaderCluster().wipeIndices(leaderIndex); } - - settingsRequest = new ClusterUpdateSettingsRequest(); - TimeValue defaultValue = CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getDefault(Settings.EMPTY); - settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getKey(), - defaultValue)); - assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); } public void testFollowerMappingIsUpdated() throws IOException { @@ -421,23 +388,60 @@ public void testFollowerMappingIsUpdated() throws IOException { .renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS)) .indexSettings(settingsBuilder); - final String source = String.format(Locale.ROOT, "{\"k\":%d}", 1); - leaderClient().prepareIndex("index1", "doc", Long.toString(1)).setSource(source, XContentType.JSON).get(); - PlainActionFuture future = PlainActionFuture.newFuture(); - restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); - RestoreInfo restoreInfo = future.actionGet(); + List transportServices = new ArrayList<>(); + CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean updateSent = new AtomicBoolean(false); + Runnable updateMappings = () -> { + if (updateSent.compareAndSet(false, true)) { + leaderClient() + .admin() + .indices() + .preparePutMapping(leaderIndex) + .setType("doc") + .setSource("{\"properties\":{\"k\":{\"type\":\"long\"}}}", XContentType.JSON) + .execute(ActionListener.wrap(latch::countDown)); + } + try { + latch.await(); + } catch (InterruptedException e) { + throw ExceptionsHelper.convertToRuntime(e); + } + }; - assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards()); - assertEquals(0, restoreInfo.failedShards()); + for (TransportService transportService : getFollowerCluster().getDataOrMasterNodeInstances(TransportService.class)) { + MockTransportService mockTransportService = (MockTransportService) transportService; + transportServices.add(mockTransportService); + mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (action.equals(PutCcrRestoreSessionAction.NAME)) { + updateMappings.run(); + connection.sendRequest(requestId, action, request, options); + } else { + connection.sendRequest(requestId, action, request, options); + } + }); + } + + try { + PlainActionFuture future = PlainActionFuture.newFuture(); + restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); + RestoreInfo restoreInfo = future.actionGet(); - ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); - clusterStateRequest.clear(); - clusterStateRequest.metaData(true); - clusterStateRequest.indices(followerIndex); - MappingMetaData mappingMetaData = followerClient().admin().indices().prepareGetMappings("index2").get().getMappings() - .get("index2").get("doc"); - assertThat(XContentMapValues.extractValue("properties.k.type", mappingMetaData.sourceAsMap()), equalTo("long")); + assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards()); + assertEquals(0, restoreInfo.failedShards()); + + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.clear(); + clusterStateRequest.metaData(true); + clusterStateRequest.indices(followerIndex); + MappingMetaData mappingMetaData = followerClient().admin().indices().prepareGetMappings("index2").get().getMappings() + .get("index2").get("doc"); + 
assertThat(XContentMapValues.extractValue("properties.k.type", mappingMetaData.sourceAsMap()), equalTo("long")); + } finally { + for (MockTransportService transportService : transportServices) { + transportService.clearAllRules(); + } + } } private void assertExpectedDocument(String followerIndex, final int value) { @@ -447,51 +451,4 @@ private void assertExpectedDocument(String followerIndex, final int value) { assertThat(getResponse.getSource().get("f"), equalTo(value)); } - private ActionListener waitForRestore(ClusterService clusterService, - ActionListener listener) { - return new ActionListener() { - @Override - public void onResponse(RestoreService.RestoreCompletionResponse restoreCompletionResponse) { - if (restoreCompletionResponse.getRestoreInfo() == null) { - final Snapshot snapshot = restoreCompletionResponse.getSnapshot(); - final String uuid = restoreCompletionResponse.getUuid(); - - ClusterStateListener clusterStateListener = new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent changedEvent) { - final RestoreInProgress.Entry prevEntry = restoreInProgress(changedEvent.previousState(), uuid); - final RestoreInProgress.Entry newEntry = restoreInProgress(changedEvent.state(), uuid); - if (prevEntry == null) { - // When there is a master failure after a restore has been started, this listener might not be registered - // on the current master and as such it might miss some intermediary cluster states due to batching. - // Clean up listener in that case and acknowledge completion of restore operation to client. - clusterService.removeListener(this); - listener.onResponse(null); - } else if (newEntry == null) { - clusterService.removeListener(this); - ImmutableOpenMap shards = prevEntry.shards(); - RestoreInfo ri = new RestoreInfo(prevEntry.snapshot().getSnapshotId().getName(), - prevEntry.indices(), - shards.size(), - shards.size() - RestoreService.failedShards(shards)); - logger.debug("restore of [{}] completed", snapshot); - listener.onResponse(ri); - } else { - // restore not completed yet, wait for next cluster state update - } - } - }; - - clusterService.addListener(clusterStateListener); - } else { - listener.onResponse(restoreCompletionResponse.getRestoreInfo()); - } - } - - @Override - public void onFailure(Exception t) { - listener.onFailure(t); - } - }; - } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java new file mode 100644 index 0000000000000..0dca0ffea2259 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -0,0 +1,1073 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.seqno.RetentionLease; +import org.elasticsearch.index.seqno.RetentionLeaseActions; +import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; +import org.elasticsearch.index.seqno.RetentionLeases; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardClosedException; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.snapshots.RestoreInfo; +import org.elasticsearch.snapshots.RestoreService; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportActionProxy; +import org.elasticsearch.transport.TransportMessageListener; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.CcrIntegTestCase; +import org.elasticsearch.xpack.ccr.action.repositories.ClearCcrRestoreSessionAction; +import org.elasticsearch.xpack.ccr.repository.CcrRepository; +import org.elasticsearch.xpack.core.ccr.action.ForgetFollowerAction; +import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; +import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction; +import org.elasticsearch.xpack.core.ccr.action.UnfollowAction; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import 
static java.util.Collections.singletonMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.retentionLeaseId; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.emptyArray; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; + +public class CcrRetentionLeaseIT extends CcrIntegTestCase { + + public static final class RetentionLeaseRenewIntervalSettingPlugin extends Plugin { + + @Override + public List> getSettings() { + return Collections.singletonList(CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING); + } + + } + + public static final class RetentionLeaseSyncIntervalSettingPlugin extends Plugin { + + @Override + public List> getSettings() { + return Collections.singletonList(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING); + } + + } + + @Override + protected Collection> nodePlugins() { + return Stream.concat( + super.nodePlugins().stream(), + Stream.of(RetentionLeaseRenewIntervalSettingPlugin.class, RetentionLeaseSyncIntervalSettingPlugin.class)) + .collect(Collectors.toList()); + } + + @Override + protected Settings followerClusterSettings() { + return Settings.builder() + .put(CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(200)) + .build(); + } + + private final IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + + private RestoreSnapshotRequest setUpRestoreSnapshotRequest( + final String leaderIndex, + final int numberOfShards, + final int numberOfReplicas, + final String followerIndex, + final int numberOfDocuments) throws IOException { + final ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(); + final String chunkSize = new ByteSizeValue(randomFrom(4, 128, 1024), ByteSizeUnit.KB).getStringRep(); + settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), chunkSize)); + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + + final String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster"; + + final Map additionalSettings = new HashMap<>(); + additionalSettings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"); + additionalSettings.put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(200).getStringRep()); + final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalSettings); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON)); + ensureLeaderGreen(leaderIndex); + + logger.info("indexing [{}] docs", numberOfDocuments); + for (int i = 0; i < numberOfDocuments; i++) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); + leaderClient().prepareIndex(leaderIndex, "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); + if (rarely()) { + leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get(); + } + } + + leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get(); + + final Settings.Builder settingsBuilder = Settings.builder() + .put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, 
followerIndex) + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); + return new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST) + .indexSettings(settingsBuilder) + .indices(leaderIndex) + .indicesOptions(indicesOptions) + .renamePattern("^(.*)$") + .renameReplacement(followerIndex) + .masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS)); + } + + public void testRetentionLeaseIsTakenAtTheStartOfRecovery() throws Exception { + final String leaderIndex = "leader"; + final int numberOfShards = randomIntBetween(1, 3); + final int numberOfReplicas = between(0, 1); + final String followerIndex = "follower"; + final int numberOfDocuments = scaledRandomIntBetween(1, 8192); + final RestoreSnapshotRequest restoreRequest = + setUpRestoreSnapshotRequest(leaderIndex, numberOfShards, numberOfReplicas, followerIndex, numberOfDocuments); + final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); + final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); + + final PlainActionFuture future = PlainActionFuture.newFuture(); + restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); + + // ensure that a retention lease has been put in place on each shard + assertBusy(() -> { + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List shardsStats = getShardsStats(stats); + for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { + assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); + final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); + } + }); + + final RestoreInfo restoreInfo = future.actionGet(); + + assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards()); + assertEquals(0, restoreInfo.failedShards()); + for (int i = 0; i < numberOfDocuments; ++i) { + assertExpectedDocument(followerIndex, i); + } + + } + + public void testRetentionLeaseIsRenewedDuringRecovery() throws Exception { + final String leaderIndex = "leader"; + final int numberOfShards = randomIntBetween(1, 3); + final int numberOfReplicas = between(0, 1); + final String followerIndex = "follower"; + final int numberOfDocuments = scaledRandomIntBetween(1, 8192); + final RestoreSnapshotRequest restoreRequest = + setUpRestoreSnapshotRequest(leaderIndex, numberOfShards, numberOfReplicas, followerIndex, numberOfDocuments); + final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); + final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); + + final CountDownLatch latch = new CountDownLatch(1); + + // block the recovery from completing; this ensures the background sync is still running + final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); + for (final DiscoveryNode senderNode : 
followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.addSendBehavior( + (connection, requestId, action, request, options) -> { + if (ClearCcrRestoreSessionAction.NAME.equals(action) + || TransportActionProxy.getProxyAction(ClearCcrRestoreSessionAction.NAME).equals(action)) { + try { + latch.await(); + } catch (final InterruptedException e) { + fail(e.toString()); + } + } + connection.sendRequest(requestId, action, request, options); + }); + } + + final PlainActionFuture future = PlainActionFuture.newFuture(); + restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); + + try { + assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); + latch.countDown(); + } finally { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.clearAllRules(); + } + } + + final RestoreInfo restoreInfo = future.actionGet(); + + assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards()); + + assertEquals(0, restoreInfo.failedShards()); + for (int i = 0; i < numberOfDocuments; i++) { + assertExpectedDocument(followerIndex, i); + } + + } + + public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws Exception { + final String leaderIndex = "leader"; + final int numberOfShards = randomIntBetween(1, 3); + final int numberOfReplicas = between(0, 1); + final String followerIndex = "follower"; + final int numberOfDocuments = scaledRandomIntBetween(1, 8192); + final RestoreSnapshotRequest restoreRequest = + setUpRestoreSnapshotRequest(leaderIndex, numberOfShards, numberOfReplicas, followerIndex, numberOfDocuments); + final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); + final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); + + final PlainActionFuture future = PlainActionFuture.newFuture(); + restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); + + final RestoreInfo restoreInfo = future.actionGet(); + final long start = System.nanoTime(); + + /* + * We want to ensure that the retention leases have been synced to all shard copies, as otherwise they might sync between the two + * times that we sample the retention leases, which would cause our check to fail. + */ + final TimeValue syncIntervalSetting = IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.get( + leaderClient() + .admin() + .indices() + .prepareGetSettings(leaderIndex) + .get() + .getIndexToSettings() + .get(leaderIndex)); + final long syncEnd = System.nanoTime(); + Thread.sleep(Math.max(0, randomIntBetween(2, 4) * syncIntervalSetting.millis() - TimeUnit.NANOSECONDS.toMillis(syncEnd - start))); + + final ClusterStateResponse leaderIndexClusterState = + leaderClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(leaderIndex).get(); + final String leaderUUID = leaderIndexClusterState.getState().metaData().index(leaderIndex).getIndexUUID(); + + /* + * We want to ensure that the background renewal is cancelled at the end of recovery. To do this, we will sleep a small multiple + * of the renew interval. 
If the renews are not cancelled, we expect that a renewal would have been sent while we were sleeping. + * After we wake up, it should be the case that the retention leases are the same (same timestamp) as that indicates that they were + * not renewed while we were sleeping. + */ + assertBusy(() -> { + // sample the leases after recovery + final List retentionLeases = new ArrayList<>(); + assertBusy(() -> { + retentionLeases.clear(); + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List shardsStats = getShardsStats(stats); + for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { + assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); + final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + final String expectedRetentionLeaseId = retentionLeaseId( + getFollowerCluster().getClusterName(), + new Index(followerIndex, followerUUID), + getLeaderCluster().getClusterName(), + new Index(leaderIndex, leaderUUID)); + assertThat(retentionLease.id(), equalTo(expectedRetentionLeaseId)); + retentionLeases.add(currentRetentionLeases); + } + }); + // sleep a small multiple of the renew interval + final TimeValue renewIntervalSetting = CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(followerClusterSettings()); + final long renewEnd = System.nanoTime(); + Thread.sleep( + Math.max(0, randomIntBetween(2, 4) * renewIntervalSetting.millis() - TimeUnit.NANOSECONDS.toMillis(renewEnd - start))); + + // now ensure that the retention leases are the same + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List shardsStats = getShardsStats(stats); + for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { + if (shardsStats.get(i).getShardRouting().primary() == false) { + continue; + } + assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); + final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(currentRetentionLeases.leases(), hasSize(1)); + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, followerUUID, leaderIndex, leaderUUID))); + // we assert that retention leases are not being renewed by an unchanged timestamp + 
assertThat(retentionLease.timestamp(), equalTo(retentionLeases.get(i).leases().iterator().next().timestamp())); + } + }); + + assertEquals(restoreInfo.totalShards(), restoreInfo.successfulShards()); + assertEquals(0, restoreInfo.failedShards()); + for (int i = 0; i < numberOfDocuments; ++i) { + assertExpectedDocument(followerIndex, i); + } + } + + public void testUnfollowRemovesRetentionLeases() throws Exception { + final String leaderIndex = "leader"; + final String followerIndex = "follower"; + final int numberOfShards = randomIntBetween(1, 4); + final String leaderIndexSettings = + getIndexSettings(numberOfShards, 0, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); + ensureLeaderYellow(leaderIndex); + final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + + ensureFollowerGreen(true, followerIndex); + + final String retentionLeaseId = getRetentionLeaseId(followerIndex, leaderIndex); + + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + final List shardsStats = getShardsStats(stats); + for (final ShardStats shardStats : shardsStats) { + assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), hasSize(1)); + assertThat( + shardStats.getRetentionLeaseStats().retentionLeases().leases().iterator().next().id(), + equalTo(retentionLeaseId)); + } + + // we will sometimes fake that some of the retention leases are already removed on the leader shard + final Set shardIds = + new HashSet<>(randomSubsetOf( + randomIntBetween(0, numberOfShards), + IntStream.range(0, numberOfShards).boxed().collect(Collectors.toSet()))); + + final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); + try { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.addSendBehavior( + (connection, requestId, action, request, options) -> { + if (RetentionLeaseActions.Remove.ACTION_NAME.equals(action) + || TransportActionProxy.getProxyAction(RetentionLeaseActions.Remove.ACTION_NAME).equals(action)) { + final RetentionLeaseActions.RemoveRequest removeRequest = (RetentionLeaseActions.RemoveRequest) request; + if (shardIds.contains(removeRequest.getShardId().id())) { + final String primaryShardNodeId = + getLeaderCluster() + .clusterService() + .state() + .routingTable() + .index(leaderIndex) + .shard(removeRequest.getShardId().id()) + .primaryShard() + .currentNodeId(); + final String primaryShardNodeName = + getLeaderCluster().clusterService().state().nodes().get(primaryShardNodeId).getName(); + final IndexShard primary = + getLeaderCluster() + .getInstance(IndicesService.class, primaryShardNodeName) + .getShardOrNull(removeRequest.getShardId()); + final CountDownLatch latch = new CountDownLatch(1); + primary.removeRetentionLease( + retentionLeaseId, + ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString()))); + try { + latch.await(); + } catch (final InterruptedException e) { + Thread.currentThread().interrupt(); + fail(e.toString()); 
+ } + } + } + connection.sendRequest(requestId, action, request, options); + }); + } + + pauseFollow(followerIndex); + assertAcked(followerClient().admin().indices().close(new CloseIndexRequest(followerIndex)).actionGet()); + assertAcked(followerClient().execute(UnfollowAction.INSTANCE, new UnfollowAction.Request(followerIndex)).actionGet()); + + final IndicesStatsResponse afterUnfollowStats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + final List afterUnfollowShardsStats = getShardsStats(afterUnfollowStats); + for (final ShardStats shardStats : afterUnfollowShardsStats) { + assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); + } + } finally { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.clearAllRules(); + } + } + } + + public void testUnfollowFailsToRemoveRetentionLeases() throws Exception { + final String leaderIndex = "leader"; + final String followerIndex = "follower"; + final int numberOfShards = randomIntBetween(1, 4); + final String leaderIndexSettings = + getIndexSettings(numberOfShards, 0, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); + ensureLeaderYellow(leaderIndex); + final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + + ensureFollowerGreen(true, followerIndex); + + pauseFollow(followerIndex); + followerClient().admin().indices().close(new CloseIndexRequest(followerIndex)).actionGet(); + + // we will disrupt requests to remove retention leases for these random shards + final Set shardIds = + new HashSet<>(randomSubsetOf( + randomIntBetween(1, numberOfShards), + IntStream.range(0, numberOfShards).boxed().collect(Collectors.toSet()))); + + final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); + try { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.addSendBehavior( + (connection, requestId, action, request, options) -> { + if (RetentionLeaseActions.Remove.ACTION_NAME.equals(action) + || TransportActionProxy.getProxyAction(RetentionLeaseActions.Remove.ACTION_NAME).equals(action)) { + final RetentionLeaseActions.RemoveRequest removeRequest = (RetentionLeaseActions.RemoveRequest) request; + if (shardIds.contains(removeRequest.getShardId().id())) { + throw randomBoolean() + ? 
new ConnectTransportException(connection.getNode(), "connection failed") + : new IndexShardClosedException(removeRequest.getShardId()); + } + } + connection.sendRequest(requestId, action, request, options); + }); + } + + final ElasticsearchException e = expectThrows( + ElasticsearchException.class, + () -> followerClient().execute(UnfollowAction.INSTANCE, new UnfollowAction.Request(followerIndex)).actionGet()); + + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); + + final ClusterStateResponse leaderIndexClusterState = + leaderClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(leaderIndex).get(); + final String leaderUUID = leaderIndexClusterState.getState().metaData().index(leaderIndex).getIndexUUID(); + + assertThat( + e.getMetadata("es.failed_to_remove_retention_leases"), + contains(retentionLeaseId( + getFollowerCluster().getClusterName(), + new Index(followerIndex, followerUUID), + getLeaderCluster().getClusterName(), + new Index(leaderIndex, leaderUUID)))); + } finally { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.clearAllRules(); + } + } + } + + public void testRetentionLeaseRenewedWhileFollowing() throws Exception { + final String leaderIndex = "leader"; + final String followerIndex = "follower"; + final int numberOfShards = randomIntBetween(1, 4); + final int numberOfReplicas = randomIntBetween(0, 1); + final Map additionalIndexSettings = new HashMap<>(); + additionalIndexSettings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), Boolean.toString(true)); + additionalIndexSettings.put( + IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), + TimeValue.timeValueMillis(200).getStringRep()); + final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalIndexSettings); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); + ensureLeaderYellow(leaderIndex); + final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + + ensureFollowerGreen(true, followerIndex); + assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); + } + + public void testRetentionLeaseAdvancesWhileFollowing() throws Exception { + final String leaderIndex = "leader"; + final String followerIndex = "follower"; + final int numberOfShards = randomIntBetween(1, 4); + final int numberOfReplicas = randomIntBetween(0, 1); + final Map additionalIndexSettings = new HashMap<>(); + additionalIndexSettings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), Boolean.toString(true)); + additionalIndexSettings.put( + IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), + TimeValue.timeValueMillis(200).getStringRep()); + final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalIndexSettings); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); + 
ensureLeaderYellow(leaderIndex); + final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + + ensureFollowerGreen(true, followerIndex); + + final int numberOfDocuments = randomIntBetween(128, 2048); + logger.debug("indexing [{}] docs", numberOfDocuments); + for (int i = 0; i < numberOfDocuments; i++) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); + leaderClient().prepareIndex(leaderIndex, "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); + if (rarely()) { + leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get(); + } + } + + // wait until the follower global checkpoints have caught up to the leader + assertIndexFullyReplicatedToFollower(leaderIndex, followerIndex); + + final List leaderShardsStats = getShardsStats(leaderClient().admin().indices().prepareStats(leaderIndex).get()); + final Map leaderGlobalCheckpoints = new HashMap<>(); + for (final ShardStats leaderShardStats : leaderShardsStats) { + final ShardRouting routing = leaderShardStats.getShardRouting(); + if (routing.primary() == false) { + continue; + } + leaderGlobalCheckpoints.put(routing.id(), leaderShardStats.getSeqNoStats().getGlobalCheckpoint()); + } + + // now assert that the retention leases have advanced to the global checkpoints + assertBusy(() -> { + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List shardsStats = getShardsStats(stats); + for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { + assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); + final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); + // we assert that retention leases are being advanced + assertThat( + retentionLease.retainingSequenceNumber(), + equalTo(leaderGlobalCheckpoints.get(shardsStats.get(i).getShardRouting().id()))); + } + }); + } + + public void testRetentionLeaseRenewalIsCancelledWhenFollowingIsPaused() throws Exception { + final String leaderIndex = "leader"; + final String followerIndex = "follower"; + final int numberOfShards = randomIntBetween(1, 4); + final int numberOfReplicas = randomIntBetween(0, 1); + final Map additionalIndexSettings = new HashMap<>(); + additionalIndexSettings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), Boolean.toString(true)); + additionalIndexSettings.put( + IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), + TimeValue.timeValueMillis(200).getStringRep()); + final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalIndexSettings); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); + ensureLeaderYellow(leaderIndex); + final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + + 
ensureFollowerGreen(true, followerIndex); + + final long start = System.nanoTime(); + pauseFollow(followerIndex); + + /* + * We want to ensure that the retention leases have been synced to all shard copies, as otherwise they might sync between the two + * times that we sample the retention leases, which would cause our check to fail. + */ + final TimeValue syncIntervalSetting = IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.get( + leaderClient() + .admin() + .indices() + .prepareGetSettings(leaderIndex) + .get() + .getIndexToSettings() + .get(leaderIndex)); + final long syncEnd = System.nanoTime(); + Thread.sleep(Math.max(0, randomIntBetween(2, 4) * syncIntervalSetting.millis() - TimeUnit.NANOSECONDS.toMillis(syncEnd - start))); + + final ClusterStateResponse leaderIndexClusterState = + leaderClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(leaderIndex).get(); + final String leaderUUID = leaderIndexClusterState.getState().metaData().index(leaderIndex).getIndexUUID(); + /* + * We want to ensure that the background renewal is cancelled after pausing. To do this, we will sleep a small multiple of the renew + * interval. If the renews are not cancelled, we expect that a renewal would have been sent while we were sleeping. After we wake + * up, it should be the case that the retention leases are the same (same timestamp) as that indicates that they were not renewed + * while we were sleeping. + */ + assertBusy(() -> { + // sample the leases after pausing + final List retentionLeases = new ArrayList<>(); + assertBusy(() -> { + retentionLeases.clear(); + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List shardsStats = getShardsStats(stats); + for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { + assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); + final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + final String expectedRetentionLeaseId = retentionLeaseId( + getFollowerCluster().getClusterName(), + new Index(followerIndex, followerUUID), + getLeaderCluster().getClusterName(), + new Index(leaderIndex, leaderUUID)); + assertThat(retentionLease.id(), equalTo(expectedRetentionLeaseId)); + retentionLeases.add(currentRetentionLeases); + } + }); + // sleep a small multiple of the renew interval + final TimeValue renewIntervalSetting = CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(followerClusterSettings()); + final long renewEnd = System.nanoTime(); + Thread.sleep( + Math.max(0, randomIntBetween(2, 4) * renewIntervalSetting.millis() - TimeUnit.NANOSECONDS.toMillis(renewEnd - start))); + + // now ensure that the retention leases are the same + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new 
IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List shardsStats = getShardsStats(stats); + for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { + if (shardsStats.get(i).getShardRouting().primary() == false) { + continue; + } + assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); + final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, followerUUID, leaderIndex, leaderUUID))); + // we assert that retention leases are not being renewed by an unchanged timestamp + assertThat(retentionLease.timestamp(), equalTo(retentionLeases.get(i).leases().iterator().next().timestamp())); + } + }); + } + + public void testRetentionLeaseRenewalIsResumedWhenFollowingIsResumed() throws Exception { + final String leaderIndex = "leader"; + final String followerIndex = "follower"; + final int numberOfShards = randomIntBetween(1, 4); + final int numberOfReplicas = randomIntBetween(0, 1); + final Map additionalIndexSettings = new HashMap<>(); + additionalIndexSettings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), Boolean.toString(true)); + additionalIndexSettings.put( + IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), + TimeValue.timeValueMillis(200).getStringRep()); + final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalIndexSettings); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); + ensureLeaderYellow(leaderIndex); + final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + + ensureFollowerGreen(true, followerIndex); + + pauseFollow(followerIndex); + + followerClient().execute(ResumeFollowAction.INSTANCE, resumeFollow(followerIndex)).actionGet(); + + ensureFollowerGreen(true, followerIndex); + + assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); + } + + public void testRetentionLeaseIsAddedIfItDisappearsWhileFollowing() throws Exception { + final String leaderIndex = "leader"; + final String followerIndex = "follower"; + final int numberOfShards = 1; + final int numberOfReplicas = 1; + final Map additionalIndexSettings = new HashMap<>(); + additionalIndexSettings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), Boolean.toString(true)); + additionalIndexSettings.put( + IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), + TimeValue.timeValueMillis(200).getStringRep()); + final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalIndexSettings); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); + ensureLeaderYellow(leaderIndex); + 
final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + + ensureFollowerGreen(true, followerIndex); + + final CountDownLatch latch = new CountDownLatch(1); + + final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); + try { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.addSendBehavior( + (connection, requestId, action, request, options) -> { + if (RetentionLeaseActions.Renew.ACTION_NAME.equals(action) + || TransportActionProxy.getProxyAction(RetentionLeaseActions.Renew.ACTION_NAME).equals(action)) { + senderTransportService.clearAllRules(); + final RetentionLeaseActions.RenewRequest renewRequest = (RetentionLeaseActions.RenewRequest) request; + final String primaryShardNodeId = + getLeaderCluster() + .clusterService() + .state() + .routingTable() + .index(leaderIndex) + .shard(renewRequest.getShardId().id()) + .primaryShard() + .currentNodeId(); + final String primaryShardNodeName = + getLeaderCluster().clusterService().state().nodes().get(primaryShardNodeId).getName(); + final IndexShard primary = + getLeaderCluster() + .getInstance(IndicesService.class, primaryShardNodeName) + .getShardOrNull(renewRequest.getShardId()); + final CountDownLatch innerLatch = new CountDownLatch(1); + // this forces the background renewal from following to face a retention lease not found exception + primary.removeRetentionLease( + getRetentionLeaseId(followerIndex, leaderIndex), + ActionListener.wrap(r -> innerLatch.countDown(), e -> fail(e.toString()))); + + try { + innerLatch.await(); + } catch (final InterruptedException e) { + Thread.currentThread().interrupt(); + fail(e.toString()); + } + + latch.countDown(); + } + connection.sendRequest(requestId, action, request, options); + }); + } + + latch.await(); + + assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); + } finally { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.clearAllRules(); + } + } + } + + /** + * This test is fairly evil. This test is to ensure that we are protected against a race condition when unfollowing and a background + * renewal fires. The action of unfollowing will remove retention leases from the leader. If a background renewal is firing at that + * time, it means that we will be met with a retention lease not found exception. That will in turn trigger behavior to attempt to + * re-add the retention lease, which means we are left in a situation where we have unfollowed, but the retention lease still remains + * on the leader. However, we have a guard against this in the callback after the retention lease not found exception is thrown, which + * checks if the shard follow node task is cancelled or completed. + * + * To test this this behavior is correct, we capture the call to renew the retention lease. Then, we will step in between and execute + * an unfollow request. This will remove the retention lease on the leader. 
At this point, we can unlatch the renew call, which will + * now be met with a retention lease not found exception. We will cheat and wait for that response to come back, and then synchronously + * trigger the listener which will check to see if the shard follow node task is cancelled or completed, and if not, add the retention + * lease back. After that listener returns, we can check to see if a retention lease exists on the leader. + * + * Note, this done mean that listener will fire twice, once in our onResponseReceived hook, and once after our onResponseReceived + * callback returns. 🤷‍♀️ + * + * @throws Exception if an exception occurs in the main test thread + */ + public void testPeriodicRenewalDoesNotAddRetentionLeaseAfterUnfollow() throws Exception { + final String leaderIndex = "leader"; + final String followerIndex = "follower"; + final int numberOfShards = 1; + final int numberOfReplicas = 1; + final Map additionalIndexSettings = new HashMap<>(); + additionalIndexSettings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), Boolean.toString(true)); + additionalIndexSettings.put( + IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), + TimeValue.timeValueMillis(200).getStringRep()); + final String leaderIndexSettings = getIndexSettings(numberOfShards, numberOfReplicas, additionalIndexSettings); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); + ensureLeaderYellow(leaderIndex); + final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + + ensureFollowerGreen(true, followerIndex); + + final CountDownLatch removeLeaseLatch = new CountDownLatch(1); + final CountDownLatch unfollowLatch = new CountDownLatch(1); + final CountDownLatch responseLatch = new CountDownLatch(1); + + final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); + + try { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.addSendBehavior( + (connection, requestId, action, request, options) -> { + if (RetentionLeaseActions.Renew.ACTION_NAME.equals(action) + || TransportActionProxy.getProxyAction(RetentionLeaseActions.Renew.ACTION_NAME).equals(action)) { + final String retentionLeaseId = getRetentionLeaseId(followerIndex, leaderIndex); + try { + removeLeaseLatch.countDown(); + unfollowLatch.await(); + + senderTransportService.transport().addMessageListener(new TransportMessageListener() { + + @SuppressWarnings("rawtypes") + @Override + public void onResponseReceived( + final long responseRequestId, + final Transport.ResponseContext context) { + if (requestId == responseRequestId) { + final RetentionLeaseNotFoundException e = + new RetentionLeaseNotFoundException(retentionLeaseId); + context.handler().handleException(new RemoteTransportException(e.getMessage(), e)); + responseLatch.countDown(); + senderTransportService.transport().removeMessageListener(this); + } + } + + }); + + } catch (final InterruptedException e) { + Thread.currentThread().interrupt(); + fail(e.toString()); + } + } + connection.sendRequest(requestId, action, request, options); + }); + } + + removeLeaseLatch.await(); + + pauseFollow(followerIndex); + 
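+            // the follower index is closed so that the unfollow request below can execute; as described above,
+            // unfollowing removes the follower's retention leases from the leader shards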
assertAcked(followerClient().admin().indices().close(new CloseIndexRequest(followerIndex)).actionGet()); + assertAcked(followerClient().execute(UnfollowAction.INSTANCE, new UnfollowAction.Request(followerIndex)).actionGet()); + + unfollowLatch.countDown(); + + responseLatch.await(); + + final IndicesStatsResponse afterUnfollowStats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + final List afterUnfollowShardsStats = getShardsStats(afterUnfollowStats); + for (final ShardStats shardStats : afterUnfollowShardsStats) { + assertNotNull(shardStats.getRetentionLeaseStats()); + assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); + } + } finally { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.clearAllRules(); + } + } + } + + public void testForgetFollower() throws Exception { + final String leaderIndex = "leader"; + final String followerIndex = "follower"; + final int numberOfShards = randomIntBetween(1, 4); + final String leaderIndexSettings = + getIndexSettings(numberOfShards, 0, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON).get()); + ensureLeaderYellow(leaderIndex); + final PutFollowAction.Request followRequest = putFollow(leaderIndex, followerIndex); + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + + ensureFollowerGreen(true, followerIndex); + + pauseFollow(followerIndex); + followerClient().admin().indices().close(new CloseIndexRequest(followerIndex)).actionGet(); + + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); + + final BroadcastResponse forgetFollowerResponse = leaderClient().execute( + ForgetFollowerAction.INSTANCE, + new ForgetFollowerAction.Request( + getFollowerCluster().getClusterName(), + followerIndex, + followerUUID, + "leader_cluster", + leaderIndex)).actionGet(); + + logger.info(Strings.toString(forgetFollowerResponse)); + assertThat(forgetFollowerResponse.getTotalShards(), equalTo(numberOfShards)); + assertThat(forgetFollowerResponse.getSuccessfulShards(), equalTo(numberOfShards)); + assertThat(forgetFollowerResponse.getFailedShards(), equalTo(0)); + assertThat(forgetFollowerResponse.getShardFailures(), emptyArray()); + + final IndicesStatsResponse afterForgetFollowerStats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + final List afterForgetFollowerShardsStats = getShardsStats(afterForgetFollowerStats); + for (final ShardStats shardStats : afterForgetFollowerShardsStats) { + assertNotNull(shardStats.getRetentionLeaseStats()); + assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); + } + } + + private void assertRetentionLeaseRenewal( + final int numberOfShards, + final int numberOfReplicas, + final String followerIndex, + final String leaderIndex) throws Exception { + // ensure that a retention lease 
has been put in place on each shard, and grab a copy of them + final List retentionLeases = new ArrayList<>(); + assertBusy(() -> { + retentionLeases.clear(); + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List shardsStats = getShardsStats(stats); + for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { + assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); + final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); + retentionLeases.add(currentRetentionLeases); + } + }); + + // now ensure that the retention leases are being renewed + assertBusy(() -> { + final IndicesStatsResponse stats = + leaderClient().admin().indices().stats(new IndicesStatsRequest().clear().indices(leaderIndex)).actionGet(); + assertNotNull(stats.getShards()); + assertThat(stats.getShards(), arrayWithSize(numberOfShards * (1 + numberOfReplicas))); + final List shardsStats = getShardsStats(stats); + for (int i = 0; i < numberOfShards * (1 + numberOfReplicas); i++) { + assertNotNull(shardsStats.get(i).getRetentionLeaseStats()); + final RetentionLeases currentRetentionLeases = shardsStats.get(i).getRetentionLeaseStats().retentionLeases(); + assertThat(Strings.toString(shardsStats.get(i)), currentRetentionLeases.leases(), hasSize(1)); + final RetentionLease retentionLease = + currentRetentionLeases.leases().iterator().next(); + assertThat(retentionLease.id(), equalTo(getRetentionLeaseId(followerIndex, leaderIndex))); + // we assert that retention leases are being renewed by an increase in the timestamp + assertThat(retentionLease.timestamp(), greaterThan(retentionLeases.get(i).leases().iterator().next().timestamp())); + } + }); + } + + /** + * Extract the shard stats from an indices stats response, with the stats ordered by shard ID with primaries first. This is to have a + * consistent ordering when comparing two responses. 
+ * + * @param stats the indices stats + * @return the shard stats in sorted order with (shard ID, primary) as the sort key + */ + private List getShardsStats(final IndicesStatsResponse stats) { + return Arrays.stream(stats.getShards()) + .sorted((s, t) -> { + if (s.getShardRouting().shardId().id() == t.getShardRouting().shardId().id()) { + return -Boolean.compare(s.getShardRouting().primary(), t.getShardRouting().primary()); + } else { + return Integer.compare(s.getShardRouting().shardId().id(), t.getShardRouting().shardId().id()); + } + }) + .collect(Collectors.toList()); + } + + private String getRetentionLeaseId(final String followerIndex, final String leaderIndex) { + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(followerIndex).get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index(followerIndex).getIndexUUID(); + + final ClusterStateResponse leaderIndexClusterState = + leaderClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices(leaderIndex).get(); + final String leaderUUID = leaderIndexClusterState.getState().metaData().index(leaderIndex).getIndexUUID(); + + return getRetentionLeaseId(followerIndex, followerUUID, leaderIndex, leaderUUID); + } + + private String getRetentionLeaseId(String followerIndex, String followerUUID, String leaderIndex, String leaderUUID) { + return retentionLeaseId( + getFollowerCluster().getClusterName(), + new Index(followerIndex, followerUUID), + getLeaderCluster().getClusterName(), + new Index(leaderIndex, leaderUUID)); + } + + private void assertExpectedDocument(final String followerIndex, final int value) { + final GetResponse getResponse = followerClient().prepareGet(followerIndex, "doc", Integer.toString(value)).get(); + assertTrue("doc with id [" + value + "] is missing", getResponse.isExists()); + assertTrue((getResponse.getSource().containsKey("f"))); + assertThat(getResponse.getSource().get("f"), equalTo(value)); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java new file mode 100644 index 0000000000000..0551d30c2e73a --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CloseFollowerIndexIT.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.xpack.CcrIntegTestCase; +import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class CloseFollowerIndexIT extends CcrIntegTestCase { + + public void testCloseAndReopenFollowerIndex() throws Exception { + final String leaderIndexSettings = getIndexSettings(1, 1, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureLeaderYellow("index1"); + + PutFollowAction.Request followRequest = new PutFollowAction.Request(); + followRequest.setRemoteCluster("leader_cluster"); + followRequest.setLeaderIndex("index1"); + followRequest.setFollowerIndex("index2"); + followRequest.getParameters().setMaxRetryDelay(TimeValue.timeValueMillis(10)); + followRequest.getParameters().setReadPollTimeout(TimeValue.timeValueMillis(10)); + followRequest.getParameters().setMaxReadRequestSize(new ByteSizeValue(1)); + followRequest.getParameters().setMaxOutstandingReadRequests(128); + followRequest.waitForActiveShards(ActiveShardCount.DEFAULT); + + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + ensureFollowerGreen("index2"); + + AtomicBoolean isRunning = new AtomicBoolean(true); + int numThreads = 4; + Thread[] threads = new Thread[numThreads]; + for (int i = 0; i < numThreads; i++) { + threads[i] = new Thread(() -> { + while (isRunning.get()) { + leaderClient().prepareIndex("index1", "doc").setSource("{}", XContentType.JSON).get(); + } + }); + threads[i].start(); + } + + atLeastDocsIndexed(followerClient(), "index2", 32); + AcknowledgedResponse response = followerClient().admin().indices().close(new CloseIndexRequest("index2")).get(); + assertThat(response.isAcknowledged(), is(true)); + + ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); + List blocks = new ArrayList<>(clusterState.getBlocks().indices().get("index2")); + assertThat(blocks.size(), equalTo(1)); + assertThat(blocks.get(0).id(), equalTo(MetaDataIndexStateService.INDEX_CLOSED_BLOCK_ID)); + + isRunning.set(false); + for (Thread thread : threads) { + thread.join(); + } + assertAcked(followerClient().admin().indices().open(new OpenIndexRequest("index2")).get()); + + refresh(leaderClient(), "index1"); + SearchRequest leaderSearchRequest = new SearchRequest("index1"); + leaderSearchRequest.source().trackTotalHits(true); + long leaderIndexDocs 
= leaderClient().search(leaderSearchRequest).actionGet().getHits().getTotalHits().value; + assertBusy(() -> { + refresh(followerClient(), "index2"); + SearchRequest followerSearchRequest = new SearchRequest("index2"); + followerSearchRequest.source().trackTotalHits(true); + long followerIndexDocs = followerClient().search(followerSearchRequest).actionGet().getHits().getTotalHits().value; + assertThat(followerIndexDocs, equalTo(leaderIndexDocs)); + }); + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java index 32f63787db908..c1d6a3aad337a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java @@ -45,7 +45,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -@TestLogging("org.elasticsearch.xpack.ccr:TRACE,org.elasticsearch.index.shard:DEBUG") +@TestLogging("org.elasticsearch.xpack.ccr:TRACE,org.elasticsearch.xpack.ccr.action.ShardChangesAction:DEBUG," + + "org.elasticsearch.index.shard:TRACE") public class FollowerFailOverIT extends CcrIntegTestCase { @Override @@ -54,12 +55,14 @@ protected boolean reuseClusters() { } public void testFailOverOnFollower() throws Exception { + final String leaderIndex = "leader_test_failover"; + final String followerIndex = "follower_test_failover"; int numberOfReplicas = between(1, 2); getFollowerCluster().startMasterOnlyNode(); getFollowerCluster().ensureAtLeastNumDataNodes(numberOfReplicas + between(1, 2)); String leaderIndexSettings = getIndexSettings(1, numberOfReplicas, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); - assertAcked(leaderClient().admin().indices().prepareCreate("leader-index").setSource(leaderIndexSettings, XContentType.JSON)); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON)); AtomicBoolean stopped = new AtomicBoolean(); Thread[] threads = new Thread[between(1, 8)]; AtomicInteger docID = new AtomicInteger(); @@ -76,20 +79,20 @@ public void testFailOverOnFollower() throws Exception { } if (frequently()) { String id = Integer.toString(frequently() ? 
docID.incrementAndGet() : between(0, 10)); // sometimes update - IndexResponse indexResponse = leaderClient().prepareIndex("leader-index", "doc", id) + IndexResponse indexResponse = leaderClient().prepareIndex(leaderIndex, "doc", id) .setSource("{\"f\":" + id + "}", XContentType.JSON).get(); - logger.info("--> index id={} seq_no={}", indexResponse.getId(), indexResponse.getSeqNo()); + logger.info("--> index {} id={} seq_no={}", leaderIndex, indexResponse.getId(), indexResponse.getSeqNo()); } else { String id = Integer.toString(between(0, docID.get())); - DeleteResponse deleteResponse = leaderClient().prepareDelete("leader-index", "doc", id).get(); - logger.info("--> delete id={} seq_no={}", deleteResponse.getId(), deleteResponse.getSeqNo()); + DeleteResponse deleteResponse = leaderClient().prepareDelete(leaderIndex, "doc", id).get(); + logger.info("--> delete {} id={} seq_no={}", leaderIndex, deleteResponse.getId(), deleteResponse.getSeqNo()); } } }); threads[i].start(); } availableDocs.release(between(100, 200)); - PutFollowAction.Request follow = putFollow("leader-index", "follower-index"); + PutFollowAction.Request follow = putFollow(leaderIndex, followerIndex); follow.getParameters().setMaxReadRequestOperationCount(randomIntBetween(32, 2048)); follow.getParameters().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); follow.getParameters().setMaxOutstandingReadRequests(randomIntBetween(1, 10)); @@ -98,11 +101,11 @@ public void testFailOverOnFollower() throws Exception { follow.getParameters().setMaxOutstandingWriteRequests(randomIntBetween(1, 10)); logger.info("--> follow request {}", Strings.toString(follow)); followerClient().execute(PutFollowAction.INSTANCE, follow).get(); - disableDelayedAllocation("follower-index"); - ensureFollowerGreen("follower-index"); - awaitGlobalCheckpointAtLeast(followerClient(), new ShardId(resolveFollowerIndex("follower-index"), 0), between(30, 80)); + disableDelayedAllocation(followerIndex); + ensureFollowerGreen(followerIndex); + awaitGlobalCheckpointAtLeast(followerClient(), new ShardId(resolveFollowerIndex(followerIndex), 0), between(30, 80)); final ClusterState clusterState = getFollowerCluster().clusterService().state(); - for (ShardRouting shardRouting : clusterState.routingTable().allShards("follower-index")) { + for (ShardRouting shardRouting : clusterState.routingTable().allShards(followerIndex)) { if (shardRouting.primary()) { DiscoveryNode assignedNode = clusterState.nodes().get(shardRouting.currentNodeId()); getFollowerCluster().restartNode(assignedNode.getName(), new InternalTestCluster.RestartCallback()); @@ -110,18 +113,17 @@ public void testFailOverOnFollower() throws Exception { } } availableDocs.release(between(50, 200)); - ensureFollowerGreen("follower-index"); + ensureFollowerGreen(followerIndex); availableDocs.release(between(50, 200)); - awaitGlobalCheckpointAtLeast(followerClient(), new ShardId(resolveFollowerIndex("follower-index"), 0), between(100, 150)); + awaitGlobalCheckpointAtLeast(followerClient(), new ShardId(resolveFollowerIndex(followerIndex), 0), between(100, 150)); stopped.set(true); for (Thread thread : threads) { thread.join(); } - assertIndexFullyReplicatedToFollower("leader-index", "follower-index"); - pauseFollow("follower-index"); + assertIndexFullyReplicatedToFollower(leaderIndex, followerIndex); + pauseFollow(followerIndex); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33337") public void testFollowIndexAndCloseNode() throws Exception { 
getFollowerCluster().ensureAtLeastNumDataNodes(3); String leaderIndexSettings = getIndexSettings(3, 1, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); @@ -234,7 +236,6 @@ public void testAddNewReplicasOnFollower() throws Exception { pauseFollow("follower-index"); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37807") public void testReadRequestsReturnLatestMappingVersion() throws Exception { InternalTestCluster leaderCluster = getLeaderCluster(); Settings nodeAttributes = Settings.builder().put("node.attr.box", "large").build(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 80bded6a5d1d3..d9b75f416b38d 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ccr; +import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; @@ -15,6 +16,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest; @@ -43,9 +45,12 @@ import org.elasticsearch.cluster.health.ClusterShardHealth; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -53,8 +58,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.rest.RestStatus; @@ -62,6 +69,8 @@ import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.transport.NoSuchRemoteClusterException; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.ccr.action.ShardFollowTask; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; @@ -87,11 +96,13 @@ import 
java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BooleanSupplier; +import java.util.function.Consumer; import java.util.stream.Collectors; import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.retentionLeaseId; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -106,6 +117,12 @@ public class IndexFollowingIT extends CcrIntegTestCase { public void testFollowIndex() throws Exception { final int numberOfPrimaryShards = randomIntBetween(1, 3); int numberOfReplicas = between(0, 1); + + followerClient().admin().cluster().prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), + new ByteSizeValue(randomIntBetween(1, 1000), ByteSizeUnit.KB))) + .get(); + final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, numberOfReplicas, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); @@ -114,7 +131,7 @@ public void testFollowIndex() throws Exception { final int firstBatchNumDocs; // Sometimes we want to index a lot of documents to ensure that the recovery works with larger files if (rarely()) { - firstBatchNumDocs = randomIntBetween(1800, 2000); + firstBatchNumDocs = randomIntBetween(1800, 10000); } else { firstBatchNumDocs = randomIntBetween(10, 64); } @@ -127,6 +144,7 @@ public void testFollowIndex() throws Exception { waitForDocs(firstBatchNumDocs, indexer); indexer.assertNoFailures(); + logger.info("Executing put follow"); boolean waitOnAll = randomBoolean(); final PutFollowAction.Request followRequest; @@ -176,6 +194,8 @@ public void testFollowIndex() throws Exception { logger.info("Indexing [{}] docs as second batch", secondBatchNumDocs); indexer.continueIndexing(secondBatchNumDocs); + waitForDocs(firstBatchNumDocs + secondBatchNumDocs, indexer); + final Map secondBatchNumDocsPerShard = new HashMap<>(); final ShardStats[] secondBatchShardStats = leaderClient().admin().indices().prepareStats("index1").get().getIndex("index1").getShards(); @@ -194,6 +214,7 @@ public void testFollowIndex() throws Exception { assertTrue("Doc with id [" + docId + "] is missing", getResponse.isExists()); }); } + pauseFollow("index2"); assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), numberOfPrimaryShards); } @@ -971,9 +992,70 @@ public void testMustCloseIndexAndPauseToRestartWithPutFollowing() throws Excepti } public void testIndexFallBehind() throws Exception { + runFallBehindTest( + () -> { + // we have to remove the retention leases on the leader shards to ensure the follower falls behind + final ClusterStateResponse followerIndexClusterState = + followerClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices("index2").get(); + final String followerUUID = followerIndexClusterState.getState().metaData().index("index2").getIndexUUID(); + final ClusterStateResponse leaderIndexClusterState = + leaderClient().admin().cluster().prepareState().clear().setMetaData(true).setIndices("index1").get(); + final String leaderUUID = 
leaderIndexClusterState.getState().metaData().index("index1").getIndexUUID(); + + final RoutingTable leaderRoutingTable = leaderClient() + .admin() + .cluster() + .prepareState() + .clear() + .setIndices("index1") + .setRoutingTable(true) + .get() + .getState() + .routingTable(); + + final String retentionLeaseId = retentionLeaseId( + getFollowerCluster().getClusterName(), + new Index("index2", followerUUID), + getLeaderCluster().getClusterName(), + new Index("index1", leaderUUID)); + + for (final ObjectCursor shardRoutingTable + : leaderRoutingTable.index("index1").shards().values()) { + final ShardId shardId = shardRoutingTable.value.shardId(); + leaderClient().execute( + RetentionLeaseActions.Remove.INSTANCE, + new RetentionLeaseActions.RemoveRequest(shardId, retentionLeaseId)) + .get(); + } + }, + exceptions -> assertThat(exceptions.size(), greaterThan(0))); + } + + public void testIndexDoesNotFallBehind() throws Exception { + runFallBehindTest( + () -> {}, + exceptions -> assertThat(exceptions.size(), equalTo(0))); + } + + /** + * Runs a fall behind test. In this test, we construct a situation where a follower is paused. While the follower is paused we index + * more documents that causes soft deletes on the leader, flush them, and run a force merge. This is to set up a situation where the + * operations will not necessarily be there. With retention leases in place, we would actually expect the operations to be there. After + * pausing the follower, the specified callback is executed. This gives a test an opportunity to set up assumptions. For example, a test + * might remove all the retention leases on the leader to set up a situation where the follower will fall behind when it is resumed + * because the operations will no longer be held on the leader. The specified exceptions callback is invoked after resuming the follower + * to give a test an opportunity to assert on the resource not found exceptions (either present or not present). 
+ * + * @param afterPausingFollower the callback to run after pausing the follower + * @param exceptionConsumer the callback to run on a collection of resource not found exceptions after resuming the follower + * @throws Exception if a checked exception is thrown during the test + */ + private void runFallBehindTest( + final CheckedRunnable afterPausingFollower, + final Consumer> exceptionConsumer) throws Exception { final int numberOfPrimaryShards = randomIntBetween(1, 3); final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1), - singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); ensureLeaderYellow("index1"); @@ -997,6 +1079,8 @@ public void testIndexFallBehind() throws Exception { pauseFollow("index2"); + afterPausingFollower.run(); + for (int i = 0; i < numDocs; i++) { final String source = String.format(Locale.ROOT, "{\"f\":%d}", i * 2); leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); @@ -1013,20 +1097,19 @@ public void testIndexFallBehind() throws Exception { assertBusy(() -> { List statuses = getFollowTaskStatuses("index2"); Set exceptions = statuses.stream() - .map(ShardFollowNodeTaskStatus::getFatalException) - .filter(Objects::nonNull) - .map(ExceptionsHelper::unwrapCause) - .filter(e -> e instanceof ResourceNotFoundException) - .map(e -> (ResourceNotFoundException) e) - .filter(e -> e.getMetadataKeys().contains("es.requested_operations_missing")) - .collect(Collectors.toSet()); - assertThat(exceptions.size(), greaterThan(0)); + .map(ShardFollowNodeTaskStatus::getFatalException) + .filter(Objects::nonNull) + .map(ExceptionsHelper::unwrapCause) + .filter(e -> e instanceof ResourceNotFoundException) + .map(e -> (ResourceNotFoundException) e) + .filter(e -> e.getMetadataKeys().contains("es.requested_operations_missing")) + .collect(Collectors.toSet()); + exceptionConsumer.accept(exceptions); }); followerClient().admin().indices().prepareClose("index2").get(); pauseFollow("index2"); - final PutFollowAction.Request followRequest2 = putFollow("index1", "index2"); PutFollowAction.Response response2 = followerClient().execute(PutFollowAction.INSTANCE, followRequest2).get(); assertTrue(response2.isFollowIndexCreated()); @@ -1040,6 +1123,69 @@ public void testIndexFallBehind() throws Exception { } } + public void testUpdateRemoteConfigsDuringFollowing() throws Exception { + final int numberOfPrimaryShards = randomIntBetween(1, 3); + int numberOfReplicas = between(0, 1); + + final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, numberOfReplicas, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureLeaderYellow("index1"); + + final int firstBatchNumDocs = randomIntBetween(200, 800); + + logger.info("Executing put follow"); + final PutFollowAction.Request followRequest = putFollow("index1", "index2"); + PutFollowAction.Response response = followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + assertTrue(response.isFollowIndexCreated()); + assertTrue(response.isFollowIndexShardsAcked()); + assertTrue(response.isIndexFollowingStarted()); + + logger.info("Indexing [{}] docs while updateing 
remote config", firstBatchNumDocs); + try (BackgroundIndexer indexer = new BackgroundIndexer("index1", "_doc", leaderClient(), firstBatchNumDocs, + randomIntBetween(1, 5))) { + + ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(); + String address = getLeaderCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); + Setting compress = RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("leader_cluster"); + Setting> seeds = RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("leader_cluster"); + settingsRequest.persistentSettings(Settings.builder().put(compress.getKey(), true).put(seeds.getKey(), address)); + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + + waitForDocs(firstBatchNumDocs, indexer); + indexer.assertNoFailures(); + + final Map firstBatchNumDocsPerShard = new HashMap<>(); + final ShardStats[] firstBatchShardStats = + leaderClient().admin().indices().prepareStats("index1").get().getIndex("index1").getShards(); + for (final ShardStats shardStats : firstBatchShardStats) { + if (shardStats.getShardRouting().primary()) { + long value = shardStats.getStats().getIndexing().getTotal().getIndexCount() - 1; + firstBatchNumDocsPerShard.put(shardStats.getShardRouting().shardId(), value); + } + } + + assertBusy(assertTask(numberOfPrimaryShards, firstBatchNumDocsPerShard)); + + for (String docId : indexer.getIds()) { + assertBusy(() -> { + final GetResponse getResponse = followerClient().prepareGet("index2", "_doc", docId).get(); + assertTrue("Doc with id [" + docId + "] is missing", getResponse.isExists()); + }); + } + + assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), numberOfPrimaryShards); + } finally { + ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(); + String address = getLeaderCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); + Setting compress = RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("leader_cluster"); + Setting> seeds = RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("leader_cluster"); + settingsRequest.persistentSettings(Settings.builder().put(compress.getKey(), compress.getDefault(Settings.EMPTY)) + .put(seeds.getKey(), address)); + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + } + } + private long getFollowTaskSettingsVersion(String followerIndex) { long settingsVersion = -1L; for (ShardFollowNodeTaskStatus status : getFollowTaskStatuses(followerIndex)) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java index b8649aaa4320c..1073fee6779f3 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java @@ -6,19 +6,24 @@ package org.elasticsearch.xpack.ccr; +import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction; +import org.elasticsearch.action.admin.cluster.remote.RemoteInfoRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import 
org.elasticsearch.index.IndexSettings; +import org.elasticsearch.transport.RemoteConnectionInfo; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.CcrIntegTestCase; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; +import java.util.List; import java.util.Locale; import static java.util.Collections.singletonMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class RestartIndexFollowingIT extends CcrIntegTestCase { @@ -66,6 +71,7 @@ public void testFollowIndex() throws Exception { equalTo(firstBatchNumDocs + secondBatchNumDocs)); }); + cleanRemoteCluster(); getLeaderCluster().fullRestart(); ensureLeaderGreen("index1"); // Remote connection needs to be re-configured, because all the nodes in leader cluster have been restarted: @@ -82,11 +88,30 @@ public void testFollowIndex() throws Exception { }); } - private void setupRemoteCluster() { + private void setupRemoteCluster() throws Exception { ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); String address = getLeaderCluster().getMasterNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); updateSettingsRequest.persistentSettings(Settings.builder().put("cluster.remote.leader_cluster.seeds", address)); assertAcked(followerClient().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + assertBusy(() -> { + List infos = + followerClient().execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest()).get().getInfos(); + assertThat(infos.size(), equalTo(1)); + assertThat(infos.get(0).getNumNodesConnected(), greaterThanOrEqualTo(1)); + }); + } + + private void cleanRemoteCluster() throws Exception { + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put("cluster.remote.leader_cluster.seeds", (String) null)); + assertAcked(followerClient().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + assertBusy(() -> { + List infos = + followerClient().execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest()).get().getInfos(); + assertThat(infos.size(), equalTo(0)); + }); } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 2037c7faaa7b4..75e6a732c8210 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -98,7 +98,7 @@ void getRemoteClusterState(String remoteCluster, long metadataVersion, BiConsumer handler) { assertThat(remoteCluster, equalTo("remote")); - handler.accept(new ClusterStateResponse(new ClusterName("name"), remoteState, 1L, false), null); + handler.accept(new ClusterStateResponse(new ClusterName("name"), remoteState, false), null); } @Override @@ -217,7 +217,7 @@ public void testAutoFollowerUpdateClusterStateFailure() { void getRemoteClusterState(String remoteCluster, long metadataVersion, BiConsumer handler) { - handler.accept(new ClusterStateResponse(new ClusterName("name"), remoteState, 1L, false), null); + handler.accept(new ClusterStateResponse(new ClusterName("name"), remoteState, false), null); } @Override @@ 
-274,7 +274,7 @@ public void testAutoFollowerCreateAndFollowApiCallFailure() { void getRemoteClusterState(String remoteCluster, long metadataVersion, BiConsumer handler) { - handler.accept(new ClusterStateResponse(new ClusterName("name"), remoteState, 1L, false), null); + handler.accept(new ClusterStateResponse(new ClusterName("name"), remoteState, false), null); } @Override @@ -729,7 +729,7 @@ void getRemoteClusterState(String remoteCluster, BiConsumer handler) { assertThat(remoteCluster, equalTo("remote")); assertThat(metadataVersion, greaterThan(previousRequestedMetadataVersion)); - handler.accept(new ClusterStateResponse(new ClusterName("name"), leaderStates.poll(), 1L, false), null); + handler.accept(new ClusterStateResponse(new ClusterName("name"), leaderStates.poll(), false), null); } @Override @@ -788,7 +788,7 @@ void getRemoteClusterState(String remoteCluster, counter.incrementAndGet(); assertThat(remoteCluster, equalTo("remote")); assertThat(metadataVersion, greaterThan(previousRequestedMetadataVersion)); - handler.accept(new ClusterStateResponse(new ClusterName("name"), null, 1L, true), null); + handler.accept(new ClusterStateResponse(new ClusterName("name"), null, true), null); } @Override @@ -838,7 +838,7 @@ void getRemoteClusterState(String remoteCluster, long metadataVersion, BiConsumer handler) { assertThat(remoteCluster, equalTo("remote")); - handler.accept(new ClusterStateResponse(new ClusterName("name"), remoteState, 1L, false), null); + handler.accept(new ClusterStateResponse(new ClusterName("name"), remoteState, false), null); } @Override @@ -914,7 +914,7 @@ void getRemoteClusterState(String remoteCluster, long metadataVersion, BiConsumer handler) { assertThat(remoteCluster, equalTo("remote")); - handler.accept(new ClusterStateResponse(new ClusterName("name"), remoteState, 1L, false), null); + handler.accept(new ClusterStateResponse(new ClusterName("name"), remoteState, false), null); } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowInfoResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowInfoResponseTests.java index eceb37819d187..64f01fe2e104f 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowInfoResponseTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowInfoResponseTests.java @@ -6,10 +6,7 @@ package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction; @@ -26,7 +23,6 @@ public class FollowInfoResponseTests extends AbstractSerializingTestCase { - static final ObjectParser PARAMETERS_PARSER = new ObjectParser<>("parameters_parser", FollowParameters::new); static final ConstructingObjectParser INFO_PARSER = new ConstructingObjectParser<>( "info_parser", args -> { @@ -40,13 +36,12 @@ public class FollowInfoResponseTests extends AbstractSerializingTestCase { + + static final ObjectParser PARSER = new ObjectParser<>("test_parser", FollowParameters::new); + static { + FollowParameters.initParser(PARSER); + } + + @Override + protected FollowParameters 
doParseInstance(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + + @Override + protected FollowParameters createTestInstance() { + return randomInstance(); + } + + @Override + protected Writeable.Reader instanceReader() { + return FollowParameters::new; + } + + static FollowParameters randomInstance() { + FollowParameters followParameters = new FollowParameters(); + followParameters.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong())); + followParameters.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); + followParameters.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE)); + followParameters.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); + followParameters.setMaxRetryDelay(new TimeValue(randomNonNegativeLong())); + followParameters.setReadPollTimeout(new TimeValue(randomNonNegativeLong())); + return followParameters; + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java index 7130c830baa01..55582815ce5e6 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/GetAutoFollowPatternResponseTests.java @@ -34,11 +34,11 @@ protected GetAutoFollowPatternAction.Response createTestInstance() { Collections.singletonList(randomAlphaOfLength(4)), randomAlphaOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), - new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), randomIntBetween(0, Integer.MAX_VALUE), - new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(0, Integer.MAX_VALUE), new ByteSizeValue(randomNonNegativeLong()), TimeValue.timeValueMillis(500), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java index 629127c454cef..46c7c51586c53 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; @@ -32,6 +33,7 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.LongConsumer; +import java.util.function.LongSupplier; import 
java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; @@ -82,11 +84,11 @@ private ShardFollowNodeTask createShardFollowTask(int concurrency, TestRun testR new ShardId("follow_index", "", 0), new ShardId("leader_index", "", 0), testRun.maxOperationCount, - TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, - concurrency, testRun.maxOperationCount, - TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, concurrency, + concurrency, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 10240, new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), @@ -177,6 +179,23 @@ protected void innerSendShardChangesRequest(long from, int maxOperationCount, Co threadPool.generic().execute(task); } + @Override + protected Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(final LongSupplier followerGlobalCheckpoint) { + return new Scheduler.Cancellable() { + + @Override + public boolean cancel() { + return true; + } + + @Override + public boolean isCancelled() { + return true; + } + + }; + } + @Override protected boolean isStopped() { return stopped.get(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index a7d07b6066732..09d00dc6a33ac 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -8,13 +8,16 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; @@ -27,12 +30,17 @@ import java.util.List; import java.util.Map; import java.util.Queue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.LongConsumer; +import java.util.function.LongSupplier; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.contains; @@ -53,6 +61,9 @@ public class ShardFollowNodeTaskTests extends ESTestCase { private Consumer beforeSendShardChangesRequest = status -> {}; + private AtomicBoolean scheduleRetentionLeaseRenewal = new AtomicBoolean(); + private LongConsumer retentionLeaseRenewal = followerGlobalCheckpoint -> {}; + private AtomicBoolean simulateResponse = new AtomicBoolean(); private Queue readFailures; @@ -936,6 +947,28 @@ public void testComputeDelay() { 
assertThat(ShardFollowNodeTask.computeDelay(1024, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(1000L))); } + public void testRetentionLeaseRenewal() throws InterruptedException { + scheduleRetentionLeaseRenewal.set(true); + final CountDownLatch latch = new CountDownLatch(1); + final long expectedFollowerGlobalChekcpoint = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + retentionLeaseRenewal = followerGlobalCheckpoint -> { + assertThat(followerGlobalCheckpoint, equalTo(expectedFollowerGlobalChekcpoint)); + latch.countDown(); + }; + + final ShardFollowTaskParams params = new ShardFollowTaskParams(); + final ShardFollowNodeTask task = createShardFollowTask(params); + + try { + startTask(task, randomLongBetween(expectedFollowerGlobalChekcpoint, Long.MAX_VALUE), expectedFollowerGlobalChekcpoint); + latch.await(); + } finally { + task.onCancelled(); + scheduleRetentionLeaseRenewal.set(false); + } + } + + static final class ShardFollowTaskParams { private String remoteCluster = null; private ShardId followShardId = new ShardId("follow_index", "", 0); @@ -960,11 +993,11 @@ private ShardFollowNodeTask createShardFollowTask(ShardFollowTaskParams params) params.followShardId, params.leaderShardId, params.maxReadRequestOperationCount, - params.maxReadRequestSize, - params.maxOutstandingReadRequests, params.maxWriteRequestOperationCount, - params.maxWriteRequestSize, + params.maxOutstandingReadRequests, params.maxOutstandingWriteRequests, + params.maxReadRequestSize, + params.maxWriteRequestSize, params.maxWriteBufferCount, params.maxWriteBufferSize, params.maxRetryDelay, @@ -1063,6 +1096,47 @@ protected void innerSendShardChangesRequest(long from, int requestBatchSize, Con } } + @Override + protected Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(final LongSupplier followerGlobalCheckpoint) { + if (scheduleRetentionLeaseRenewal.get()) { + final ScheduledThreadPoolExecutor scheduler = Scheduler.initScheduler(Settings.EMPTY); + final ScheduledFuture future = scheduler.scheduleWithFixedDelay( + () -> retentionLeaseRenewal.accept(followerGlobalCheckpoint.getAsLong()), + 0, + TimeValue.timeValueMillis(200).millis(), + TimeUnit.MILLISECONDS); + return new Scheduler.Cancellable() { + + @Override + public boolean cancel() { + final boolean cancel = future.cancel(true); + scheduler.shutdown(); + return cancel; + } + + @Override + public boolean isCancelled() { + return future.isCancelled(); + } + + }; + } else { + return new Scheduler.Cancellable() { + + @Override + public boolean cancel() { + return true; + } + + @Override + public boolean isCancelled() { + return true; + } + + }; + } + } + @Override protected boolean isStopped() { return super.isStopped() || stopped.get(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 8b05b618ba407..8c4dd6361924a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -7,6 +7,7 @@ import com.carrotsearch.hppc.LongHashSet; import com.carrotsearch.hppc.LongSet; +import org.apache.lucene.store.IOContext; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -15,11 +16,17 @@ import 
org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingHelper; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -31,10 +38,19 @@ import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.index.shard.RestoreOnlyRepository; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryTarget; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ccr.CcrRetentionLeases; import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsRequest; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; @@ -47,250 +63,263 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashSet; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.LongConsumer; +import java.util.function.LongSupplier; import java.util.stream.Collectors; +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTestCase { public void testSimpleCcrReplication() throws Exception { - try (ReplicationGroup leaderGroup = createGroup(randomInt(2)); - ReplicationGroup followerGroup = createFollowGroup(randomInt(2))) { + try (ReplicationGroup leaderGroup = createLeaderGroup(randomInt(2))) { leaderGroup.startAll(); - int docCount = leaderGroup.appendDocs(randomInt(64)); - leaderGroup.assertAllEqual(docCount); - followerGroup.startAll(); - ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); - final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); - final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); - shardFollowTask.start( + try 
(ReplicationGroup followerGroup = createFollowGroup(leaderGroup, randomInt(2))) { + int docCount = leaderGroup.appendDocs(randomInt(64)); + leaderGroup.assertAllEqual(docCount); + followerGroup.startAll(); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); + final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start( followerGroup.getPrimary().getHistoryUUID(), leaderSeqNoStats.getGlobalCheckpoint(), leaderSeqNoStats.getMaxSeqNo(), followerSeqNoStats.getGlobalCheckpoint(), followerSeqNoStats.getMaxSeqNo()); - docCount += leaderGroup.appendDocs(randomInt(128)); - leaderGroup.syncGlobalCheckpoint(); - leaderGroup.assertAllEqual(docCount); - Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); - assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); - followerGroup.assertAllEqual(indexedDocIds.size()); - }); - for (IndexShard shard : followerGroup) { - assertThat(((FollowingEngine) (getEngine(shard))).getNumberOfOptimizedIndexing(), equalTo((long) docCount)); - } - // Deletes should be replicated to the follower - List deleteDocIds = randomSubsetOf(indexedDocIds); - for (String deleteId : deleteDocIds) { - BulkItemResponse resp = leaderGroup.delete(new DeleteRequest(index.getName(), "type", deleteId)); - assertThat(resp.getResponse().getResult(), equalTo(DocWriteResponse.Result.DELETED)); + docCount += leaderGroup.appendDocs(randomInt(128)); + leaderGroup.syncGlobalCheckpoint(); + leaderGroup.assertAllEqual(docCount); + Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size()); + }); + for (IndexShard shard : followerGroup) { + assertThat(((FollowingEngine) (getEngine(shard))).getNumberOfOptimizedIndexing(), equalTo((long) docCount)); + } + // Deletes should be replicated to the follower + List deleteDocIds = randomSubsetOf(indexedDocIds); + for (String deleteId : deleteDocIds) { + BulkItemResponse resp = leaderGroup.delete(new DeleteRequest(index.getName(), "type", deleteId)); + assertThat(resp.getResponse().getResult(), equalTo(DocWriteResponse.Result.DELETED)); + } + leaderGroup.syncGlobalCheckpoint(); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size() - deleteDocIds.size()); + }); + shardFollowTask.markAsCompleted(); + assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, true); } - leaderGroup.syncGlobalCheckpoint(); - assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); - followerGroup.assertAllEqual(indexedDocIds.size() - deleteDocIds.size()); - }); - shardFollowTask.markAsCompleted(); - assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, true); } } public void testAddRemoveShardOnLeader() throws Exception { - try (ReplicationGroup leaderGroup = createGroup(1 + randomInt(1)); - ReplicationGroup followerGroup = createFollowGroup(randomInt(2))) { + try (ReplicationGroup leaderGroup = createLeaderGroup(1 + randomInt(1))) { leaderGroup.startAll(); - 
followerGroup.startAll(); - ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); - final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); - final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); - shardFollowTask.start( + try (ReplicationGroup followerGroup = createFollowGroup(leaderGroup, randomInt(2))) { + followerGroup.startAll(); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); + final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start( followerGroup.getPrimary().getHistoryUUID(), leaderSeqNoStats.getGlobalCheckpoint(), leaderSeqNoStats.getMaxSeqNo(), followerSeqNoStats.getGlobalCheckpoint(), followerSeqNoStats.getMaxSeqNo()); - int batches = between(0, 10); - int docCount = 0; - boolean hasPromotion = false; - for (int i = 0; i < batches; i++) { - docCount += leaderGroup.indexDocs(between(1, 5)); - if (leaderGroup.getReplicas().isEmpty() == false && randomInt(100) < 5) { - IndexShard closingReplica = randomFrom(leaderGroup.getReplicas()); - leaderGroup.removeReplica(closingReplica); - closingReplica.close("test", false); - closingReplica.store().close(); - } else if (leaderGroup.getReplicas().isEmpty() == false && rarely()) { - IndexShard newPrimary = randomFrom(leaderGroup.getReplicas()); - leaderGroup.promoteReplicaToPrimary(newPrimary).get(); - hasPromotion = true; - } else if (randomInt(100) < 5) { - leaderGroup.addReplica(); - leaderGroup.startReplicas(1); + int batches = between(0, 10); + int docCount = 0; + boolean hasPromotion = false; + for (int i = 0; i < batches; i++) { + docCount += leaderGroup.indexDocs(between(1, 5)); + if (leaderGroup.getReplicas().isEmpty() == false && randomInt(100) < 5) { + IndexShard closingReplica = randomFrom(leaderGroup.getReplicas()); + leaderGroup.removeReplica(closingReplica); + closingReplica.close("test", false); + closingReplica.store().close(); + } else if (leaderGroup.getReplicas().isEmpty() == false && rarely()) { + IndexShard newPrimary = randomFrom(leaderGroup.getReplicas()); + leaderGroup.promoteReplicaToPrimary(newPrimary).get(); + hasPromotion = true; + } else if (randomInt(100) < 5) { + leaderGroup.addReplica(); + leaderGroup.startReplicas(1); + } + leaderGroup.syncGlobalCheckpoint(); } - leaderGroup.syncGlobalCheckpoint(); + leaderGroup.assertAllEqual(docCount); + assertThat(shardFollowTask.getFailure(), nullValue()); + int expectedDoc = docCount; + assertBusy(() -> followerGroup.assertAllEqual(expectedDoc)); + shardFollowTask.markAsCompleted(); + assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, hasPromotion == false); } - leaderGroup.assertAllEqual(docCount); - assertThat(shardFollowTask.getFailure(), nullValue()); - int expectedDoc = docCount; - assertBusy(() -> followerGroup.assertAllEqual(expectedDoc)); - shardFollowTask.markAsCompleted(); - assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, hasPromotion == false); } } public void testChangeLeaderHistoryUUID() throws Exception { - try (ReplicationGroup leaderGroup = createGroup(0); - ReplicationGroup followerGroup = createFollowGroup(0)) { - leaderGroup.startAll(); - int docCount = leaderGroup.appendDocs(randomInt(64)); - leaderGroup.assertAllEqual(docCount); - followerGroup.startAll(); - ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); - final 
SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); - final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); - shardFollowTask.start( - followerGroup.getPrimary().getHistoryUUID(), - leaderSeqNoStats.getGlobalCheckpoint(), - leaderSeqNoStats.getMaxSeqNo(), - followerSeqNoStats.getGlobalCheckpoint(), - followerSeqNoStats.getMaxSeqNo()); - leaderGroup.syncGlobalCheckpoint(); - leaderGroup.assertAllEqual(docCount); - Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); - assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); - followerGroup.assertAllEqual(indexedDocIds.size()); - }); - - String oldHistoryUUID = leaderGroup.getPrimary().getHistoryUUID(); - leaderGroup.reinitPrimaryShard(); - leaderGroup.getPrimary().store().bootstrapNewHistory(); - recoverShardFromStore(leaderGroup.getPrimary()); - String newHistoryUUID = leaderGroup.getPrimary().getHistoryUUID(); - - // force the global checkpoint on the leader to advance - leaderGroup.appendDocs(64); - - assertBusy(() -> { - assertThat(shardFollowTask.isStopped(), is(true)); - ElasticsearchException failure = shardFollowTask.getStatus().getFatalException(); - assertThat(failure.getRootCause().getMessage(), equalTo("unexpected history uuid, expected [" + oldHistoryUUID + - "], actual [" + newHistoryUUID + "]")); - }); + try (ReplicationGroup leaderGroup = createLeaderGroup(0)) { + try (ReplicationGroup followerGroup = createFollowGroup(leaderGroup, 0)) { + leaderGroup.startAll(); + int docCount = leaderGroup.appendDocs(randomInt(64)); + leaderGroup.assertAllEqual(docCount); + followerGroup.startAll(); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); + final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start( + followerGroup.getPrimary().getHistoryUUID(), + leaderSeqNoStats.getGlobalCheckpoint(), + leaderSeqNoStats.getMaxSeqNo(), + followerSeqNoStats.getGlobalCheckpoint(), + followerSeqNoStats.getMaxSeqNo()); + leaderGroup.syncGlobalCheckpoint(); + leaderGroup.assertAllEqual(docCount); + Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size()); + }); + + String oldHistoryUUID = leaderGroup.getPrimary().getHistoryUUID(); + leaderGroup.reinitPrimaryShard(); + leaderGroup.getPrimary().store().bootstrapNewHistory(); + recoverShardFromStore(leaderGroup.getPrimary()); + String newHistoryUUID = leaderGroup.getPrimary().getHistoryUUID(); + + // force the global checkpoint on the leader to advance + leaderGroup.appendDocs(64); + + assertBusy(() -> { + assertThat(shardFollowTask.isStopped(), is(true)); + ElasticsearchException failure = shardFollowTask.getStatus().getFatalException(); + assertThat(failure.getRootCause().getMessage(), equalTo("unexpected history uuid, expected [" + oldHistoryUUID + + "], actual [" + newHistoryUUID + "]")); + }); + } } } public void testChangeFollowerHistoryUUID() throws Exception { - try (ReplicationGroup leaderGroup = createGroup(0); - ReplicationGroup followerGroup = createFollowGroup(0)) { + try (ReplicationGroup leaderGroup = createLeaderGroup(0)) { leaderGroup.startAll(); - int docCount = 
leaderGroup.appendDocs(randomInt(64)); - leaderGroup.assertAllEqual(docCount); - followerGroup.startAll(); - ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); - final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); - final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); - shardFollowTask.start( - followerGroup.getPrimary().getHistoryUUID(), - leaderSeqNoStats.getGlobalCheckpoint(), - leaderSeqNoStats.getMaxSeqNo(), - followerSeqNoStats.getGlobalCheckpoint(), - followerSeqNoStats.getMaxSeqNo()); - leaderGroup.syncGlobalCheckpoint(); - leaderGroup.assertAllEqual(docCount); - Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); - assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); - followerGroup.assertAllEqual(indexedDocIds.size()); - }); - - String oldHistoryUUID = followerGroup.getPrimary().getHistoryUUID(); - followerGroup.reinitPrimaryShard(); - followerGroup.getPrimary().store().bootstrapNewHistory(); - recoverShardFromStore(followerGroup.getPrimary()); - String newHistoryUUID = followerGroup.getPrimary().getHistoryUUID(); - - // force the global checkpoint on the leader to advance - leaderGroup.appendDocs(64); - - assertBusy(() -> { - assertThat(shardFollowTask.isStopped(), is(true)); - ElasticsearchException failure = shardFollowTask.getStatus().getFatalException(); - assertThat(failure.getRootCause().getMessage(), equalTo("unexpected history uuid, expected [" + oldHistoryUUID + - "], actual [" + newHistoryUUID + "], shard is likely restored from snapshot or force allocated")); - }); + try(ReplicationGroup followerGroup = createFollowGroup(leaderGroup, 0)) { + int docCount = leaderGroup.appendDocs(randomInt(64)); + leaderGroup.assertAllEqual(docCount); + followerGroup.startAll(); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); + final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start( + followerGroup.getPrimary().getHistoryUUID(), + leaderSeqNoStats.getGlobalCheckpoint(), + leaderSeqNoStats.getMaxSeqNo(), + followerSeqNoStats.getGlobalCheckpoint(), + followerSeqNoStats.getMaxSeqNo()); + leaderGroup.syncGlobalCheckpoint(); + leaderGroup.assertAllEqual(docCount); + Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size()); + }); + + String oldHistoryUUID = followerGroup.getPrimary().getHistoryUUID(); + followerGroup.reinitPrimaryShard(); + followerGroup.getPrimary().store().bootstrapNewHistory(); + recoverShardFromStore(followerGroup.getPrimary()); + String newHistoryUUID = followerGroup.getPrimary().getHistoryUUID(); + + // force the global checkpoint on the leader to advance + leaderGroup.appendDocs(64); + + assertBusy(() -> { + assertThat(shardFollowTask.isStopped(), is(true)); + ElasticsearchException failure = shardFollowTask.getStatus().getFatalException(); + assertThat(failure.getRootCause().getMessage(), equalTo("unexpected history uuid, expected [" + oldHistoryUUID + + "], actual [" + newHistoryUUID + "], shard is likely restored from snapshot or force allocated")); + }); + } } } public void testRetryBulkShardOperations() throws Exception 
{ - try (ReplicationGroup leaderGroup = createGroup(between(0, 1)); - ReplicationGroup followerGroup = createFollowGroup(between(1, 3))) { + try (ReplicationGroup leaderGroup = createLeaderGroup(between(0, 1))) { leaderGroup.startAll(); - followerGroup.startAll(); - leaderGroup.appendDocs(between(10, 100)); - leaderGroup.refresh("test"); - for (int numNoOps = between(1, 10), i = 0; i < numNoOps; i++) { - long seqNo = leaderGroup.getPrimary().seqNoStats().getMaxSeqNo() + 1; - Engine.NoOp noOp = new Engine.NoOp(seqNo, leaderGroup.getPrimary().getOperationPrimaryTerm(), - Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis(), "test-" + i); - for (IndexShard shard : leaderGroup) { - getEngine(shard).noOp(noOp); + try(ReplicationGroup followerGroup = createFollowGroup(leaderGroup, between(1, 3))) { + followerGroup.startAll(); + leaderGroup.appendDocs(between(10, 100)); + leaderGroup.refresh("test"); + for (int numNoOps = between(1, 10), i = 0; i < numNoOps; i++) { + long seqNo = leaderGroup.getPrimary().seqNoStats().getMaxSeqNo() + 1; + Engine.NoOp noOp = new Engine.NoOp(seqNo, leaderGroup.getPrimary().getOperationPrimaryTerm(), + Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis(), "test-" + i); + for (IndexShard shard : leaderGroup) { + getEngine(shard).noOp(noOp); + } } - } - for (String deleteId : randomSubsetOf(IndexShardTestCase.getShardDocUIDs(leaderGroup.getPrimary()))) { - BulkItemResponse resp = leaderGroup.delete(new DeleteRequest("test", "type", deleteId)); - assertThat(resp.getFailure(), nullValue()); - } - leaderGroup.syncGlobalCheckpoint(); - IndexShard leadingPrimary = leaderGroup.getPrimary(); - // Simulates some bulk requests are completed on the primary and replicated to some (but all) replicas of the follower - // but the primary of the follower crashed before these requests completed. 
- for (int numBulks = between(1, 5), i = 0; i < numBulks; i++) { - long fromSeqNo = randomLongBetween(0, leadingPrimary.getGlobalCheckpoint()); - long toSeqNo = randomLongBetween(fromSeqNo, leadingPrimary.getGlobalCheckpoint()); - int numOps = Math.toIntExact(toSeqNo + 1 - fromSeqNo); - Translog.Operation[] ops = ShardChangesAction.getOperations(leadingPrimary, leadingPrimary.getGlobalCheckpoint(), - fromSeqNo, numOps, leadingPrimary.getHistoryUUID(), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)); - - IndexShard followingPrimary = followerGroup.getPrimary(); - TransportWriteAction.WritePrimaryResult primaryResult = - TransportBulkShardOperationsAction.shardOperationOnPrimary(followingPrimary.shardId(), - followingPrimary.getHistoryUUID(), Arrays.asList(ops), leadingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), - followingPrimary, logger); - for (IndexShard replica : randomSubsetOf(followerGroup.getReplicas())) { - final PlainActionFuture permitFuture = new PlainActionFuture<>(); - replica.acquireReplicaOperationPermit(followingPrimary.getOperationPrimaryTerm(), - followingPrimary.getGlobalCheckpoint(), followingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), - permitFuture, ThreadPool.Names.SAME, primaryResult); - try (Releasable ignored = permitFuture.get()) { - TransportBulkShardOperationsAction.shardOperationOnReplica(primaryResult.replicaRequest(), replica, logger); + for (String deleteId : randomSubsetOf(IndexShardTestCase.getShardDocUIDs(leaderGroup.getPrimary()))) { + BulkItemResponse resp = leaderGroup.delete(new DeleteRequest("test", "type", deleteId)); + assertThat(resp.getFailure(), nullValue()); + } + leaderGroup.syncGlobalCheckpoint(); + IndexShard leadingPrimary = leaderGroup.getPrimary(); + // Simulates some bulk requests are completed on the primary and replicated to some (but not all) replicas of the follower + // but the primary of the follower crashed before these requests completed. + for (int numBulks = between(1, 5), i = 0; i < numBulks; i++) { + long fromSeqNo = randomLongBetween(0, leadingPrimary.getGlobalCheckpoint()); + long toSeqNo = randomLongBetween(fromSeqNo, leadingPrimary.getGlobalCheckpoint()); + int numOps = Math.toIntExact(toSeqNo + 1 - fromSeqNo); + Translog.Operation[] ops = ShardChangesAction.getOperations(leadingPrimary, leadingPrimary.getGlobalCheckpoint(), + fromSeqNo, numOps, leadingPrimary.getHistoryUUID(), new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)); + + IndexShard followingPrimary = followerGroup.getPrimary(); + TransportWriteAction.WritePrimaryResult primaryResult = + TransportBulkShardOperationsAction.shardOperationOnPrimary(followingPrimary.shardId(), + followingPrimary.getHistoryUUID(), Arrays.asList(ops), leadingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), + followingPrimary, logger); + for (IndexShard replica : randomSubsetOf(followerGroup.getReplicas())) { + final PlainActionFuture permitFuture = new PlainActionFuture<>(); + replica.acquireReplicaOperationPermit(followingPrimary.getOperationPrimaryTerm(), + followingPrimary.getGlobalCheckpoint(), followingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), + permitFuture, ThreadPool.Names.SAME, primaryResult); + try (Releasable ignored = permitFuture.get()) { + TransportBulkShardOperationsAction.shardOperationOnReplica(primaryResult.replicaRequest(), replica, logger); + } } } - } - // A follow-task retries these requests while the primary-replica resync is happening on the follower.
- followerGroup.promoteReplicaToPrimary(randomFrom(followerGroup.getReplicas())); - ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); - SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); - shardFollowTask.start(followerGroup.getPrimary().getHistoryUUID(), leadingPrimary.getGlobalCheckpoint(), - leadingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), followerSeqNoStats.getGlobalCheckpoint(), followerSeqNoStats.getMaxSeqNo()); - try { - assertBusy(() -> { - assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leadingPrimary.getGlobalCheckpoint())); - assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, true); - }); - } finally { - shardFollowTask.markAsCompleted(); + // A follow-task retries these requests while the primary-replica resync is happening on the follower. + followerGroup.promoteReplicaToPrimary(randomFrom(followerGroup.getReplicas())); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start(followerGroup.getPrimary().getHistoryUUID(), + leadingPrimary.getGlobalCheckpoint(), + leadingPrimary.getMaxSeqNoOfUpdatesOrDeletes(), + followerSeqNoStats.getGlobalCheckpoint(), + followerSeqNoStats.getMaxSeqNo()); + try { + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leadingPrimary.getGlobalCheckpoint())); + assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup, true); + }); + } finally { + shardFollowTask.markAsCompleted(); + } } } } @@ -303,7 +332,17 @@ public void testAddNewFollowingReplica() throws Exception { operations.add(new Translog.Index("type", Integer.toString(i), i, primaryTerm, 0, source, null, -1)); } Future recoveryFuture = null; - try (ReplicationGroup group = createFollowGroup(between(0, 1))) { + Settings settings = Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(between(1, 1000), ByteSizeUnit.KB)) + .build(); + IndexMetaData indexMetaData = buildIndexMetaData(between(0, 1), settings, indexMapping); + try (ReplicationGroup group = new ReplicationGroup(indexMetaData) { + @Override + protected EngineFactory getEngineFactory(ShardRouting routing) { + return new FollowingEngineFactory(); + } + }) { group.startAll(); while (operations.isEmpty() == false) { List bulkOps = randomSubsetOf(between(1, operations.size()), operations); @@ -330,35 +369,103 @@ public void testAddNewFollowingReplica() throws Exception { } } - @Override - protected ReplicationGroup createGroup(int replicas, Settings settings) throws IOException { - Settings newSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10000) - .put(settings) - .build(); - if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(newSettings)) { - IndexMetaData metaData = buildIndexMetaData(replicas, newSettings, indexMapping); - return new ReplicationGroup(metaData) { - - @Override - protected EngineFactory getEngineFactory(ShardRouting routing) { - return new 
FollowingEngineFactory(); + public void testSimpleRemoteRecovery() throws Exception { + try (ReplicationGroup leader = createLeaderGroup(between(0, 1))) { + leader.startAll(); + leader.appendDocs(between(0, 100)); + leader.flush(); + leader.syncGlobalCheckpoint(); + try (ReplicationGroup follower = createFollowGroup(leader, 0)) { + follower.startAll(); + ShardFollowNodeTask followTask = createShardFollowTask(leader, follower); + followTask.start( + follower.getPrimary().getHistoryUUID(), + leader.getPrimary().getGlobalCheckpoint(), + leader.getPrimary().seqNoStats().getMaxSeqNo(), + follower.getPrimary().getGlobalCheckpoint(), + follower.getPrimary().seqNoStats().getMaxSeqNo() + ); + leader.appendDocs(between(0, 100)); + if (randomBoolean()) { + follower.recoverReplica(follower.addReplica()); } - }; - } else { - return super.createGroup(replicas, newSettings); + assertBusy(() -> assertConsistentHistoryBetweenLeaderAndFollower(leader, follower, false)); + followTask.markAsCompleted(); + } + } + } + + public void testRetentionLeaseManagement() throws Exception { + try (ReplicationGroup leader = createLeaderGroup(0)) { + leader.startAll(); + try (ReplicationGroup follower = createFollowGroup(leader, 0)) { + follower.startAll(); + final ShardFollowNodeTask task = createShardFollowTask(leader, follower); + task.start( + follower.getPrimary().getHistoryUUID(), + leader.getPrimary().getGlobalCheckpoint(), + leader.getPrimary().seqNoStats().getMaxSeqNo(), + follower.getPrimary().getGlobalCheckpoint(), + follower.getPrimary().seqNoStats().getMaxSeqNo()); + final Scheduler.Cancellable renewable = task.getRenewable(); + assertNotNull(renewable); + assertFalse(renewable.isCancelled()); + task.onCancelled(); + assertTrue(renewable.isCancelled()); + assertNull(task.getRenewable()); + } } } - private ReplicationGroup createFollowGroup(int replicas) throws IOException { - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) + private ReplicationGroup createLeaderGroup(int replicas) throws IOException { + Settings settings = Settings.builder() .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(between(1, 1000), ByteSizeUnit.KB)); - return createGroup(replicas, settingsBuilder.build()); + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10000) + .build(); + return createGroup(replicas, settings); + } + + private ReplicationGroup createFollowGroup(ReplicationGroup leaderGroup, int replicas) throws IOException { + final Settings settings = Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put( + IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), + new ByteSizeValue(between(1, 1000), ByteSizeUnit.KB)) + .build(); + IndexMetaData indexMetaData = buildIndexMetaData(replicas, settings, indexMapping); + return new ReplicationGroup(indexMetaData) { + @Override + protected EngineFactory getEngineFactory(ShardRouting routing) { + return new FollowingEngineFactory(); + } + @Override + protected synchronized void recoverPrimary(IndexShard primary) { + DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + Snapshot snapshot = new Snapshot("foo", new SnapshotId("bar", UUIDs.randomBase64UUID())); + ShardRouting routing = 
ShardRoutingHelper.newWithRestoreSource(primary.routingEntry(), + new RecoverySource.SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, Version.CURRENT, "test")); + primary.markAsRecovering("remote recovery from leader", new RecoveryState(routing, localNode, null)); + primary.restoreFromRepository(new RestoreOnlyRepository(index.getName()) { + @Override + public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, + IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { + try { + IndexShard leader = leaderGroup.getPrimary(); + Lucene.cleanLuceneIndex(primary.store().directory()); + try (Engine.IndexCommitRef sourceCommit = leader.acquireSafeIndexCommit()) { + Store.MetadataSnapshot sourceSnapshot = leader.store().getMetadata(sourceCommit.getIndexCommit()); + for (StoreFileMetaData md : sourceSnapshot) { + primary.store().directory().copyFrom( + leader.store().directory(), md.name(), md.name(), IOContext.DEFAULT); + } + } + } catch (Exception ex) { + throw new AssertionError(ex); + } + } + }); + } + }; } private ShardFollowNodeTask createShardFollowTask(ReplicationGroup leaderGroup, ReplicationGroup followerGroup) { @@ -367,11 +474,11 @@ private ShardFollowNodeTask createShardFollowTask(ReplicationGroup leaderGroup, new ShardId("follow_index", "", 0), new ShardId("leader_index", "", 0), between(1, 64), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), - between(1, 8), between(1, 64), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), + between(1, 8), between(1, 4), + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), 10240, new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), @@ -465,6 +572,27 @@ protected void innerSendShardChangesRequest(long from, int maxOperationCount, Co threadPool.executor(ThreadPool.Names.GENERIC).execute(task); } + @Override + protected Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(final LongSupplier followerGlobalCheckpoint) { + final String retentionLeaseId = CcrRetentionLeases.retentionLeaseId( + "follower", + followerGroup.getPrimary().routingEntry().index(), + "remote", + leaderGroup.getPrimary().routingEntry().index()); + final PlainActionFuture response = new PlainActionFuture<>(); + leaderGroup.addRetentionLease( + retentionLeaseId, + followerGlobalCheckpoint.getAsLong(), + "ccr", + ActionListener.wrap(response::onResponse, e -> fail(e.toString()))); + response.actionGet(); + return threadPool.scheduleWithFixedDelay( + () -> leaderGroup.renewRetentionLease(retentionLeaseId, followerGlobalCheckpoint.getAsLong(), "ccr"), + CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get( + followerGroup.getPrimary().indexSettings().getSettings()), + ThreadPool.Names.GENERIC); + } + @Override protected boolean isStopped() { return super.isStopped() || stopped.get(); @@ -482,28 +610,29 @@ private void assertConsistentHistoryBetweenLeaderAndFollower(ReplicationGroup le boolean assertMaxSeqNoOfUpdatesOrDeletes) throws Exception { final List> docAndSeqNosOnLeader = getDocIdAndSeqNos(leader.getPrimary()).stream() .map(d -> Tuple.tuple(d.getId(), d.getSeqNo())).collect(Collectors.toList()); - final Set> operationsOnLeader = new HashSet<>(); - try (Translog.Snapshot snapshot = leader.getPrimary().getHistoryOperations("test", 0)) { + final Map operationsOnLeader = new HashMap<>(); + try (Translog.Snapshot snapshot = leader.getPrimary().newChangesSnapshot("test", 0, Long.MAX_VALUE, false)) { Translog.Operation op; 
while ((op = snapshot.next()) != null) { - operationsOnLeader.add(Tuple.tuple(op.seqNo(), op.opType())); + operationsOnLeader.put(op.seqNo(), op); } } for (IndexShard followingShard : follower) { if (assertMaxSeqNoOfUpdatesOrDeletes) { - assertThat(followingShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(leader.getPrimary().getMaxSeqNoOfUpdatesOrDeletes())); + assertThat(followingShard.getMaxSeqNoOfUpdatesOrDeletes(), + greaterThanOrEqualTo(leader.getPrimary().getMaxSeqNoOfUpdatesOrDeletes())); } List> docAndSeqNosOnFollower = getDocIdAndSeqNos(followingShard).stream() .map(d -> Tuple.tuple(d.getId(), d.getSeqNo())).collect(Collectors.toList()); assertThat(docAndSeqNosOnFollower, equalTo(docAndSeqNosOnLeader)); - final Set> operationsOnFollower = new HashSet<>(); - try (Translog.Snapshot snapshot = followingShard.getHistoryOperations("test", 0)) { + try (Translog.Snapshot snapshot = followingShard.newChangesSnapshot("test", 0, Long.MAX_VALUE, false)) { Translog.Operation op; while ((op = snapshot.next()) != null) { - operationsOnFollower.add(Tuple.tuple(op.seqNo(), op.opType())); + Translog.Operation leaderOp = operationsOnLeader.get(op.seqNo()); + assertThat(TransportBulkShardOperationsAction.rewriteOperationWithPrimaryTerm(op, leaderOp.primaryTerm()), + equalTo(leaderOp)); } } - assertThat(operationsOnFollower, equalTo(operationsOnLeader)); } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java index 1dfe4a9897075..94b27a2850d5b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java @@ -30,11 +30,11 @@ protected ShardFollowTask createTestInstance() { new ShardId(randomAlphaOfLength(4), randomAlphaOfLength(4), randomInt(5)), new ShardId(randomAlphaOfLength(4), randomAlphaOfLength(4), randomInt(5)), randomIntBetween(1, Integer.MAX_VALUE), - new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(1, Integer.MAX_VALUE), randomIntBetween(1, Integer.MAX_VALUE), - new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(1, Integer.MAX_VALUE), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), + new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), randomIntBetween(1, Integer.MAX_VALUE), new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES), TimeValue.parseTimeValue(randomTimeValue(), ""), @@ -45,6 +45,6 @@ protected ShardFollowTask createTestInstance() { @Override protected Writeable.Reader instanceReader() { - return ShardFollowTask::new; + return ShardFollowTask::readFrom; } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java index b8f570e4ef4f6..b5f369f52472c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java @@ -50,11 +50,11 @@ static ShardFollowTask createShardFollowTask(String followerIndex) { new ShardId(followerIndex, "", 0), new ShardId("leader_index", "", 0), 1024, - TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, - 1, 1024, - 
TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 1, + 1, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 10240, new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java index 85e3a2fb874ea..57bc30210fa74 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java @@ -150,7 +150,10 @@ public void testValidation() throws IOException { .put("index.analysis.analyzer.my_analyzer.type", "custom") .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), customMetaData); Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null)); - assertThat(e.getMessage(), equalTo("the leader and follower index settings must be identical")); + assertThat(e.getMessage(), equalTo("the leader index setting[{\"index.analysis.analyzer.my_analyzer.tokenizer\"" + + ":\"whitespace\",\"index.analysis.analyzer.my_analyzer.type\":\"custom\",\"index.number_of_shards\":\"5\"}] " + + "and follower index settings [{\"index.analysis.analyzer.my_analyzer.tokenizer\":\"standard\"," + + "\"index.analysis.analyzer.my_analyzer.type\":\"custom\",\"index.number_of_shards\":\"5\"}] must be identical")); } { // should fail because the following index does not have the following_index settings @@ -242,6 +245,21 @@ public void testDynamicIndexSettingsAreClassified() { } } + public void testFilter() { + Settings.Builder settings = Settings.builder(); + settings.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), ""); + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), ""); + settings.put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), ""); + settings.put(IndexMetaData.SETTING_INDEX_UUID, ""); + settings.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, ""); + settings.put(IndexMetaData.SETTING_CREATION_DATE, ""); + settings.put(IndexMetaData.SETTING_VERSION_UPGRADED, ""); + settings.put(IndexMetaData.SETTING_VERSION_UPGRADED_STRING, ""); + + Settings result = TransportResumeFollowAction.filter(settings.build()); + assertThat(result.size(), equalTo(0)); + } + private static IndexMetaData createIMD(String index, int numberOfShards, Settings settings, diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java index 93987a7306f45..5b6bac6491398 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java @@ -83,11 +83,11 @@ public void testUnfollowRunningShardFollowTasks() { new ShardId("follow_index", "", 0), new ShardId("leader_index", "", 0), 1024, - TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, - 1, 1024, - TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 1, + 1, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 10240, new ByteSizeValue(512, 
ByteSizeUnit.MB), TimeValue.timeValueMillis(10), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index df406a4c09a68..dfac5ef2654b8 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -59,6 +59,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.index.engine.EngineTestCase.getDocIds; +import static org.elasticsearch.index.engine.EngineTestCase.getTranslog; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -288,7 +289,7 @@ private FollowingEngine createEngine(Store store, EngineConfig config) throws IO store.associateIndexWithNewTranslog(translogUuid); FollowingEngine followingEngine = new FollowingEngine(config); TranslogHandler translogHandler = new TranslogHandler(xContentRegistry(), config.getIndexSettings()); - followingEngine.initializeMaxSeqNoOfUpdatesOrDeletes(); + followingEngine.reinitializeMaxSeqNoOfUpdatesOrDeletes(); followingEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); return followingEngine; } @@ -337,7 +338,7 @@ public void testBasicOptimization() throws Exception { for (int i = 0; i < numDocs; i++) { leader.index(indexForPrimary(Integer.toString(i))); } - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(follower.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(-1L)); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numDocs)); assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); @@ -350,7 +351,7 @@ public void testBasicOptimization() throws Exception { leader.delete(deleteForPrimary(Integer.toString(i))); } } - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(follower.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(leader.getMaxSeqNoOfUpdatesOrDeletes())); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numDocs)); assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); @@ -362,7 +363,7 @@ public void testBasicOptimization() throws Exception { docIds.add(docId); leader.index(indexForPrimary(docId)); } - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(follower.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(leader.getMaxSeqNoOfUpdatesOrDeletes())); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numDocs + moreDocs)); assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); @@ -378,7 +379,7 @@ public void testOptimizeAppendOnly() throws Exception { runFollowTest((leader, follower) -> { EngineTestCase.concurrentlyApplyOps(ops, leader); assertThat(follower.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(-1L)); - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo((long) numOps)); }); } @@ -396,13 +397,13 @@ public void testOptimizeMultipleVersions() throws Exception { Randomness.shuffle(ops); runFollowTest((leader, 
follower) -> { EngineTestCase.concurrentlyApplyOps(ops, leader); - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); final List appendOps = new ArrayList<>(); for (int numAppends = scaledRandomIntBetween(0, 100), i = 0; i < numAppends; i++) { appendOps.add(indexForPrimary("append-" + i)); } EngineTestCase.concurrentlyApplyOps(appendOps, leader); - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), greaterThanOrEqualTo((long) appendOps.size())); }); } @@ -410,19 +411,19 @@ public void testOptimizeMultipleVersions() throws Exception { public void testOptimizeSingleDocSequentially() throws Exception { runFollowTest((leader, follower) -> { leader.index(indexForPrimary("id")); - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(1L)); leader.delete(deleteForPrimary("id")); - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(1L)); leader.index(indexForPrimary("id")); - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(2L)); leader.index(indexForPrimary("id")); - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(2L)); }); } @@ -432,20 +433,20 @@ public void testOptimizeSingleDocConcurrently() throws Exception { Randomness.shuffle(ops); runFollowTest((leader, follower) -> { EngineTestCase.concurrentlyApplyOps(ops, leader); - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); long numOptimized = follower.getNumberOfOptimizedIndexing(); leader.delete(deleteForPrimary("id")); - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numOptimized)); leader.index(indexForPrimary("id")); - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numOptimized + 1L)); leader.index(indexForPrimary("id")); - follower.waitForOpsToComplete(leader.getLocalCheckpoint()); + EngineTestCase.waitForOpsToComplete(follower, leader.getLocalCheckpoint()); assertThat(follower.getNumberOfOptimizedIndexing(), equalTo(numOptimized + 1L)); }); } @@ -472,7 +473,7 @@ private void runFollowTest(CheckedBiConsumer { + globalCheckpoint.set(randomNonNegativeLong()); + try { + followingEngine.verifyEngineBeforeIndexClosing(); + } catch (final IllegalStateException e) { + fail("Following engine pre-closing verifications failed"); + } + }); + } + + public void testMaxSeqNoInCommitUserData() throws Exception { + final Settings settings = Settings.builder().put("index.number_of_shards", 
1).put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT).put("index.xpack.ccr.following_index", true) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build(); + final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + try (Store store = createStore(shardId, indexSettings, newDirectory())) { + final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store, logger, xContentRegistry()); + try (FollowingEngine engine = createEngine(store, engineConfig)) { + AtomicBoolean running = new AtomicBoolean(true); + Thread rollTranslog = new Thread(() -> { + while (running.get() && getTranslog(engine).currentFileGeneration() < 500) { + engine.rollTranslogGeneration(); // make adding operations to translog slower + } + }); + rollTranslog.start(); + + Thread indexing = new Thread(() -> { + List ops = EngineTestCase.generateSingleDocHistory(true, VersionType.EXTERNAL, 2, 50, 500, "id"); + engine.advanceMaxSeqNoOfUpdatesOrDeletes(ops.stream().mapToLong(Engine.Operation::seqNo).max().getAsLong()); + for (Engine.Operation op : ops) { + if (running.get() == false) { + return; + } + try { + EngineTestCase.applyOperation(engine, op); + } catch (IOException e) { + throw new AssertionError(e); + } + } + }); + indexing.start(); + + int numCommits = between(5, 20); + for (int i = 0; i < numCommits; i++) { + engine.flush(false, true); + } + running.set(false); + indexing.join(); + rollTranslog.join(); + EngineTestCase.assertMaxSeqNoInCommitUserData(engine); + } + } + } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRepositoryRetentionLeaseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRepositoryRetentionLeaseTests.java new file mode 100644 index 0000000000000..8f49074b502a0 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRepositoryRetentionLeaseTests.java @@ -0,0 +1,191 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.repository; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.seqno.RetentionLeaseActions; +import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; +import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrSettings; +import org.mockito.ArgumentCaptor; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.index.seqno.RetentionLeaseActions.RETAIN_ALL; +import static org.elasticsearch.xpack.ccr.CcrRetentionLeases.retentionLeaseId; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class CcrRepositoryRetentionLeaseTests extends ESTestCase { + + public void testWhenRetentionLeaseAlreadyExistsWeTryToRenewIt() { + final RepositoryMetaData repositoryMetaData = mock(RepositoryMetaData.class); + when(repositoryMetaData.name()).thenReturn(CcrRepository.NAME_PREFIX); + final Set> settings = + Stream.concat( + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), + CcrSettings.getSettings().stream().filter(Setting::hasNodeScope)) + .collect(Collectors.toSet()); + + final CcrRepository repository = new CcrRepository( + repositoryMetaData, + mock(Client.class), + new CcrLicenseChecker(() -> true, () -> true), + Settings.EMPTY, + new CcrSettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, settings)), + mock(ThreadPool.class)); + + final ShardId followerShardId = new ShardId(new Index("follower-index-name", "follower-index-uuid"), 0); + final ShardId leaderShardId = new ShardId(new Index("leader-index-name", "leader-index-uuid"), 0); + + final String retentionLeaseId = + retentionLeaseId("local-cluster", followerShardId.getIndex(), "remote-cluster", leaderShardId.getIndex()); + + // simulate that the retention lease already exists on the leader, and verify that we attempt to renew it + final Client remoteClient = mock(Client.class); + final ArgumentCaptor addRequestCaptor = + ArgumentCaptor.forClass(RetentionLeaseActions.AddRequest.class); + doAnswer( + invocationOnMock -> { + @SuppressWarnings("unchecked") final ActionListener listener = + (ActionListener) invocationOnMock.getArguments()[2]; + listener.onFailure(new RetentionLeaseAlreadyExistsException(retentionLeaseId)); + return null; + }) + .when(remoteClient) + .execute(same(RetentionLeaseActions.Add.INSTANCE), addRequestCaptor.capture(), any()); + final ArgumentCaptor renewRequestCaptor = 
+ ArgumentCaptor.forClass(RetentionLeaseActions.RenewRequest.class); + doAnswer( + invocationOnMock -> { + @SuppressWarnings("unchecked") final ActionListener listener = + (ActionListener) invocationOnMock.getArguments()[2]; + listener.onResponse(new RetentionLeaseActions.Response()); + return null; + }) + .when(remoteClient) + .execute(same(RetentionLeaseActions.Renew.INSTANCE), renewRequestCaptor.capture(), any()); + + repository.acquireRetentionLeaseOnLeader(followerShardId, retentionLeaseId, leaderShardId, remoteClient); + + verify(remoteClient).execute(same(RetentionLeaseActions.Add.INSTANCE), any(RetentionLeaseActions.AddRequest.class), any()); + assertThat(addRequestCaptor.getValue().getShardId(), equalTo(leaderShardId)); + assertThat(addRequestCaptor.getValue().getId(), equalTo(retentionLeaseId)); + assertThat(addRequestCaptor.getValue().getRetainingSequenceNumber(), equalTo(RETAIN_ALL)); + assertThat(addRequestCaptor.getValue().getSource(), equalTo("ccr")); + + verify(remoteClient).execute(same(RetentionLeaseActions.Renew.INSTANCE), any(RetentionLeaseActions.RenewRequest.class), any()); + assertThat(renewRequestCaptor.getValue().getShardId(), equalTo(leaderShardId)); + assertThat(renewRequestCaptor.getValue().getId(), equalTo(retentionLeaseId)); + assertThat(renewRequestCaptor.getValue().getRetainingSequenceNumber(), equalTo(RETAIN_ALL)); + assertThat(renewRequestCaptor.getValue().getSource(), equalTo("ccr")); + + verifyNoMoreInteractions(remoteClient); + } + + public void testWhenRetentionLeaseExpiresBeforeWeCanRenewIt() { + final RepositoryMetaData repositoryMetaData = mock(RepositoryMetaData.class); + when(repositoryMetaData.name()).thenReturn(CcrRepository.NAME_PREFIX); + final Set> settings = + Stream.concat( + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), + CcrSettings.getSettings().stream().filter(Setting::hasNodeScope)) + .collect(Collectors.toSet()); + + final CcrRepository repository = new CcrRepository( + repositoryMetaData, + mock(Client.class), + new CcrLicenseChecker(() -> true, () -> true), + Settings.EMPTY, + new CcrSettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, settings)), + mock(ThreadPool.class)); + + final ShardId followerShardId = new ShardId(new Index("follower-index-name", "follower-index-uuid"), 0); + final ShardId leaderShardId = new ShardId(new Index("leader-index-name", "leader-index-uuid"), 0); + + final String retentionLeaseId = + retentionLeaseId("local-cluster", followerShardId.getIndex(), "remote-cluster", leaderShardId.getIndex()); + + // simulate that the retention lease already exists on the leader, expires before we renew, and verify that we attempt to add it + final Client remoteClient = mock(Client.class); + final ArgumentCaptor addRequestCaptor = + ArgumentCaptor.forClass(RetentionLeaseActions.AddRequest.class); + final PlainActionFuture response = new PlainActionFuture<>(); + response.onResponse(new RetentionLeaseActions.Response()); + doAnswer( + new Answer() { + + final AtomicBoolean firstInvocation = new AtomicBoolean(true); + + @Override + public Void answer(final InvocationOnMock invocationOnMock) { + @SuppressWarnings("unchecked") final ActionListener listener = + (ActionListener) invocationOnMock.getArguments()[2]; + if (firstInvocation.compareAndSet(true, false)) { + listener.onFailure(new RetentionLeaseAlreadyExistsException(retentionLeaseId)); + } else { + listener.onResponse(new RetentionLeaseActions.Response()); + } + return null; + } + + }) + 
.when(remoteClient).execute(same(RetentionLeaseActions.Add.INSTANCE), addRequestCaptor.capture(), any()); + final ArgumentCaptor renewRequestCaptor = + ArgumentCaptor.forClass(RetentionLeaseActions.RenewRequest.class); + doAnswer( + invocationOnMock -> { + @SuppressWarnings("unchecked") final ActionListener listener = + (ActionListener) invocationOnMock.getArguments()[2]; + listener.onFailure(new RetentionLeaseNotFoundException(retentionLeaseId)); + return null; + } + ).when(remoteClient) + .execute(same(RetentionLeaseActions.Renew.INSTANCE), renewRequestCaptor.capture(), any()); + + repository.acquireRetentionLeaseOnLeader(followerShardId, retentionLeaseId, leaderShardId, remoteClient); + + verify(remoteClient, times(2)) + .execute(same(RetentionLeaseActions.Add.INSTANCE), any(RetentionLeaseActions.AddRequest.class), any()); + assertThat(addRequestCaptor.getValue().getShardId(), equalTo(leaderShardId)); + assertThat(addRequestCaptor.getValue().getId(), equalTo(retentionLeaseId)); + assertThat(addRequestCaptor.getValue().getRetainingSequenceNumber(), equalTo(RETAIN_ALL)); + assertThat(addRequestCaptor.getValue().getSource(), equalTo("ccr")); + + verify(remoteClient).execute(same(RetentionLeaseActions.Renew.INSTANCE), any(RetentionLeaseActions.RenewRequest.class), any()); + assertThat(renewRequestCaptor.getValue().getShardId(), equalTo(leaderShardId)); + assertThat(renewRequestCaptor.getValue().getId(), equalTo(retentionLeaseId)); + assertThat(renewRequestCaptor.getValue().getRetainingSequenceNumber(), equalTo(RETAIN_ALL)); + assertThat(renewRequestCaptor.getValue().getSource(), equalTo("ccr")); + + verifyNoMoreInteractions(remoteClient); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java index 1c3c0da3d3c8a..2b5011d45139f 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java @@ -12,10 +12,9 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; @@ -25,7 +24,7 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; @@ -39,10 +38,8 @@ public void setUp() throws Exception { super.setUp(); Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), "node").build(); taskQueue = new DeterministicTaskQueue(settings, random()); - Set> registeredSettings = Sets.newHashSet(CcrSettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, - CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND, CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING, - CcrSettings.RECOVERY_CHUNK_SIZE); - ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, registeredSettings); + ClusterSettings clusterSettings = new 
ClusterSettings(Settings.EMPTY, CcrSettings.getSettings() + .stream().filter(s -> s.hasNodeScope()).collect(Collectors.toSet())); restoreSourceService = new CcrRestoreSourceService(taskQueue.getThreadPool(), new CcrSettings(Settings.EMPTY, clusterSettings)); } @@ -202,7 +199,10 @@ public void testGetSessionDoesNotLeakFileIfClosed() throws IOException { sessionReader.readFileBytes(files.get(1).name(), new BytesArray(new byte[10])); } + assertTrue(EngineTestCase.hasSnapshottedCommits(IndexShardTestCase.getEngine(indexShard))); restoreSourceService.closeSession(sessionUUID); + assertFalse(EngineTestCase.hasSnapshottedCommits(IndexShardTestCase.getEngine(indexShard))); + closeShards(indexShard); // Exception will be thrown if file is not closed. } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AutoFollowStatsMonitoringDocTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AutoFollowStatsMonitoringDocTests.java index 90d5d0c6e9277..eebed6de676ec 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AutoFollowStatsMonitoringDocTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AutoFollowStatsMonitoringDocTests.java @@ -148,7 +148,7 @@ public void testShardFollowNodeTaskStatusFieldsMapped() throws IOException { Map template = XContentHelper.convertToMap(XContentType.JSON.xContent(), MonitoringTemplateUtils.loadTemplate("es"), false); Map autoFollowStatsMapping = - (Map) XContentMapValues.extractValue("mappings.doc.properties.ccr_auto_follow_stats.properties", template); + (Map) XContentMapValues.extractValue("mappings._doc.properties.ccr_auto_follow_stats.properties", template); assertThat(serializedStatus.size(), equalTo(autoFollowStatsMapping.size())); for (Map.Entry entry : serializedStatus.entrySet()) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java index dc3c4793d973f..fd8904307db3e 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java @@ -237,7 +237,8 @@ public void testShardFollowNodeTaskStatusFieldsMapped() throws IOException { Map template = XContentHelper.convertToMap(XContentType.JSON.xContent(), MonitoringTemplateUtils.loadTemplate("es"), false); - Map followStatsMapping = (Map) XContentMapValues.extractValue("mappings.doc.properties.ccr_stats.properties", template); + Map followStatsMapping = (Map) XContentMapValues + .extractValue("mappings._doc.properties.ccr_stats.properties", template); assertThat(serializedStatus.size(), equalTo(followStatsMapping.size())); for (Map.Entry entry : serializedStatus.entrySet()) { String fieldName = entry.getKey(); diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index ed61fc9e3e703..ac90be8dbafec 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -2,7 +2,6 @@ import org.elasticsearch.gradle.MavenFilteringHack import java.nio.file.Files import java.nio.file.Paths -import com.carrotsearch.gradle.junit4.RandomizedTestingTask; apply plugin: 'elasticsearch.esplugin' apply plugin: 'nebula.maven-base-publish' @@ -48,6 +47,9 @@ 
dependencies { testCompile project(path: ':modules:reindex', configuration: 'runtime') testCompile project(path: ':modules:parent-join', configuration: 'runtime') testCompile project(path: ':modules:analysis-common', configuration: 'runtime') + testCompile(project(':x-pack:license-tools')) { + transitive = false + } testCompile ("org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}") } @@ -95,11 +97,11 @@ licenseHeaders { } // make LicenseSigner available for testing signed licenses -sourceSets.test.java { - srcDir '../../license-tools/src/main/java' +sourceSets.test.resources { + srcDir 'src/main/config' } -unitTest { +test { /* * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each * other if we allow them to set the number of available processors as it's set-once in Netty. @@ -139,6 +141,6 @@ thirdPartyAudit.ignoreMissingClasses ( integTest.enabled = false // There are some integ tests that don't require a cluster, we still want to run those -task internalClusterTest(type: RandomizedTestingTask) { +task internalClusterTest(type: Test) { include "**/*IT.class" } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/common/network/InetAddressHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/common/network/InetAddressHelper.java index 4c52cfb5c4cd5..3c852a3ae4017 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/common/network/InetAddressHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/common/network/InetAddressHelper.java @@ -19,4 +19,12 @@ private InetAddressHelper() {} public static InetAddress[] getAllAddresses() throws SocketException { return NetworkUtils.getAllAddresses(); } + + public static InetAddress[] filterIPV4(InetAddress[] addresses){ + return NetworkUtils.filterIPV4(addresses); + } + + public static InetAddress[] filterIPV6(InetAddress[] addresses){ + return NetworkUtils.filterIPV6(addresses); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java index 158c0eb7b2e63..e39b5b7dcc196 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java @@ -126,6 +126,10 @@ public static OperationMode resolve(String type) { throw new IllegalArgumentException("unknown type [" + type + "]"); } } + + public String description() { + return name().toLowerCase(Locale.ROOT); + } } private License(int version, String uid, String issuer, String issuedTo, long issueDate, String type, @@ -776,22 +780,4 @@ public Builder validate() { } } - /** - * Returns true iff the license is a production licnese - */ - public boolean isProductionLicense() { - switch (operationMode()) { - case MISSING: - case TRIAL: - case BASIC: - return false; - case STANDARD: - case GOLD: - case PLATINUM: - return true; - default: - throw new AssertionError("unknown operation mode: " + operationMode()); - - } - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index 68e094511a3e8..f750d1349a0ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -114,7 +114,7 @@ public class LicenseService extends 
AbstractLifecycleComponent implements Cluste public static final String LICENSE_JOB = "licenseJob"; - private static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("EEEE, MMMMM dd, yyyy"); + private static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("EEEE, MMMM dd, yyyy"); private static final String ACKNOWLEDGEMENT_HEADER = "This license update requires acknowledgement. To acknowledge the license, " + "please read the following messages and update the license again, this time with the \"acknowledge=true\" parameter:"; @@ -134,11 +134,15 @@ public LicenseService(Settings settings, ClusterService clusterService, Clock cl } private void logExpirationWarning(long expirationMillis, boolean expired) { + logger.warn("{}", buildExpirationMessage(expirationMillis, expired)); + } + + static CharSequence buildExpirationMessage(long expirationMillis, boolean expired) { String expiredMsg = expired ? "expired" : "will expire"; String general = LoggerMessageFormat.format(null, "License [{}] on [{}].\n" + - "# If you have a new license, please update it. Otherwise, please reach out to\n" + - "# your support contact.\n" + - "# ", expiredMsg, DATE_FORMATTER.formatMillis(expirationMillis)); + "# If you have a new license, please update it. Otherwise, please reach out to\n" + + "# your support contact.\n" + + "# ", expiredMsg, DATE_FORMATTER.formatMillis(expirationMillis)); if (expired) { general = general.toUpperCase(Locale.ROOT); } @@ -161,7 +165,7 @@ private void logExpirationWarning(long expirationMillis, boolean expired) { } } }); - logger.warn("{}", builder); + return builder; } private void populateExpirationCallbacks() { @@ -214,10 +218,13 @@ public void registerLicense(final PutLicenseRequest request, final ActionListene } } + // This check would be incorrect if "basic" licenses were allowed here + // because the defaults there mean that security can be "off", even if the setting is "on" + // BUT basic licenses are explicitly excluded earlier in this method, so we don't need to worry if (XPackSettings.SECURITY_ENABLED.get(settings)) { // TODO we should really validate that all nodes have xpack installed and are consistently configured but this // should happen on a different level and not in this code - if (newLicense.isProductionLicense() + if (XPackLicenseState.isTransportTlsRequired(newLicense, settings) && XPackSettings.TRANSPORT_SSL_ENABLED.get(settings) == false && isProductionMode(settings, clusterService.localNode())) { // security is on but TLS is not configured we gonna fail the entire request and throw an exception diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 7cb04a9e57a4b..a61d8c67f879b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -95,13 +95,18 @@ private static String[] securityAcknowledgementMessages(OperationMode currentMod switch (newMode) { case BASIC: switch (currentMode) { - case TRIAL: case STANDARD: + return new String[] { + "Security will default to disabled (set " + XPackSettings.SECURITY_ENABLED.getKey() + " to enable security).", + }; + case TRIAL: case GOLD: case PLATINUM: return new String[] { - "The following X-Pack security functionality will be disabled: authentication, authorization, " + - "ip filtering, and auditing. 
Please restart your node after applying the license.", + "Security will default to disabled (set " + XPackSettings.SECURITY_ENABLED.getKey() + " to enable security).", + "Authentication will be limited to the native and file realms.", + "Security tokens and API keys will not be supported.", + "IP filtering and auditing will be disabled.", "Field and document level access control will be disabled.", "Custom realms will be ignored.", "A custom authorization engine will be ignored." @@ -125,7 +130,7 @@ private static String[] securityAcknowledgementMessages(OperationMode currentMod case STANDARD: switch (currentMode) { case BASIC: - // ^^ though technically it was already disabled, it's not bad to remind them + // ^^ though technically it doesn't change the feature set, it's not bad to remind them case GOLD: case PLATINUM: case TRIAL: @@ -277,7 +282,7 @@ private static class Status { public XPackLicenseState(Settings settings) { this.listeners = new CopyOnWriteArrayList<>(); this.isSecurityEnabled = XPackSettings.SECURITY_ENABLED.get(settings); - this.isSecurityExplicitlyEnabled = isSecurityEnabled && settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()); + this.isSecurityExplicitlyEnabled = isSecurityEnabled && isSecurityExplicitlyEnabled(settings); } private XPackLicenseState(XPackLicenseState xPackLicenseState) { @@ -287,6 +292,10 @@ private XPackLicenseState(XPackLicenseState xPackLicenseState) { this.status = xPackLicenseState.status; } + private static boolean isSecurityExplicitlyEnabled(Settings settings) { + return settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()); + } + /** * Updates the current state of the license, which will change what features are available. * @@ -331,8 +340,17 @@ public synchronized boolean isAuthAllowed() { OperationMode mode = status.mode; final boolean isSecurityCurrentlyEnabled = isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabled); - return isSecurityCurrentlyEnabled && (mode == OperationMode.STANDARD || mode == OperationMode.GOLD - || mode == OperationMode.PLATINUM || mode == OperationMode.TRIAL); + if (isSecurityCurrentlyEnabled) { + switch (mode) { + case BASIC: + case STANDARD: + case GOLD: + case PLATINUM: + case TRIAL: + return true; + } + } + return false; } /** @@ -405,6 +423,7 @@ public synchronized AllowedRealmType allowedRealmType() { return AllowedRealmType.ALL; case GOLD: return AllowedRealmType.DEFAULT; + case BASIC: case STANDARD: return AllowedRealmType.NATIVE; default: @@ -425,6 +444,24 @@ public synchronized boolean isCustomRoleProvidersAllowed() { && status.active; } + /** + * @return whether the Elasticsearch {@code TokenService} is allowed based on the license {@link OperationMode} + */ + public synchronized boolean isTokenServiceAllowed() { + final OperationMode mode = status.mode; + final boolean isSecurityCurrentlyEnabled = isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabled); + return isSecurityCurrentlyEnabled && (mode == OperationMode.GOLD || mode == OperationMode.PLATINUM || mode == OperationMode.TRIAL); + } + + /** + * @return whether the Elasticsearch {@code ApiKeyService} is allowed based on the license {@link OperationMode} + */ + public synchronized boolean isApiKeyServiceAllowed() { + final OperationMode mode = status.mode; + final boolean isSecurityCurrentlyEnabled = isSecurityEnabled(mode, isSecurityExplicitlyEnabled, isSecurityEnabled); + return isSecurityCurrentlyEnabled && (mode == OperationMode.GOLD || mode == OperationMode.PLATINUM || mode == OperationMode.TRIAL); 
+ } + /** * @return whether "authorization_realms" are allowed based on the license {@link OperationMode} * @see org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings @@ -665,24 +702,54 @@ public synchronized boolean isTrialLicense() { public synchronized boolean isSecurityAvailable() { OperationMode mode = status.mode; return mode == OperationMode.GOLD || mode == OperationMode.PLATINUM || mode == OperationMode.STANDARD || - mode == OperationMode.TRIAL; + mode == OperationMode.TRIAL || mode == OperationMode.BASIC; } /** - * @return true if security has been disabled by a trial license which is the case of the - * default distribution post 6.3.0. The conditions necessary for this are: + * @return true if security has been disabled due it being the default setting for this license type. + * The conditions necessary for this are: *
- * <li>A trial license</li>
+ * <li>A trial or basic license</li>
 * <li>xpack.security.enabled not specified as a setting</li>
 * </ul>
*/ - public synchronized boolean isSecurityDisabledByTrialLicense() { - return status.mode == OperationMode.TRIAL && isSecurityEnabled && isSecurityExplicitlyEnabled == false; + public synchronized boolean isSecurityDisabledByLicenseDefaults() { + switch (status.mode) { + case TRIAL: + case BASIC: + return isSecurityEnabled && isSecurityExplicitlyEnabled == false; + } + return false; + } + + public static boolean isTransportTlsRequired(License license, Settings settings) { + if (license == null) { + return false; + } + switch (license.operationMode()) { + case STANDARD: + case GOLD: + case PLATINUM: + return XPackSettings.SECURITY_ENABLED.get(settings); + case BASIC: + return XPackSettings.SECURITY_ENABLED.get(settings) && isSecurityExplicitlyEnabled(settings); + case MISSING: + case TRIAL: + return false; + default: + throw new AssertionError("unknown operation mode [" + license.operationMode() + "]"); + } } private static boolean isSecurityEnabled(final OperationMode mode, final boolean isSecurityExplicitlyEnabled, final boolean isSecurityEnabled) { - return mode == OperationMode.TRIAL ? isSecurityExplicitlyEnabled : isSecurityEnabled; + switch (mode) { + case TRIAL: + case BASIC: + return isSecurityExplicitlyEnabled; + default: + return isSecurityEnabled; + } } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java index 196982c0a35fb..c1a682757d140 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -96,10 +96,26 @@ public GraphExploreRequest indicesOptions(IndicesOptions indicesOptions) { return this; } + /** + * The document types to execute the explore against. Defaults to be executed against + * all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. + */ + @Deprecated public String[] types() { return this.types; } + /** + * The document types to execute the explore request against. Defaults to be executed against + * all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. + */ + @Deprecated public GraphExploreRequest types(String... types) { this.types = types; return this; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java index 3e36fc5977491..d7f70cf8ef2e1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java @@ -45,7 +45,7 @@ *

 * This is a filter snapshot repository that only snapshots the minimal required information
 * that is needed to recreate the index. In other words instead of snapshotting the entire shard
- * with all it's lucene indexed fields, doc values, points etc. it only snapshots the the stored
+ * with all it's lucene indexed fields, doc values, points etc. it only snapshots the stored
 * fields including _source and _routing as well as the live docs in oder to distinguish between
 * live and deleted docs.
 *

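Note: the XPackLicenseState changes above add isTokenServiceAllowed() and isApiKeyServiceAllowed(), but no caller for them appears in this part of the diff. The sketch below is a hypothetical illustration of how a security service might consult them; the class name, constructor wiring, and exception messages are assumptions, and only the two license-state methods come from this change.

// Illustrative sketch only -- not part of this change.
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.license.XPackLicenseState;

class TokenAndApiKeyLicenseGuard {
    private final XPackLicenseState licenseState;

    TokenAndApiKeyLicenseGuard(XPackLicenseState licenseState) {
        this.licenseState = licenseState;
    }

    // Reject API key requests when the current license level does not allow the ApiKeyService.
    void ensureApiKeysAllowed() {
        if (licenseState.isApiKeyServiceAllowed() == false) {
            throw new ElasticsearchSecurityException("current license is non-compliant for [api_keys]");
        }
    }

    // Reject token requests when the current license level does not allow the TokenService.
    void ensureTokensAllowed() {
        if (licenseState.isTokenServiceAllowed() == false) {
            throw new ElasticsearchSecurityException("current license is non-compliant for [security_tokens]");
        }
    }
}

Either check would typically run before any request handling so that feature use fails fast on a non-compliant license.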
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java index a6874a188534a..4f0e012b486d8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java @@ -53,15 +53,6 @@ public final class ClientHelper { private ClientHelper() {} - /** - * Stashes the current context and sets the origin in the current context. The original context is returned as a stored context - * @deprecated use ThreadContext.stashWithOrigin - */ - @Deprecated - public static ThreadContext.StoredContext stashWithOrigin(ThreadContext threadContext, String origin) { - return threadContext.stashWithOrigin(origin); - } - /** * Returns a client that will always set the appropriate origin and ensure the proper context is restored by listeners * @deprecated use {@link OriginSettingClient} instead @@ -78,7 +69,7 @@ public static v ThreadContext threadContext, String origin, Request request, ActionListener listener, BiConsumer> consumer) { final Supplier supplier = threadContext.newRestorableContext(false); - try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, origin)) { + try (ThreadContext.StoredContext ignore = threadContext.stashWithOrigin(origin)) { consumer.accept(request, new ContextPreservingActionListener<>(supplier, listener)); } } @@ -93,7 +84,7 @@ RequestBuilder extends ActionRequestBuilder> void executeAsyn ActionListener listener) { final ThreadContext threadContext = client.threadPool().getThreadContext(); final Supplier supplier = threadContext.newRestorableContext(false); - try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, origin)) { + try (ThreadContext.StoredContext ignore = threadContext.stashWithOrigin(origin)) { client.execute(action, request, new ContextPreservingActionListener<>(supplier, listener)); } } @@ -120,7 +111,7 @@ public static T executeWithHeaders(Map patterns, public AutoFollowMetadata(StreamInput in) throws IOException { this( - in.readMap(StreamInput::readString, AutoFollowPattern::new), + in.readMap(StreamInput::readString, AutoFollowPattern::readFrom), in.readMapOfLists(StreamInput::readString, StreamInput::readString), in.readMap(StreamInput::readString, valIn -> valIn.readMap(StreamInput::readString, StreamInput::readString)) ); @@ -175,116 +174,60 @@ public int hashCode() { return Objects.hash(patterns, followedLeaderIndexUUIDs, headers); } - public static class AutoFollowPattern implements Writeable, ToXContentObject { + public static class AutoFollowPattern extends ImmutableFollowParameters implements ToXContentObject { public static final ParseField REMOTE_CLUSTER_FIELD = new ParseField("remote_cluster"); public static final ParseField LEADER_PATTERNS_FIELD = new ParseField("leader_index_patterns"); public static final ParseField FOLLOW_PATTERN_FIELD = new ParseField("follow_index_pattern"); - public static final ParseField MAX_READ_REQUEST_OPERATION_COUNT = new ParseField("max_read_request_operation_count"); - public static final ParseField MAX_READ_REQUEST_SIZE = new ParseField("max_read_request_size"); - public static final ParseField MAX_OUTSTANDING_READ_REQUESTS = new ParseField("max_outstanding_read_requests"); - public static final ParseField MAX_WRITE_REQUEST_OPERATION_COUNT = new ParseField("max_write_request_operation_count"); - public static final ParseField MAX_WRITE_REQUEST_SIZE = new 
ParseField("max_write_request_size"); - public static final ParseField MAX_OUTSTANDING_WRITE_REQUESTS = new ParseField("max_outstanding_write_requests"); - public static final ParseField MAX_WRITE_BUFFER_COUNT = new ParseField("max_write_buffer_count"); - public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); - public static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); - public static final ParseField READ_POLL_TIMEOUT = new ParseField("read_poll_timeout"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("auto_follow_pattern", args -> new AutoFollowPattern((String) args[0], (List) args[1], (String) args[2], (Integer) args[3], - (ByteSizeValue) args[4], (Integer) args[5], (Integer) args[6], (ByteSizeValue) args[7], (Integer) args[8], + (Integer) args[4], (Integer) args[5], (Integer) args[6], (ByteSizeValue) args[7], (ByteSizeValue) args[8], (Integer) args[9], (ByteSizeValue) args[10], (TimeValue) args[11], (TimeValue) args[12])); static { PARSER.declareString(ConstructingObjectParser.constructorArg(), REMOTE_CLUSTER_FIELD); PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), LEADER_PATTERNS_FIELD); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOW_PATTERN_FIELD); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_READ_REQUEST_OPERATION_COUNT); - PARSER.declareField( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()), - MAX_READ_REQUEST_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_OUTSTANDING_READ_REQUESTS); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_REQUEST_OPERATION_COUNT); - PARSER.declareField( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_REQUEST_SIZE.getPreferredName()), - MAX_WRITE_REQUEST_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_OUTSTANDING_WRITE_REQUESTS); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_BUFFER_COUNT); - PARSER.declareField( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_BUFFER_SIZE.getPreferredName()), - MAX_WRITE_BUFFER_SIZE, - ObjectParser.ValueType.STRING); - PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()), - MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); - PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), READ_POLL_TIMEOUT.getPreferredName()), - READ_POLL_TIMEOUT, ObjectParser.ValueType.STRING); + ImmutableFollowParameters.initParser(PARSER); } private final String remoteCluster; private final List leaderIndexPatterns; private final String followIndexPattern; - private final Integer maxReadRequestOperationCount; - private final ByteSizeValue maxReadRequestSize; - private final Integer maxOutstandingReadRequests; - private final Integer maxWriteRequestOperationCount; - private final ByteSizeValue maxWriteRequestSize; - private final Integer maxOutstandingWriteRequests; - private final Integer maxWriteBufferCount; - private final 
ByteSizeValue maxWriteBufferSize; - private final TimeValue maxRetryDelay; - private final TimeValue pollTimeout; public AutoFollowPattern(String remoteCluster, List leaderIndexPatterns, String followIndexPattern, Integer maxReadRequestOperationCount, - ByteSizeValue maxReadRequestSize, - Integer maxOutstandingReadRequests, Integer maxWriteRequestOperationCount, - ByteSizeValue maxWriteRequestSize, + Integer maxOutstandingReadRequests, Integer maxOutstandingWriteRequests, + ByteSizeValue maxReadRequestSize, + ByteSizeValue maxWriteRequestSize, Integer maxWriteBufferCount, - ByteSizeValue maxWriteBufferSize, TimeValue maxRetryDelay, + ByteSizeValue maxWriteBufferSize, + TimeValue maxRetryDelay, TimeValue pollTimeout) { + super(maxReadRequestOperationCount, maxWriteRequestOperationCount, maxOutstandingReadRequests, maxOutstandingWriteRequests, + maxReadRequestSize, maxWriteRequestSize, maxWriteBufferCount, maxWriteBufferSize, maxRetryDelay, pollTimeout); this.remoteCluster = remoteCluster; this.leaderIndexPatterns = leaderIndexPatterns; this.followIndexPattern = followIndexPattern; - this.maxReadRequestOperationCount = maxReadRequestOperationCount; - this.maxReadRequestSize = maxReadRequestSize; - this.maxOutstandingReadRequests = maxOutstandingReadRequests; - this.maxWriteRequestOperationCount = maxWriteRequestOperationCount; - this.maxWriteRequestSize = maxWriteRequestSize; - this.maxOutstandingWriteRequests = maxOutstandingWriteRequests; - this.maxWriteBufferCount = maxWriteBufferCount; - this.maxWriteBufferSize = maxWriteBufferSize; - this.maxRetryDelay = maxRetryDelay; - this.pollTimeout = pollTimeout; } - public AutoFollowPattern(StreamInput in) throws IOException { - remoteCluster = in.readString(); - leaderIndexPatterns = in.readStringList(); - followIndexPattern = in.readOptionalString(); - maxReadRequestOperationCount = in.readOptionalVInt(); - maxReadRequestSize = in.readOptionalWriteable(ByteSizeValue::new); - maxOutstandingReadRequests = in.readOptionalVInt(); - maxWriteRequestOperationCount = in.readOptionalVInt(); - maxWriteRequestSize = in.readOptionalWriteable(ByteSizeValue::new); - maxOutstandingWriteRequests = in.readOptionalVInt(); - maxWriteBufferCount = in.readOptionalVInt(); - maxWriteBufferSize = in.readOptionalWriteable(ByteSizeValue::new); - maxRetryDelay = in.readOptionalTimeValue(); - pollTimeout = in.readOptionalTimeValue(); + public static AutoFollowPattern readFrom(StreamInput in) throws IOException { + return new AutoFollowPattern(in.readString(), in.readStringList(), in.readOptionalString(), in); + } + + private AutoFollowPattern(String remoteCluster, List leaderIndexPatterns, + String followIndexPattern, StreamInput in) throws IOException { + super(in); + this.remoteCluster = remoteCluster; + this.leaderIndexPatterns = leaderIndexPatterns; + this.followIndexPattern = followIndexPattern; } public boolean match(String indexName) { @@ -307,61 +250,12 @@ public String getFollowIndexPattern() { return followIndexPattern; } - public Integer getMaxReadRequestOperationCount() { - return maxReadRequestOperationCount; - } - - public Integer getMaxOutstandingReadRequests() { - return maxOutstandingReadRequests; - } - - public ByteSizeValue getMaxReadRequestSize() { - return maxReadRequestSize; - } - - public Integer getMaxWriteRequestOperationCount() { - return maxWriteRequestOperationCount; - } - - public ByteSizeValue getMaxWriteRequestSize() { - return maxWriteRequestSize; - } - - public Integer getMaxOutstandingWriteRequests() { - return 
maxOutstandingWriteRequests; - } - - public Integer getMaxWriteBufferCount() { - return maxWriteBufferCount; - } - - public ByteSizeValue getMaxWriteBufferSize() { - return maxWriteBufferSize; - } - - public TimeValue getMaxRetryDelay() { - return maxRetryDelay; - } - - public TimeValue getPollTimeout() { - return pollTimeout; - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(remoteCluster); out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexPattern); - out.writeOptionalVInt(maxReadRequestOperationCount); - out.writeOptionalWriteable(maxReadRequestSize); - out.writeOptionalVInt(maxOutstandingReadRequests); - out.writeOptionalVInt(maxWriteRequestOperationCount); - out.writeOptionalWriteable(maxWriteRequestSize); - out.writeOptionalVInt(maxOutstandingWriteRequests); - out.writeOptionalVInt(maxWriteBufferCount); - out.writeOptionalWriteable(maxWriteBufferSize); - out.writeOptionalTimeValue(maxRetryDelay); - out.writeOptionalTimeValue(pollTimeout); + super.writeTo(out); } @Override @@ -371,36 +265,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (followIndexPattern != null) { builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), followIndexPattern); } - if (maxReadRequestOperationCount != null) { - builder.field(MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); - } - if (maxReadRequestSize != null) { - builder.field(MAX_READ_REQUEST_SIZE.getPreferredName(), maxReadRequestSize.getStringRep()); - } - if (maxOutstandingReadRequests != null) { - builder.field(MAX_OUTSTANDING_READ_REQUESTS.getPreferredName(), maxOutstandingReadRequests); - } - if (maxWriteRequestOperationCount != null) { - builder.field(MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName(), maxWriteRequestOperationCount); - } - if (maxWriteRequestSize != null) { - builder.field(MAX_WRITE_REQUEST_SIZE.getPreferredName(), maxWriteRequestSize.getStringRep()); - } - if (maxOutstandingWriteRequests != null) { - builder.field(MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName(), maxOutstandingWriteRequests); - } - if (maxWriteBufferCount != null){ - builder.field(MAX_WRITE_BUFFER_COUNT.getPreferredName(), maxWriteBufferCount); - } - if (maxWriteBufferSize != null) { - builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize.getStringRep()); - } - if (maxRetryDelay != null) { - builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay); - } - if (pollTimeout != null) { - builder.field(READ_POLL_TIMEOUT.getPreferredName(), pollTimeout); - } + toXContentFragment(builder); return builder; } @@ -413,38 +278,16 @@ public boolean isFragment() { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - AutoFollowPattern that = (AutoFollowPattern) o; - return Objects.equals(remoteCluster, that.remoteCluster) && - Objects.equals(leaderIndexPatterns, that.leaderIndexPatterns) && - Objects.equals(followIndexPattern, that.followIndexPattern) && - Objects.equals(maxReadRequestOperationCount, that.maxReadRequestOperationCount) && - Objects.equals(maxReadRequestSize, that.maxReadRequestSize) && - Objects.equals(maxOutstandingReadRequests, that.maxOutstandingReadRequests) && - Objects.equals(maxWriteRequestOperationCount, that.maxWriteRequestOperationCount) && - Objects.equals(maxWriteRequestSize, that.maxWriteRequestSize) && - Objects.equals(maxOutstandingWriteRequests, that.maxOutstandingWriteRequests) && - 
Objects.equals(maxWriteBufferCount, that.maxWriteBufferCount) && - Objects.equals(maxWriteBufferSize, that.maxWriteBufferSize) && - Objects.equals(maxRetryDelay, that.maxRetryDelay) && - Objects.equals(pollTimeout, that.pollTimeout); + if (!super.equals(o)) return false; + AutoFollowPattern pattern = (AutoFollowPattern) o; + return remoteCluster.equals(pattern.remoteCluster) && + leaderIndexPatterns.equals(pattern.leaderIndexPatterns) && + followIndexPattern.equals(pattern.followIndexPattern); } @Override public int hashCode() { - return Objects.hash( - remoteCluster, - leaderIndexPatterns, - followIndexPattern, - maxReadRequestOperationCount, - maxReadRequestSize, - maxOutstandingReadRequests, - maxWriteRequestOperationCount, - maxWriteRequestSize, - maxOutstandingWriteRequests, - maxWriteBufferCount, - maxWriteBufferSize, - maxRetryDelay, - pollTimeout); + return Objects.hash(super.hashCode(), remoteCluster, leaderIndexPatterns, followIndexPattern); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java index 4cbd575c67b30..122ae2b0a179d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java @@ -189,7 +189,7 @@ public FollowParameters getParameters() { remoteCluster = in.readString(); leaderIndex = in.readString(); status = Status.fromString(in.readString()); - parameters = in.readOptionalWriteable(innerIn -> new FollowParameters(in)); + parameters = in.readOptionalWriteable(FollowParameters::new); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowParameters.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowParameters.java index 001a79323ab38..c5649934c7a16 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowParameters.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowParameters.java @@ -14,28 +14,28 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.AbstractObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import java.io.IOException; import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; -public class FollowParameters implements Writeable { +public class FollowParameters implements Writeable, ToXContentObject { - static final TimeValue RETRY_DELAY_MAX = TimeValue.timeValueMinutes(5); + private static final TimeValue RETRY_DELAY_MAX = TimeValue.timeValueMinutes(5); - static final ParseField MAX_READ_REQUEST_OPERATION_COUNT = new ParseField("max_read_request_operation_count"); - static final ParseField MAX_WRITE_REQUEST_OPERATION_COUNT = new ParseField("max_write_request_operation_count"); - static final ParseField MAX_OUTSTANDING_READ_REQUESTS = new ParseField("max_outstanding_read_requests"); - static final ParseField MAX_OUTSTANDING_WRITE_REQUESTS = new ParseField("max_outstanding_write_requests"); - static final ParseField MAX_READ_REQUEST_SIZE = new ParseField("max_read_request_size"); - static final ParseField MAX_WRITE_REQUEST_SIZE = 
new ParseField("max_write_request_size"); - static final ParseField MAX_WRITE_BUFFER_COUNT = new ParseField("max_write_buffer_count"); - static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); - static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); - static final ParseField READ_POLL_TIMEOUT = new ParseField("read_poll_timeout"); + public static final ParseField MAX_READ_REQUEST_OPERATION_COUNT = new ParseField("max_read_request_operation_count"); + public static final ParseField MAX_WRITE_REQUEST_OPERATION_COUNT = new ParseField("max_write_request_operation_count"); + public static final ParseField MAX_OUTSTANDING_READ_REQUESTS = new ParseField("max_outstanding_read_requests"); + public static final ParseField MAX_OUTSTANDING_WRITE_REQUESTS = new ParseField("max_outstanding_write_requests"); + public static final ParseField MAX_READ_REQUEST_SIZE = new ParseField("max_read_request_size"); + public static final ParseField MAX_WRITE_REQUEST_SIZE = new ParseField("max_write_request_size"); + public static final ParseField MAX_WRITE_BUFFER_COUNT = new ParseField("max_write_buffer_count"); + public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); + public static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); + public static final ParseField READ_POLL_TIMEOUT = new ParseField("read_poll_timeout"); Integer maxReadRequestOperationCount; Integer maxWriteRequestOperationCount; @@ -185,7 +185,7 @@ public ActionRequestValidationException validate() { return e; } - FollowParameters(StreamInput in) throws IOException { + public FollowParameters(StreamInput in) throws IOException { fromStreamInput(in); } @@ -216,6 +216,14 @@ void fromStreamInput(StreamInput in) throws IOException { readPollTimeout = in.readOptionalTimeValue(); } + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + toXContentFragment(builder); + builder.endObject(); + return builder; + } + XContentBuilder toXContentFragment(final XContentBuilder builder) throws IOException { if (maxReadRequestOperationCount != null) { builder.field(MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); @@ -258,12 +266,12 @@ public static

void initParser(AbstractObjectParser< parser.declareField( FollowParameters::setMaxReadRequestSize, (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()), - AutoFollowMetadata.AutoFollowPattern.MAX_READ_REQUEST_SIZE, + MAX_READ_REQUEST_SIZE, ObjectParser.ValueType.STRING); parser.declareField( FollowParameters::setMaxWriteRequestSize, (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_REQUEST_SIZE.getPreferredName()), - AutoFollowMetadata.AutoFollowPattern.MAX_WRITE_REQUEST_SIZE, + MAX_WRITE_REQUEST_SIZE, ObjectParser.ValueType.STRING); parser.declareInt(FollowParameters::setMaxWriteBufferCount, MAX_WRITE_BUFFER_COUNT); parser.declareField( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ForgetFollowerAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ForgetFollowerAction.java new file mode 100644 index 0000000000000..d2a0b565496d6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ForgetFollowerAction.java @@ -0,0 +1,171 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class ForgetFollowerAction extends Action { + + public static final String NAME = "indices:admin/xpack/ccr/forget_follower"; + public static final ForgetFollowerAction INSTANCE = new ForgetFollowerAction(); + + private ForgetFollowerAction() { + super(NAME); + } + + @Override + public BroadcastResponse newResponse() { + return new BroadcastResponse(); + } + + /** + * Represents a forget follower request. Note that this an expert API intended to be used only when unfollowing a follower index fails + * to emove the follower retention leases. Please be sure that you understand the purpose this API before using. 
+ */ + public static class Request extends BroadcastRequest { + + private static final ParseField FOLLOWER_CLUSTER = new ParseField("follower_cluster"); + private static final ParseField FOLLOWER_INDEX = new ParseField("follower_index"); + private static final ParseField FOLLOWER_INDEX_UUID = new ParseField("follower_index_uuid"); + private static final ParseField LEADER_REMOTE_CLUSTER = new ParseField("leader_remote_cluster"); + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, () -> new String[4]); + + static { + PARSER.declareString((parameters, value) -> parameters[0] = value, FOLLOWER_CLUSTER); + PARSER.declareString((parameters, value) -> parameters[1] = value, FOLLOWER_INDEX); + PARSER.declareString((parameters, value) -> parameters[2] = value, FOLLOWER_INDEX_UUID); + PARSER.declareString((parameters, value) -> parameters[3] = value, LEADER_REMOTE_CLUSTER); + } + + public static ForgetFollowerAction.Request fromXContent( + final XContentParser parser, + final String leaderIndex) throws IOException { + final String[] parameters = PARSER.parse(parser, null); + return new Request(parameters[0], parameters[1], parameters[2], parameters[3], leaderIndex); + } + + private String followerCluster; + + /** + * The name of the cluster containing the follower index. + * + * @return the name of the cluster containing the follower index + */ + public String followerCluster() { + return followerCluster; + } + + private String followerIndex; + + /** + * The name of the follower index. + * + * @return the name of the follower index + */ + public String followerIndex() { + return followerIndex; + } + + private String followerIndexUUID; + + /** + * The UUID of the follower index. + * + * @return the UUID of the follower index + */ + public String followerIndexUUID() { + return followerIndexUUID; + } + + private String leaderRemoteCluster; + + /** + * The alias of the remote cluster containing the leader index. + * + * @return the alias of the remote cluster + */ + public String leaderRemoteCluster() { + return leaderRemoteCluster; + } + + private String leaderIndex; + + /** + * The name of the leader index. + * + * @return the name of the leader index + */ + public String leaderIndex() { + return leaderIndex; + } + + public Request() { + + } + + /** + * Construct a forget follower request. 
+ * + * @param followerCluster the name of the cluster containing the follower index to forget + * @param followerIndex the name of follower index + * @param followerIndexUUID the UUID of the follower index + * @param leaderRemoteCluster the alias of the remote cluster containing the leader index from the perspective of the follower index + * @param leaderIndex the name of the leader index + */ + public Request( + final String followerCluster, + final String followerIndex, + final String followerIndexUUID, + final String leaderRemoteCluster, + final String leaderIndex) { + super(new String[]{leaderIndex}); + this.followerCluster = Objects.requireNonNull(followerCluster); + this.leaderIndex = Objects.requireNonNull(leaderIndex); + this.leaderRemoteCluster = Objects.requireNonNull(leaderRemoteCluster); + this.followerIndex = Objects.requireNonNull(followerIndex); + this.followerIndexUUID = Objects.requireNonNull(followerIndexUUID); + } + + public Request(final StreamInput in) throws IOException { + super.readFrom(in); + followerCluster = in.readString(); + leaderIndex = in.readString(); + leaderRemoteCluster = in.readString(); + followerIndex = in.readString(); + followerIndexUUID = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(followerCluster); + out.writeString(leaderIndex); + out.writeString(leaderRemoteCluster); + out.writeString(followerIndex); + out.writeString(followerIndexUUID); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java index 098ba6dba6935..cd37692da43a3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java @@ -99,7 +99,7 @@ public Map getAutoFollowPatterns() { public Response(StreamInput in) throws IOException { super.readFrom(in); - autoFollowPatterns = in.readMap(StreamInput::readString, AutoFollowPattern::new); + autoFollowPatterns = in.readMap(StreamInput::readString, AutoFollowPattern::readFrom); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ImmutableFollowParameters.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ImmutableFollowParameters.java new file mode 100644 index 0000000000000..76d7f1c51f4da --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ImmutableFollowParameters.java @@ -0,0 +1,213 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ccr.action; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class ImmutableFollowParameters implements Writeable { + + private final Integer maxReadRequestOperationCount; + private final Integer maxWriteRequestOperationCount; + private final Integer maxOutstandingReadRequests; + private final Integer maxOutstandingWriteRequests; + private final ByteSizeValue maxReadRequestSize; + private final ByteSizeValue maxWriteRequestSize; + private final Integer maxWriteBufferCount; + private final ByteSizeValue maxWriteBufferSize; + private final TimeValue maxRetryDelay; + private final TimeValue readPollTimeout; + + public ImmutableFollowParameters(Integer maxReadRequestOperationCount, Integer maxWriteRequestOperationCount, + Integer maxOutstandingReadRequests, Integer maxOutstandingWriteRequests, + ByteSizeValue maxReadRequestSize, ByteSizeValue maxWriteRequestSize, + Integer maxWriteBufferCount, ByteSizeValue maxWriteBufferSize, + TimeValue maxRetryDelay, TimeValue readPollTimeout) { + this.maxReadRequestOperationCount = maxReadRequestOperationCount; + this.maxWriteRequestOperationCount = maxWriteRequestOperationCount; + this.maxOutstandingReadRequests = maxOutstandingReadRequests; + this.maxOutstandingWriteRequests = maxOutstandingWriteRequests; + this.maxReadRequestSize = maxReadRequestSize; + this.maxWriteRequestSize = maxWriteRequestSize; + this.maxWriteBufferCount = maxWriteBufferCount; + this.maxWriteBufferSize = maxWriteBufferSize; + this.maxRetryDelay = maxRetryDelay; + this.readPollTimeout = readPollTimeout; + } + + public Integer getMaxReadRequestOperationCount() { + return maxReadRequestOperationCount; + } + + public ByteSizeValue getMaxReadRequestSize() { + return maxReadRequestSize; + } + + public Integer getMaxOutstandingReadRequests() { + return maxOutstandingReadRequests; + } + + public Integer getMaxWriteRequestOperationCount() { + return maxWriteRequestOperationCount; + } + + public ByteSizeValue getMaxWriteRequestSize() { + return maxWriteRequestSize; + } + + public Integer getMaxOutstandingWriteRequests() { + return maxOutstandingWriteRequests; + } + + public Integer getMaxWriteBufferCount() { + return maxWriteBufferCount; + } + + public ByteSizeValue getMaxWriteBufferSize() { + return maxWriteBufferSize; + } + + public TimeValue getMaxRetryDelay() { + return maxRetryDelay; + } + + public TimeValue getReadPollTimeout() { + return readPollTimeout; + } + + public ImmutableFollowParameters(StreamInput in) throws IOException { + maxReadRequestOperationCount = in.readOptionalVInt(); + maxReadRequestSize = in.readOptionalWriteable(ByteSizeValue::new); + maxOutstandingReadRequests = in.readOptionalVInt(); + maxWriteRequestOperationCount = in.readOptionalVInt(); + maxWriteRequestSize = in.readOptionalWriteable(ByteSizeValue::new); + maxOutstandingWriteRequests = in.readOptionalVInt(); + maxWriteBufferCount = in.readOptionalVInt(); + maxWriteBufferSize = in.readOptionalWriteable(ByteSizeValue::new); + maxRetryDelay = in.readOptionalTimeValue(); + readPollTimeout = 
in.readOptionalTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalVInt(maxReadRequestOperationCount); + out.writeOptionalWriteable(maxReadRequestSize); + out.writeOptionalVInt(maxOutstandingReadRequests); + out.writeOptionalVInt(maxWriteRequestOperationCount); + out.writeOptionalWriteable(maxWriteRequestSize); + out.writeOptionalVInt(maxOutstandingWriteRequests); + out.writeOptionalVInt(maxWriteBufferCount); + out.writeOptionalWriteable(maxWriteBufferSize); + out.writeOptionalTimeValue(maxRetryDelay); + out.writeOptionalTimeValue(readPollTimeout); + + } + + protected XContentBuilder toXContentFragment(final XContentBuilder builder) throws IOException { + if (maxReadRequestOperationCount != null) { + builder.field(FollowParameters.MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); + } + if (maxWriteRequestOperationCount != null) { + builder.field(FollowParameters.MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName(), maxWriteRequestOperationCount); + } + if (maxOutstandingReadRequests != null) { + builder.field(FollowParameters.MAX_OUTSTANDING_READ_REQUESTS.getPreferredName(), maxOutstandingReadRequests); + } + if (maxOutstandingWriteRequests != null) { + builder.field(FollowParameters.MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName(), maxOutstandingWriteRequests); + } + if (maxReadRequestSize != null) { + builder.field(FollowParameters.MAX_READ_REQUEST_SIZE.getPreferredName(), maxReadRequestSize.getStringRep()); + } + if (maxWriteRequestSize != null) { + builder.field(FollowParameters.MAX_WRITE_REQUEST_SIZE.getPreferredName(), maxWriteRequestSize.getStringRep()); + } + if (maxWriteBufferCount != null) { + builder.field(FollowParameters.MAX_WRITE_BUFFER_COUNT.getPreferredName(), maxWriteBufferCount); + } + if (maxWriteBufferSize != null) { + builder.field(FollowParameters.MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize.getStringRep()); + } + if (maxRetryDelay != null) { + builder.field(FollowParameters.MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); + } + if (readPollTimeout != null) { + builder.field(FollowParameters.READ_POLL_TIMEOUT.getPreferredName(), readPollTimeout.getStringRep()); + } + return builder; + } + + public static

void initParser(ConstructingObjectParser parser) { + parser.declareInt(ConstructingObjectParser.optionalConstructorArg(), FollowParameters.MAX_READ_REQUEST_OPERATION_COUNT); + parser.declareInt(ConstructingObjectParser.optionalConstructorArg(), FollowParameters.MAX_WRITE_REQUEST_OPERATION_COUNT); + parser.declareInt(ConstructingObjectParser.optionalConstructorArg(), FollowParameters.MAX_OUTSTANDING_READ_REQUESTS); + parser.declareInt(ConstructingObjectParser.optionalConstructorArg(), FollowParameters.MAX_OUTSTANDING_WRITE_REQUESTS); + parser.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowParameters.MAX_READ_REQUEST_SIZE.getPreferredName()), + FollowParameters.MAX_READ_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + parser.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowParameters.MAX_WRITE_REQUEST_SIZE.getPreferredName()), + FollowParameters.MAX_WRITE_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + parser.declareInt(ConstructingObjectParser.optionalConstructorArg(), FollowParameters.MAX_WRITE_BUFFER_COUNT); + parser.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowParameters.MAX_WRITE_BUFFER_SIZE.getPreferredName()), + FollowParameters.MAX_WRITE_BUFFER_SIZE, + ObjectParser.ValueType.STRING); + parser.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), FollowParameters.MAX_RETRY_DELAY.getPreferredName()), + FollowParameters.MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); + parser.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), FollowParameters.READ_POLL_TIMEOUT.getPreferredName()), + FollowParameters.READ_POLL_TIMEOUT, ObjectParser.ValueType.STRING); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o instanceof ImmutableFollowParameters == false) return false; + ImmutableFollowParameters that = (ImmutableFollowParameters) o; + return Objects.equals(maxReadRequestOperationCount, that.maxReadRequestOperationCount) && + Objects.equals(maxWriteRequestOperationCount, that.maxWriteRequestOperationCount) && + Objects.equals(maxOutstandingReadRequests, that.maxOutstandingReadRequests) && + Objects.equals(maxOutstandingWriteRequests, that.maxOutstandingWriteRequests) && + Objects.equals(maxReadRequestSize, that.maxReadRequestSize) && + Objects.equals(maxWriteRequestSize, that.maxWriteRequestSize) && + Objects.equals(maxWriteBufferCount, that.maxWriteBufferCount) && + Objects.equals(maxWriteBufferSize, that.maxWriteBufferSize) && + Objects.equals(maxRetryDelay, that.maxRetryDelay) && + Objects.equals(readPollTimeout, that.readPollTimeout); + } + + @Override + public int hashCode() { + return Objects.hash( + maxReadRequestOperationCount, + maxWriteRequestOperationCount, + maxOutstandingReadRequests, + maxOutstandingWriteRequests, + maxReadRequestSize, + maxWriteRequestSize, + maxWriteBufferCount, + maxWriteBufferSize, + maxRetryDelay, + readPollTimeout + ); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java index 43305b030be83..00b115131d1fb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java @@ -9,16 +9,18 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; -import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; -import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; -import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction; +import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; +import org.elasticsearch.xpack.core.ccr.action.ForgetFollowerAction; import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction; -import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction; +import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; +import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; +import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction; import org.elasticsearch.xpack.core.ccr.action.UnfollowAction; import java.util.Objects; @@ -96,6 +98,16 @@ public ActionFuture unfollow(final UnfollowAction.Request return listener; } + public void forgetFollower(final ForgetFollowerAction.Request request, final ActionListener listener) { + client.execute(ForgetFollowerAction.INSTANCE, request, listener); + } + + public ActionFuture forgetFollower(final ForgetFollowerAction.Request request) { + final PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(ForgetFollowerAction.INSTANCE, request, listener); + return listener; + } + public void putAutoFollowPattern( final PutAutoFollowPatternAction.Request request, final ActionListener listener) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java index b917dbf260c9c..28aa09f6c1efb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -34,6 +35,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; @@ -187,19 +189,21 @@ public int hashCode() { * @return The list of deprecation issues found in the cluster */ public static DeprecationInfoAction.Response from(ClusterState state, + NamedXContentRegistry xContentRegistry, IndexNameExpressionResolver indexNameExpressionResolver, String[] indices, IndicesOptions indicesOptions, List datafeeds, NodesDeprecationCheckResponse 
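The new forgetFollower methods on CcrClient follow the client's existing convention of offering both an async listener variant and a blocking ActionFuture variant. A rough caller-side sketch (the request construction and the logging are assumptions, not part of this change):

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.xpack.core.ccr.action.ForgetFollowerAction;
import org.elasticsearch.xpack.core.ccr.client.CcrClient;

final class ForgetFollowerExamples {

    // async flavour: the listener fires on response or failure
    static void forgetAsync(CcrClient ccr, ForgetFollowerAction.Request request) {
        ccr.forgetFollower(request, ActionListener.wrap(
            response -> System.out.println("successful shards: " + response.getSuccessfulShards()),
            Throwable::printStackTrace));
    }

    // blocking flavour: the PlainActionFuture returned by the client is awaited
    static BroadcastResponse forgetBlocking(CcrClient ccr, ForgetFollowerAction.Request request) {
        return ccr.forgetFollower(request).actionGet();
    }
}
```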
nodeDeprecationResponse, List> indexSettingsChecks, List> clusterSettingsChecks, - List> mlSettingsCheck) { + List> + mlSettingsCheck) { List clusterSettingsIssues = filterChecks(clusterSettingsChecks, (c) -> c.apply(state)); List nodeSettingsIssues = mergeNodeIssues(nodeDeprecationResponse); List mlSettingsIssues = new ArrayList<>(); for (DatafeedConfig config : datafeeds) { - mlSettingsIssues.addAll(filterChecks(mlSettingsCheck, (c) -> c.apply(config))); + mlSettingsIssues.addAll(filterChecks(mlSettingsCheck, (c) -> c.apply(config, xContentRegistry))); } String[] concreteIndexNames = indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, indices); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java index 7dce5e85ab75d..c0cafa8e9079e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java @@ -106,7 +106,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public Version getMinimalSupportedVersion() { - return Version.V_7_0_0; + return Version.V_6_6_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStep.java index d37d8c0a18805..5db9eb9e90155 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStep.java @@ -5,10 +5,15 @@ */ package org.elasticsearch.xpack.core.indexlifecycle; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.segments.IndexSegments; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; +import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -17,13 +22,17 @@ import java.io.IOException; import java.util.Arrays; +import java.util.List; +import java.util.Map; import java.util.Objects; -import java.util.stream.StreamSupport; +import java.util.stream.Collectors; /** * This {@link Step} evaluates whether force_merge was successful by checking the segment count. 
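The ML settings checks above now take a BiFunction so each check can parse the datafeed's lazily stored query and aggregations with the caller's NamedXContentRegistry. A minimal sketch of running such checks and dropping the ones that found nothing (the helper name and the null-filtering behaviour are assumptions that mirror filterChecks, not the exact implementation):

```java
import java.util.List;
import java.util.Objects;
import java.util.function.BiFunction;
import java.util.stream.Collectors;

import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.xpack.core.deprecation.DeprecationIssue;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;

final class DatafeedDeprecationExample {
    static List<DeprecationIssue> run(List<BiFunction<DatafeedConfig, NamedXContentRegistry, DeprecationIssue>> checks,
                                      DatafeedConfig datafeed,
                                      NamedXContentRegistry registry) {
        return checks.stream()
            .map(check -> check.apply(datafeed, registry)) // each check may return null when nothing is wrong
            .filter(Objects::nonNull)
            .collect(Collectors.toList());
    }
}
```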
*/ public class SegmentCountStep extends AsyncWaitStep { + + private static final Logger logger = LogManager.getLogger(SegmentCountStep.class); public static final String NAME = "segment-count"; private final int maxNumSegments; @@ -41,10 +50,19 @@ public int getMaxNumSegments() { public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { getClient().admin().indices().segments(new IndicesSegmentsRequest(indexMetaData.getIndex().getName()), ActionListener.wrap(response -> { - long numberShardsLeftToMerge = - StreamSupport.stream(response.getIndices().get(indexMetaData.getIndex().getName()).spliterator(), false) - .filter(iss -> Arrays.stream(iss.getShards()).anyMatch(p -> p.getSegments().size() > maxNumSegments)).count(); - listener.onResponse(numberShardsLeftToMerge == 0, new Info(numberShardsLeftToMerge)); + IndexSegments segments = response.getIndices().get(indexMetaData.getIndex().getName()); + List unmergedShards = segments.getShards().values().stream() + .flatMap(iss -> Arrays.stream(iss.getShards())) + .filter(shardSegments -> shardSegments.getSegments().size() > maxNumSegments) + .collect(Collectors.toList()); + if (unmergedShards.size() > 0) { + Map unmergedShardCounts = unmergedShards.stream() + .collect(Collectors.toMap(ShardSegments::getShardRouting, ss -> ss.getSegments().size())); + logger.info("[{}] best effort force merge to [{}] segments did not succeed for {} shards: {}", + indexMetaData.getIndex().getName(), maxNumSegments, unmergedShards.size(), unmergedShardCounts); + } + // Force merging is best effort, so always return true that the condition has been met. + listener.onResponse(true, new Info(unmergedShards.size())); }, listener::onFailure)); } @@ -90,8 +108,12 @@ public long getNumberShardsLeftToMerge() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(MESSAGE.getPreferredName(), - "Waiting for [" + numberShardsLeftToMerge + "] shards " + "to forcemerge"); + if (numberShardsLeftToMerge == 0) { + builder.field(MESSAGE.getPreferredName(), "all shards force merged successfully"); + } else { + builder.field(MESSAGE.getPreferredName(), + "[" + numberShardsLeftToMerge + "] shards did not successfully force merge"); + } builder.field(SHARDS_TO_MERGE.getPreferredName(), numberShardsLeftToMerge); builder.endObject(); return builder; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowFollowIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowFollowIndexStep.java index 953450bbc763b..73fd4d8a9b528 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowFollowIndexStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowFollowIndexStep.java @@ -5,11 +5,17 @@ */ package org.elasticsearch.xpack.core.indexlifecycle; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.xpack.core.ccr.action.UnfollowAction; +import java.util.List; + final class UnfollowFollowIndexStep extends AbstractUnfollowIndexStep { + private static final Logger logger = LogManager.getLogger(UnfollowFollowIndexStep.class); static final String NAME = "unfollow-follower-index"; @@ -25,7 +31,19 @@ void innerPerformAction(String 
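SegmentCountStep now treats force merge as best effort: instead of failing the step it logs which shard copies still exceed the segment budget and reports the condition as met. The shard bookkeeping boils down to this kind of stream (a sketch using the same response types as evaluateCondition; the helper name is illustrative):

```java
import java.util.Arrays;
import java.util.Map;
import java.util.stream.Collectors;

import org.elasticsearch.action.admin.indices.segments.IndexSegments;
import org.elasticsearch.action.admin.indices.segments.ShardSegments;
import org.elasticsearch.cluster.routing.ShardRouting;

final class SegmentCountExample {
    // For every shard copy that still has more segments than allowed, record how many it has.
    static Map<ShardRouting, Integer> unmergedShardCounts(IndexSegments segments, int maxNumSegments) {
        return segments.getShards().values().stream()
            .flatMap(indexShardSegments -> Arrays.stream(indexShardSegments.getShards()))
            .filter(shardSegments -> shardSegments.getSegments().size() > maxNumSegments)
            .collect(Collectors.toMap(ShardSegments::getShardRouting, ss -> ss.getSegments().size()));
    }
}
```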
followerIndex, Listener listener) { assert r.isAcknowledged() : "unfollow response is not acknowledged"; listener.onResponse(true); }, - listener::onFailure + exception -> { + if (exception instanceof ElasticsearchException + && ((ElasticsearchException) exception).getMetadata("es.failed_to_remove_retention_leases") != null) { + List leasesNotRemoved = ((ElasticsearchException) exception) + .getMetadata("es.failed_to_remove_retention_leases"); + logger.debug("failed to remove leader retention lease(s) {} while unfollowing index [{}], " + + "continuing with lifecycle execution", + leasesNotRemoved, followerIndex); + listener.onResponse(true); + } else { + listener.onFailure(exception); + } + } )); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStep.java index 3cfaeba048d5f..958120b99b879 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStep.java @@ -20,7 +20,9 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.Objects; +import java.util.Optional; /** * A step that waits until the index it's used on is no longer a leader index. @@ -57,8 +59,11 @@ public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { boolean isCurrentlyLeaderIndex = Arrays.stream(indexStats.getShards()) .map(ShardStats::getRetentionLeaseStats) - .flatMap(retentionLeaseStats -> retentionLeaseStats.retentionLeases().leases().stream()) - .anyMatch(lease -> CCR_LEASE_KEY.equals(lease.source())); + .map(Optional::ofNullable) + .map(o -> o.flatMap(stats -> Optional.ofNullable(stats.retentionLeases()))) + .map(o -> o.flatMap(leases -> Optional.ofNullable(leases.leases()))) + .map(o -> o.map(Collection::stream)) + .anyMatch(lease -> lease.isPresent() && lease.get().anyMatch(l -> CCR_LEASE_KEY.equals(l.source()))); if (isCurrentlyLeaderIndex) { listener.onResponse(false, new Info()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java index b1ec651500e0f..e4f82ad53fed1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java @@ -13,6 +13,7 @@ import java.io.IOException; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; public final class MlMetaIndex { /** @@ -21,14 +22,12 @@ public final class MlMetaIndex { */ public static final String INDEX_NAME = ".ml-meta"; - public static final String TYPE = "doc"; - private MlMetaIndex() {} public static XContentBuilder docMapping() throws IOException { XContentBuilder builder = jsonBuilder(); builder.startObject(); - builder.startObject(TYPE); + builder.startObject(SINGLE_MAPPING_NAME); ElasticsearchMappings.addMetaInformation(builder); ElasticsearchMappings.addDefaultMapping(builder); builder.startObject(ElasticsearchMappings.PROPERTIES) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index 
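The WaitForNoFollowersStep change above replaces a direct stream over retention leases with an Optional chain, so a missing stats object on a mixed-version cluster short-circuits to "no CCR lease" instead of throwing. Extracted into a standalone helper, the idea looks roughly like this (the method name is illustrative):

```java
import java.util.Collection;
import java.util.Optional;

import org.elasticsearch.action.admin.indices.stats.ShardStats;

final class CcrLeaseCheckExample {
    // retentionLeaseStats, retentionLeases() and leases() may each be null; Optional keeps the walk null-safe.
    static boolean hasLeaseWithSource(ShardStats shardStats, String source) {
        return Optional.ofNullable(shardStats.getRetentionLeaseStats())
            .map(stats -> stats.retentionLeases())
            .map(leases -> leases.leases())
            .map(Collection::stream)
            .map(stream -> stream.anyMatch(lease -> source.equals(lease.source())))
            .orElse(false);
    }
}
```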
54c83e9a88a75..2ad999d82ade0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -302,7 +303,7 @@ public Builder putJobs(Collection jobs) { return this; } - public Builder putDatafeed(DatafeedConfig datafeedConfig, Map headers) { + public Builder putDatafeed(DatafeedConfig datafeedConfig, Map headers, NamedXContentRegistry xContentRegistry) { if (datafeeds.containsKey(datafeedConfig.getId())) { throw ExceptionsHelper.datafeedAlreadyExists(datafeedConfig.getId()); } @@ -310,7 +311,7 @@ public Builder putDatafeed(DatafeedConfig datafeedConfig, Map he String jobId = datafeedConfig.getJobId(); checkJobIsAvailableForDatafeed(jobId); Job job = jobs.get(jobId); - DatafeedJobValidator.validate(datafeedConfig, job); + DatafeedJobValidator.validate(datafeedConfig, job, xContentRegistry); if (headers.isEmpty() == false) { // Adjust the request, adding security headers from the current thread context diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java index 437aa40c925f2..e9da7238fad2b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java @@ -27,6 +27,7 @@ import java.util.SortedMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -71,7 +72,7 @@ public static void createAnnotationsIndexIfNecessary(Settings settings, Client c CreateIndexRequest createIndexRequest = new CreateIndexRequest(INDEX_NAME); try (XContentBuilder annotationsMapping = AnnotationIndex.annotationsMapping()) { - createIndexRequest.mapping(ElasticsearchMappings.DOC_TYPE, annotationsMapping); + createIndexRequest.mapping(SINGLE_MAPPING_NAME, annotationsMapping); createIndexRequest.settings(Settings.builder() .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1") .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1") @@ -111,7 +112,7 @@ public static void createAnnotationsIndexIfNecessary(Settings settings, Client c public static XContentBuilder annotationsMapping() throws IOException { XContentBuilder builder = jsonBuilder() .startObject() - .startObject(ElasticsearchMappings.DOC_TYPE); + .startObject(SINGLE_MAPPING_NAME); ElasticsearchMappings.addMetaInformation(builder); builder.startObject(ElasticsearchMappings.PROPERTIES) .startObject(Annotation.ANNOTATION.getPreferredName()) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java new file mode 100644 index 0000000000000..8585e4122e673 --- 
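MlMetaIndex and AnnotationIndex now build their mappings under MapperService.SINGLE_MAPPING_NAME instead of a bespoke "doc" type, in line with typeless indices. A minimal sketch of such a mapping (the "annotation" field is an illustrative example, not the full annotations mapping):

```java
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME;

import java.io.IOException;

import org.elasticsearch.common.xcontent.XContentBuilder;

final class MappingExample {
    // The single allowed type name is MapperService.SINGLE_MAPPING_NAME ("_doc").
    static XContentBuilder exampleMapping() throws IOException {
        return jsonBuilder()
            .startObject()
                .startObject(SINGLE_MAPPING_NAME)
                    .startObject("properties")
                        .startObject("annotation")
                            .field("type", "text")
                        .endObject()
                    .endObject()
                .endObject()
            .endObject();
    }
}
```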
/dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java @@ -0,0 +1,159 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.XContentObjectTransformer; + +import java.io.IOException; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Objects; + +class AggProvider implements Writeable, ToXContentObject { + + private static final Logger logger = LogManager.getLogger(AggProvider.class); + + private Exception parsingException; + private AggregatorFactories.Builder parsedAggs; + private Map aggs; + + static AggProvider fromXContent(XContentParser parser, boolean lenient) throws IOException { + Map aggs = parser.mapOrdered(); + AggregatorFactories.Builder parsedAggs = null; + Exception exception = null; + try { + if (aggs.isEmpty()) { + throw new Exception("aggs cannot be empty"); + } + parsedAggs = XContentObjectTransformer.aggregatorTransformer(parser.getXContentRegistry()).fromMap(aggs); + } catch(Exception ex) { + if (ex.getCause() instanceof IllegalArgumentException) { + ex = (Exception)ex.getCause(); + } + exception = ex; + if (lenient) { + logger.warn(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, ex); + } else { + throw ExceptionsHelper.badRequestException(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, ex); + } + } + return new AggProvider(aggs, parsedAggs, exception); + } + + static AggProvider fromParsedAggs(AggregatorFactories.Builder parsedAggs) throws IOException { + return parsedAggs == null ? 
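The lenient branch of AggProvider.fromXContent keeps the raw map, leaves parsedAggs null, and stores the parsing failure for later reporting, so configs written by newer nodes can still be loaded. A rough sketch of that behaviour from within the same package (the JSON body and the assertions are illustrative, assuming an empty registry so parsing fails):

```java
import java.io.IOException;

import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

final class LenientAggParsingExample {
    static void lenientParse() throws IOException {
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE,
                "{\"buckets\":{\"made_up_agg_type\":{}}}")) {
            AggProvider provider = AggProvider.fromXContent(parser, true); // lenient
            assert provider.getParsedAggs() == null;        // parsing failed, nothing eager is kept
            assert provider.getParsingException() != null;  // surfaced later, when the datafeed is actually used
        }
    }
}
```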
+ null : + new AggProvider( + XContentObjectTransformer.aggregatorTransformer(NamedXContentRegistry.EMPTY).toMap(parsedAggs), + parsedAggs, + null); + } + + static AggProvider fromStream(StreamInput in) throws IOException { + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { // Has our bug fix for query/agg providers + return new AggProvider(in.readMap(), in.readOptionalWriteable(AggregatorFactories.Builder::new), in.readException()); + } else if (in.getVersion().onOrAfter(Version.V_6_6_0)) { // Has the bug, but supports lazy objects + return new AggProvider(in.readMap(), null, null); + } else { // only supports eagerly parsed objects + // Upstream, we have read the bool already and know for sure that we have parsed aggs in the stream + return AggProvider.fromParsedAggs(new AggregatorFactories.Builder(in)); + } + } + + AggProvider(Map aggs, AggregatorFactories.Builder parsedAggs, Exception parsingException) { + this.aggs = Collections.unmodifiableMap(new LinkedHashMap<>(Objects.requireNonNull(aggs, "[aggs] must not be null"))); + this.parsedAggs = parsedAggs; + this.parsingException = parsingException; + } + + AggProvider(AggProvider other) { + this.aggs = new LinkedHashMap<>(other.aggs); + this.parsedAggs = other.parsedAggs; + this.parsingException = other.parsingException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { // Has our bug fix for query/agg providers + out.writeMap(aggs); + out.writeOptionalWriteable(parsedAggs); + out.writeException(parsingException); + } else if (out.getVersion().onOrAfter(Version.V_6_6_0)) { // Has the bug, but supports lazy objects + // We allow the lazy parsing nodes that have the bug throw any parsing errors themselves as + // they already have the ability to fully parse the passed Maps + out.writeMap(aggs); + } else { // only supports eagerly parsed objects + if (parsingException != null) { + if (parsingException instanceof IOException) { + throw (IOException) parsingException; + } else { + throw new ElasticsearchException(parsingException); + } + } else if (parsedAggs == null) { + // This is an admittedly rare case but we should fail early instead of writing null when there + // actually are aggregations defined + throw new ElasticsearchException("Unsupported operation: parsed aggregations are null"); + } + // Upstream we already verified that this calling object is not null, no need to write a second boolean to the stream + parsedAggs.writeTo(out); + } + } + + public Exception getParsingException() { + return parsingException; + } + + AggregatorFactories.Builder getParsedAggs() { + return parsedAggs; + } + + public Map getAggs() { + return aggs; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + AggProvider that = (AggProvider) other; + + return Objects.equals(this.aggs, that.aggs) + && Objects.equals(this.parsedAggs, that.parsedAggs) + && Objects.equals(this.parsingException, that.parsingException); + } + + @Override + public int hashCode() { + return Objects.hash(aggs, parsedAggs, parsingException); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.map(aggs); + return builder; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java 
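fromStream and writeTo gate the wire format on the stream version: 6.7+ nodes exchange the lazy map plus the parsed builder and any recorded exception, 6.6.x nodes only the map, and older nodes only an eagerly parsed builder. A round-trip sketch in the same-package test style (the chosen version is illustrative, assuming it is at least 6.6.0 so the lazy map path applies):

```java
import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

final class AggProviderWireExample {
    // Serialise and deserialise at a fixed wire version; at V_6_6_0 only the lazy map survives,
    // so the returned provider has null parsedAggs and no recorded exception.
    static AggProvider roundTripAt(AggProvider provider, Version version) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.setVersion(version);
            provider.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                in.setVersion(version);
                return AggProvider.fromStream(in);
            }
        }
    }
}
```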
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index ed858b58dd484..ba4334554ad62 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -5,24 +5,22 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.CachedSupplier; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; @@ -43,7 +41,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -66,52 +63,12 @@ public class DatafeedConfig extends AbstractDiffable implements private static final int TWO_MINS_SECONDS = 2 * SECONDS_IN_MINUTE; private static final int TWENTY_MINS_SECONDS = 20 * SECONDS_IN_MINUTE; private static final int HALF_DAY_SECONDS = 12 * 60 * SECONDS_IN_MINUTE; - static final XContentObjectTransformer QUERY_TRANSFORMER = XContentObjectTransformer.queryBuilderTransformer(); - static final TriFunction, String, List, QueryBuilder> lazyQueryParser = - (objectMap, id, warnings) -> { - try { - return QUERY_TRANSFORMER.fromMap(objectMap, warnings); - } catch (IOException | XContentParseException exception) { - // Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user - if (exception.getCause() instanceof IllegalArgumentException) { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, - id, - exception.getCause().getMessage()), - exception.getCause()); - } else { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, exception, id), - exception); - } - } - }; - - static final XContentObjectTransformer AGG_TRANSFORMER = XContentObjectTransformer.aggregatorTransformer(); - static final TriFunction, String, List, AggregatorFactories.Builder> lazyAggParser = - (objectMap, id, warnings) -> { - try { - return AGG_TRANSFORMER.fromMap(objectMap, warnings); - } catch (IOException | XContentParseException exception) { - // Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user - if 
(exception.getCause() instanceof IllegalArgumentException) { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, - id, - exception.getCause().getMessage()), - exception.getCause()); - } else { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, exception.getMessage(), id), - exception); - } - } - }; // Used for QueryPage public static final ParseField RESULTS_FIELD = new ParseField("datafeeds"); public static String TYPE = "datafeed"; + private static final Logger logger = LogManager.getLogger(DatafeedConfig.class); /** * The field name used to specify document counts in Elasticsearch * aggregations @@ -164,15 +121,15 @@ private static ObjectParser createParser(boolean ignoreUnknownFie builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY); parser.declareString((builder, val) -> builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY); - if (ignoreUnknownFields) { - parser.declareObject(Builder::setQuery, (p, c) -> p.mapOrdered(), QUERY); - parser.declareObject(Builder::setAggregations, (p, c) -> p.mapOrdered(), AGGREGATIONS); - parser.declareObject(Builder::setAggregations, (p, c) -> p.mapOrdered(), AGGS); - } else { - parser.declareObject(Builder::setParsedQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY); - parser.declareObject(Builder::setParsedAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS); - parser.declareObject(Builder::setParsedAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS); - } + parser.declareObject(Builder::setQueryProvider, + (p, c) -> QueryProvider.fromXContent(p, ignoreUnknownFields), + QUERY); + parser.declareObject(Builder::setAggregationsSafe, + (p, c) -> AggProvider.fromXContent(p, ignoreUnknownFields), + AGGREGATIONS); + parser.declareObject(Builder::setAggregationsSafe, + (p, c) -> AggProvider.fromXContent(p, ignoreUnknownFields), + AGGS); parser.declareObject(Builder::setScriptFields, (p, c) -> { List parsedScriptFields = new ArrayList<>(); while (p.nextToken() != XContentParser.Token.END_OBJECT) { @@ -210,18 +167,16 @@ private static ObjectParser createParser(boolean ignoreUnknownFie private final TimeValue frequency; private final List indices; - private final Map query; - private final Map aggregations; + private final QueryProvider queryProvider; + private final AggProvider aggProvider; private final List scriptFields; private final Integer scrollSize; private final ChunkingConfig chunkingConfig; private final Map headers; private final DelayedDataCheckConfig delayedDataCheckConfig; - private final CachedSupplier querySupplier; - private final CachedSupplier aggSupplier; private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, - Map query, Map aggregations, List scriptFields, + QueryProvider queryProvider, AggProvider aggProvider, List scriptFields, Integer scrollSize, ChunkingConfig chunkingConfig, Map headers, DelayedDataCheckConfig delayedDataCheckConfig) { this.id = id; @@ -229,15 +184,13 @@ private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue this.queryDelay = queryDelay; this.frequency = frequency; this.indices = indices == null ? null : Collections.unmodifiableList(indices); - this.query = query == null ? null : Collections.unmodifiableMap(query); - this.aggregations = aggregations == null ? 
null : Collections.unmodifiableMap(aggregations); + this.queryProvider = queryProvider == null ? null : new QueryProvider(queryProvider); + this.aggProvider = aggProvider == null ? null : new AggProvider(aggProvider); this.scriptFields = scriptFields == null ? null : Collections.unmodifiableList(scriptFields); this.scrollSize = scrollSize; this.chunkingConfig = chunkingConfig; this.headers = Collections.unmodifiableMap(headers); this.delayedDataCheckConfig = delayedDataCheckConfig; - this.querySupplier = new CachedSupplier<>(() -> lazyQueryParser.apply(query, id, new ArrayList<>())); - this.aggSupplier = new CachedSupplier<>(() -> lazyAggParser.apply(aggregations, id, new ArrayList<>())); } public DatafeedConfig(StreamInput in) throws IOException { @@ -256,17 +209,11 @@ public DatafeedConfig(StreamInput in) throws IOException { in.readStringList(); } } - if (in.getVersion().before(Version.V_6_6_0)) { - this.query = QUERY_TRANSFORMER.toMap(in.readNamedWriteable(QueryBuilder.class)); - this.aggregations = AGG_TRANSFORMER.toMap(in.readOptionalWriteable(AggregatorFactories.Builder::new)); - } else { - this.query = in.readMap(); - if (in.readBoolean()) { - this.aggregations = in.readMap(); - } else { - this.aggregations = null; - } - } + // each of these writables are version aware + this.queryProvider = QueryProvider.fromStream(in); + // This reads a boolean from the stream, if true, it sends the stream to the `fromStream` method + this.aggProvider = in.readOptionalWriteable(AggProvider::fromStream); + if (in.readBoolean()) { this.scriptFields = Collections.unmodifiableList(in.readList(SearchSourceBuilder.ScriptField::new)); } else { @@ -284,8 +231,6 @@ public DatafeedConfig(StreamInput in) throws IOException { } else { delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig(); } - this.querySupplier = new CachedSupplier<>(() -> lazyQueryParser.apply(query, id, new ArrayList<>())); - this.aggSupplier = new CachedSupplier<>(() -> lazyAggParser.apply(aggregations, id, new ArrayList<>())); } /** @@ -326,62 +271,116 @@ public Integer getScrollSize() { return scrollSize; } - public QueryBuilder getParsedQuery() { - return querySupplier.get(); + /** + * Get the fully parsed query from the semi-parsed stored {@code Map} + * + * @param namedXContentRegistry XContent registry to transform the lazily parsed query + * @return Fully parsed query + */ + public QueryBuilder getParsedQuery(NamedXContentRegistry namedXContentRegistry) { + return queryProvider == null ? null : parseQuery(namedXContentRegistry, new ArrayList<>()); + } + + // TODO Remove in v8.0.0 + // We only need this NamedXContentRegistry object if getParsedQuery() == null and getParsingException() == null + // This situation only occurs in past versions that contained the lazy parsing support but not the providers (6.6.x) + // We will still need `NamedXContentRegistry` for getting deprecations, but that is a special situation + private QueryBuilder parseQuery(NamedXContentRegistry namedXContentRegistry, List deprecations) { + try { + return queryProvider == null || queryProvider.getQuery() == null ? 
+ null : + XContentObjectTransformer.queryBuilderTransformer(namedXContentRegistry).fromMap(queryProvider.getQuery(), deprecations); + } catch (Exception exception) { + // Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user + if (exception.getCause() instanceof IllegalArgumentException) { + exception = (Exception)exception.getCause(); + } + throw ExceptionsHelper.badRequestException(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, exception); + } + } + + Exception getQueryParsingException() { + return queryProvider == null ? null : queryProvider.getParsingException(); } /** - * Calls the lazy parser and returns any gathered deprecations + * Calls the parser and returns any gathered deprecations + * + * @param namedXContentRegistry XContent registry to transform the lazily parsed query * @return The deprecations from parsing the query */ - public List getQueryDeprecations() { - return getQueryDeprecations(lazyQueryParser); - } - - List getQueryDeprecations(TriFunction, String, List, QueryBuilder> parser) { + public List getQueryDeprecations(NamedXContentRegistry namedXContentRegistry) { List deprecations = new ArrayList<>(); - parser.apply(query, id, deprecations); + parseQuery(namedXContentRegistry, deprecations); return deprecations; } public Map getQuery() { - return query; + return queryProvider == null ? null : queryProvider.getQuery(); + } + + /** + * Fully parses the semi-parsed {@code Map} aggregations + * + * @param namedXContentRegistry XContent registry to transform the lazily parsed aggregations + * @return The fully parsed aggregations + */ + public AggregatorFactories.Builder getParsedAggregations(NamedXContentRegistry namedXContentRegistry) { + return aggProvider == null ? null : parseAggregations(namedXContentRegistry, new ArrayList<>()); + } + + // TODO refactor in v8.0.0 + // We only need this NamedXContentRegistry object if getParsedQuery() == null and getParsingException() == null + // This situation only occurs in past versions that contained the lazy parsing support but not the providers (6.6.x) + // We will still need `NamedXContentRegistry` for getting deprecations, but that is a special situation + private AggregatorFactories.Builder parseAggregations(NamedXContentRegistry namedXContentRegistry, List deprecations) { + try { + return aggProvider == null || aggProvider.getAggs() == null ? + null : + XContentObjectTransformer.aggregatorTransformer(namedXContentRegistry).fromMap(aggProvider.getAggs(), deprecations); + } catch (Exception exception) { + // Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user + if (exception.getCause() instanceof IllegalArgumentException) { + exception = (Exception)exception.getCause(); + } + throw ExceptionsHelper.badRequestException(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, exception); + } } - public AggregatorFactories.Builder getParsedAggregations() { - return aggSupplier.get(); + Exception getAggParsingException() { + return aggProvider == null ? 
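Because DatafeedConfig now stores the query and aggregations as maps, callers supply a NamedXContentRegistry whenever they need the parsed objects or their deprecation warnings; nothing is cached in the config anymore. A caller-side sketch (the method name and the println are illustrative):

```java
import java.util.List;

import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;

final class DatafeedReparseExample {
    static void inspect(DatafeedConfig config, NamedXContentRegistry registry) {
        QueryBuilder query = config.getParsedQuery(registry);                      // may be null
        AggregatorFactories.Builder aggs = config.getParsedAggregations(registry); // may be null
        List<String> warnings = config.getQueryDeprecations(registry);
        warnings.addAll(config.getAggDeprecations(registry));
        System.out.println("query=" + query + ", aggs=" + aggs + ", deprecations=" + warnings);
    }
}
```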
null : aggProvider.getParsingException(); } /** - * Calls the lazy parser and returns any gathered deprecations + * Calls the parser and returns any gathered deprecations + * + * @param namedXContentRegistry XContent registry to transform the lazily parsed aggregations * @return The deprecations from parsing the aggregations */ - public List getAggDeprecations() { - return getAggDeprecations(lazyAggParser); - } - - List getAggDeprecations(TriFunction, String, List, AggregatorFactories.Builder> parser) { + public List getAggDeprecations(NamedXContentRegistry namedXContentRegistry) { List deprecations = new ArrayList<>(); - parser.apply(aggregations, id, deprecations); + parseAggregations(namedXContentRegistry, deprecations); return deprecations; } public Map getAggregations() { - return aggregations; + return aggProvider == null ? null : aggProvider.getAggs(); } /** * Returns the histogram's interval as epoch millis. + * + * @param namedXContentRegistry XContent registry to transform the lazily parsed aggregations */ - public long getHistogramIntervalMillis() { - return ExtractorUtils.getHistogramIntervalMillis(getParsedAggregations()); + public long getHistogramIntervalMillis(NamedXContentRegistry namedXContentRegistry) { + return ExtractorUtils.getHistogramIntervalMillis(getParsedAggregations(namedXContentRegistry)); } /** * @return {@code true} when there are non-empty aggregations, {@code false} otherwise */ public boolean hasAggregations() { - return aggregations != null && aggregations.size() > 0; + return aggProvider != null && aggProvider.getAggs() != null && aggProvider.getAggs().size() > 0; } public List getScriptFields() { @@ -418,16 +417,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); out.writeStringCollection(Collections.emptyList()); } - if (out.getVersion().before(Version.V_6_6_0)) { - out.writeNamedWriteable(getParsedQuery()); - out.writeOptionalWriteable(getParsedAggregations()); - } else { - out.writeMap(query); - out.writeBoolean(aggregations != null); - if (aggregations != null) { - out.writeMap(aggregations); - } - } + + // Each of these writables are version aware + queryProvider.writeTo(out); // never null + // This writes a boolean to the stream, if true, it sends the stream to the `writeTo` method + out.writeOptionalWriteable(aggProvider); + if (scriptFields != null) { out.writeBoolean(true); out.writeList(scriptFields); @@ -457,9 +452,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep()); } builder.field(INDICES.getPreferredName(), indices); - builder.field(QUERY.getPreferredName(), query); - if (aggregations != null) { - builder.field(AGGREGATIONS.getPreferredName(), aggregations); + builder.field(QUERY.getPreferredName(), queryProvider.getQuery()); + if (aggProvider != null) { + builder.field(AGGREGATIONS.getPreferredName(), aggProvider.getAggs()); } if (scriptFields != null) { builder.startObject(SCRIPT_FIELDS.getPreferredName()); @@ -504,9 +499,9 @@ public boolean equals(Object other) { && Objects.equals(this.frequency, that.frequency) && Objects.equals(this.queryDelay, that.queryDelay) && Objects.equals(this.indices, that.indices) - && Objects.equals(this.query, that.query) + && Objects.equals(this.queryProvider, that.queryProvider) && Objects.equals(this.scrollSize, that.scrollSize) - && Objects.equals(this.aggregations, that.aggregations) + && Objects.equals(this.aggProvider, that.aggProvider) && 
Objects.equals(this.scriptFields, that.scriptFields) && Objects.equals(this.chunkingConfig, that.chunkingConfig) && Objects.equals(this.headers, that.headers) @@ -515,7 +510,7 @@ public boolean equals(Object other) { @Override public int hashCode() { - return Objects.hash(id, jobId, frequency, queryDelay, indices, query, scrollSize, aggregations, scriptFields, chunkingConfig, + return Objects.hash(id, jobId, frequency, queryDelay, indices, queryProvider, scrollSize, aggProvider, scriptFields, chunkingConfig, headers, delayedDataCheckConfig); } @@ -541,10 +536,10 @@ public String toString() { * @param bucketSpan the bucket span * @return the default frequency */ - public TimeValue defaultFrequency(TimeValue bucketSpan) { + public TimeValue defaultFrequency(TimeValue bucketSpan, NamedXContentRegistry xContentRegistry) { TimeValue defaultFrequency = defaultFrequencyTarget(bucketSpan); if (hasAggregations()) { - long histogramIntervalMillis = getHistogramIntervalMillis(); + long histogramIntervalMillis = getHistogramIntervalMillis(xContentRegistry); long targetFrequencyMillis = defaultFrequency.millis(); long defaultFrequencyMillis = histogramIntervalMillis > targetFrequencyMillis ? histogramIntervalMillis : (targetFrequencyMillis / histogramIntervalMillis) * histogramIntervalMillis; @@ -582,8 +577,8 @@ public static class Builder { private TimeValue queryDelay; private TimeValue frequency; private List indices = Collections.emptyList(); - private Map query; - private Map aggregations; + private QueryProvider queryProvider = QueryProvider.defaultQuery(); + private AggProvider aggProvider; private List scriptFields; private Integer scrollSize = DEFAULT_SCROLL_SIZE; private ChunkingConfig chunkingConfig; @@ -591,9 +586,6 @@ public static class Builder { private DelayedDataCheckConfig delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig(); public Builder() { - try { - this.query = QUERY_TRANSFORMER.toMap(QueryBuilders.matchAllQuery()); - } catch (IOException ex) { /*Should never happen*/ } } public Builder(String id, String jobId) { @@ -608,8 +600,8 @@ public Builder(DatafeedConfig config) { this.queryDelay = config.queryDelay; this.frequency = config.frequency; this.indices = new ArrayList<>(config.indices); - this.query = config.query == null ? null : new LinkedHashMap<>(config.query); - this.aggregations = config.aggregations == null ? null : new LinkedHashMap<>(config.aggregations); + this.queryProvider = config.queryProvider == null ? null : new QueryProvider(config.queryProvider); + this.aggProvider = config.aggProvider == null ? null : new AggProvider(config.aggProvider); this.scriptFields = config.scriptFields == null ? 
null : new ArrayList<>(config.scriptFields); this.scrollSize = config.scrollSize; this.chunkingConfig = config.chunkingConfig; @@ -647,48 +639,39 @@ public void setFrequency(TimeValue frequency) { this.frequency = frequency; } - public void setParsedQuery(QueryBuilder query) { - try { - setQuery(QUERY_TRANSFORMER.toMap(ExceptionsHelper.requireNonNull(query, QUERY.getPreferredName()))); - } catch (IOException | XContentParseException exception) { - if (exception.getCause() instanceof IllegalArgumentException) { - // Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, - id, - exception.getCause().getMessage()), - exception.getCause()); - } else { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, id, exception.getMessage()), exception); - } - } + public void setQueryProvider(QueryProvider queryProvider) { + this.queryProvider = ExceptionsHelper.requireNonNull(queryProvider, QUERY.getPreferredName()); } - public void setQuery(Map query) { - this.query = ExceptionsHelper.requireNonNull(query, QUERY.getPreferredName()); + // For testing only + public void setParsedQuery(QueryBuilder queryBuilder) { + try { + this.queryProvider = ExceptionsHelper.requireNonNull(QueryProvider.fromParsedQuery(queryBuilder), QUERY.getPreferredName()); + } catch (IOException exception) { + // eat exception as it should never happen + logger.error("Exception trying to setParsedQuery", exception); + } } + // For testing only public void setParsedAggregations(AggregatorFactories.Builder aggregations) { try { - setAggregations(AGG_TRANSFORMER.toMap(aggregations)); - } catch (IOException | XContentParseException exception) { - // Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user - if (exception.getCause() instanceof IllegalArgumentException) { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, - id, - exception.getCause().getMessage()), - exception.getCause()); - } else { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, id, exception.getMessage()), exception); - } + this.aggProvider = AggProvider.fromParsedAggs(aggregations); + } catch (IOException exception) { + // eat exception as it should never happen + logger.error("Exception trying to setParsedAggregations", exception); } } - void setAggregations(Map aggregations) { - this.aggregations = aggregations; + private void setAggregationsSafe(AggProvider aggProvider) { + if (this.aggProvider != null) { + throw ExceptionsHelper.badRequestException("Found two aggregation definitions: [aggs] and [aggregations]"); + } + this.aggProvider = aggProvider; + } + + public void setAggProvider(AggProvider aggProvider) { + this.aggProvider = aggProvider; } public void setScriptFields(List scriptFields) { @@ -731,12 +714,12 @@ public DatafeedConfig build() { setDefaultChunkingConfig(); setDefaultQueryDelay(); - return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, query, aggregations, scriptFields, scrollSize, + return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, queryProvider, aggProvider, scriptFields, scrollSize, chunkingConfig, headers, delayedDataCheckConfig); } void validateScriptFields() { - if (aggregations == null) { + if (aggProvider == null) { 
return; } if (scriptFields != null && !scriptFields.isEmpty()) { @@ -782,11 +765,13 @@ private static void checkHistogramIntervalIsPositive(AggregationBuilder histogra private void setDefaultChunkingConfig() { if (chunkingConfig == null) { - if (aggregations == null) { + if (aggProvider == null || aggProvider.getParsedAggs() == null) { chunkingConfig = ChunkingConfig.newAuto(); } else { - long histogramIntervalMillis = - ExtractorUtils.getHistogramIntervalMillis(lazyAggParser.apply(aggregations, id, new ArrayList<>())); + long histogramIntervalMillis = ExtractorUtils.getHistogramIntervalMillis(aggProvider.getParsedAggs()); + if (histogramIntervalMillis <= 0) { + throw ExceptionsHelper.badRequestException(Messages.DATAFEED_AGGREGATIONS_INTERVAL_MUST_BE_GREATER_THAN_ZERO); + } chunkingConfig = ChunkingConfig.newManual(TimeValue.timeValueMillis( DEFAULT_AGGREGATION_CHUNKING_BUCKETS * histogramIntervalMillis)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedJobValidator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedJobValidator.java index 8a49b9554451f..4c2e338db09e8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedJobValidator.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedJobValidator.java @@ -7,6 +7,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.messages.Messages; @@ -21,15 +22,15 @@ private DatafeedJobValidator() {} * @param datafeedConfig the datafeed config * @param job the job */ - public static void validate(DatafeedConfig datafeedConfig, Job job) { + public static void validate(DatafeedConfig datafeedConfig, Job job, NamedXContentRegistry xContentRegistry) { AnalysisConfig analysisConfig = job.getAnalysisConfig(); if (analysisConfig.getLatency() != null && analysisConfig.getLatency().seconds() > 0) { throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.DATAFEED_DOES_NOT_SUPPORT_JOB_WITH_LATENCY)); } if (datafeedConfig.hasAggregations()) { checkSummaryCountFieldNameIsSet(analysisConfig); - checkValidHistogramInterval(datafeedConfig, analysisConfig); - checkFrequencyIsMultipleOfHistogramInterval(datafeedConfig); + checkValidHistogramInterval(datafeedConfig, analysisConfig, xContentRegistry); + checkFrequencyIsMultipleOfHistogramInterval(datafeedConfig, xContentRegistry); } DelayedDataCheckConfig delayedDataCheckConfig = datafeedConfig.getDelayedDataCheckConfig(); @@ -64,8 +65,10 @@ private static void checkSummaryCountFieldNameIsSet(AnalysisConfig analysisConfi } } - private static void checkValidHistogramInterval(DatafeedConfig datafeedConfig, AnalysisConfig analysisConfig) { - long histogramIntervalMillis = datafeedConfig.getHistogramIntervalMillis(); + private static void checkValidHistogramInterval(DatafeedConfig datafeedConfig, + AnalysisConfig analysisConfig, + NamedXContentRegistry xContentRegistry) { + long histogramIntervalMillis = datafeedConfig.getHistogramIntervalMillis(xContentRegistry); long bucketSpanMillis = analysisConfig.getBucketSpan().millis(); if (histogramIntervalMillis > bucketSpanMillis) { throw ExceptionsHelper.badRequestException(Messages.getMessage( @@ -82,10 +85,10 @@ private static void 
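The builder keeps setParsedQuery/setParsedAggregations around for tests, converting the parsed objects into providers internally, while production code goes through setQueryProvider/setAggProvider. A rough, untested sketch of the test-only path (ids, index pattern and aggregation are all illustrative; the histogram interval must stay positive so the default chunking config can be derived):

```java
import java.util.Collections;

import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;

final class DatafeedBuilderExample {
    static DatafeedConfig build() {
        DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed-1", "job-1");
        builder.setIndices(Collections.singletonList("metrics-*"));
        builder.setParsedQuery(QueryBuilders.termQuery("status", "error"));   // wrapped into a QueryProvider
        builder.setParsedAggregations(AggregatorFactories.builder()           // wrapped into an AggProvider
            .addAggregator(AggregationBuilders.dateHistogram("buckets")
                .field("@timestamp")
                .interval(300_000L)
                .subAggregation(AggregationBuilders.max("time").field("@timestamp"))));
        return builder.build();
    }
}
```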
checkValidHistogramInterval(DatafeedConfig datafeedConfig, A } } - private static void checkFrequencyIsMultipleOfHistogramInterval(DatafeedConfig datafeedConfig) { + private static void checkFrequencyIsMultipleOfHistogramInterval(DatafeedConfig datafeedConfig, NamedXContentRegistry xContentRegistry) { TimeValue frequency = datafeedConfig.getFrequency(); if (frequency != null) { - long histogramIntervalMillis = datafeedConfig.getHistogramIntervalMillis(); + long histogramIntervalMillis = datafeedConfig.getHistogramIntervalMillis(xContentRegistry); long frequencyMillis = frequency.millis(); if (frequencyMillis % histogramIntervalMillis != 0) { throw ExceptionsHelper.badRequestException(Messages.getMessage( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index 23c2eeccc6a59..32fbb2526549a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -12,18 +12,18 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.XContentObjectTransformer; import java.io.IOException; import java.util.ArrayList; @@ -52,12 +52,13 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { TimeValue.parseTimeValue(val, DatafeedConfig.QUERY_DELAY.getPreferredName())), DatafeedConfig.QUERY_DELAY); PARSER.declareString((builder, val) -> builder.setFrequency( TimeValue.parseTimeValue(val, DatafeedConfig.FREQUENCY.getPreferredName())), DatafeedConfig.FREQUENCY); - PARSER.declareObject(Builder::setQuery, - (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), DatafeedConfig.QUERY); - PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), - DatafeedConfig.AGGREGATIONS); - PARSER.declareObject(Builder::setAggregations,(p, c) -> AggregatorFactories.parseAggregators(p), - DatafeedConfig.AGGS); + PARSER.declareObject(Builder::setQuery, (p, c) -> QueryProvider.fromXContent(p, false), DatafeedConfig.QUERY); + PARSER.declareObject(Builder::setAggregationsSafe, + (p, c) -> AggProvider.fromXContent(p, false), + DatafeedConfig.AGGREGATIONS); + PARSER.declareObject(Builder::setAggregationsSafe, + (p, c) -> AggProvider.fromXContent(p, false), + DatafeedConfig.AGGS); PARSER.declareObject(Builder::setScriptFields, (p, c) -> { List parsedScriptFields = new ArrayList<>(); while (p.nextToken() != XContentParser.Token.END_OBJECT) { @@ -78,23 +79,25 @@ public class 
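setDefaultChunkingConfig now works off the provider's already-parsed aggregations and rejects a non-positive histogram interval up front instead of silently producing a zero-length chunk. The derivation, pulled out into a standalone sketch (the bucket-count constant is an assumption mirroring DatafeedConfig's private DEFAULT_AGGREGATION_CHUNKING_BUCKETS, and the exception message is illustrative):

```java
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig;
import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils;

final class ChunkingExample {
    private static final int DEFAULT_AGGREGATION_CHUNKING_BUCKETS = 1000; // assumed value

    static ChunkingConfig defaultChunking(AggregatorFactories.Builder parsedAggs) {
        if (parsedAggs == null) {
            return ChunkingConfig.newAuto();                 // no aggs: let the extractor pick chunk sizes
        }
        long histogramIntervalMillis = ExtractorUtils.getHistogramIntervalMillis(parsedAggs);
        if (histogramIntervalMillis <= 0) {
            throw new IllegalArgumentException("Aggregation interval must be greater than 0");
        }
        // chunk a fixed number of histogram buckets per search
        return ChunkingConfig.newManual(TimeValue.timeValueMillis(DEFAULT_AGGREGATION_CHUNKING_BUCKETS * histogramIntervalMillis));
    }
}
```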
DatafeedUpdate implements Writeable, ToXContentObject { private final TimeValue queryDelay; private final TimeValue frequency; private final List indices; - private final QueryBuilder query; - private final AggregatorFactories.Builder aggregations; + private final QueryProvider queryProvider; + private final AggProvider aggProvider; private final List scriptFields; private final Integer scrollSize; private final ChunkingConfig chunkingConfig; private final DelayedDataCheckConfig delayedDataCheckConfig; - private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, QueryBuilder query, - AggregatorFactories.Builder aggregations, List scriptFields, Integer scrollSize, - ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) { + + private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, + QueryProvider queryProvider, AggProvider aggProvider, + List scriptFields, + Integer scrollSize, ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) { this.id = id; this.jobId = jobId; this.queryDelay = queryDelay; this.frequency = frequency; this.indices = indices; - this.query = query; - this.aggregations = aggregations; + this.queryProvider = queryProvider; + this.aggProvider = aggProvider; this.scriptFields = scriptFields; this.scrollSize = scrollSize; this.chunkingConfig = chunkingConfig; @@ -117,8 +120,13 @@ public DatafeedUpdate(StreamInput in) throws IOException { in.readStringList(); } } - this.query = in.readOptionalNamedWriteable(QueryBuilder.class); - this.aggregations = in.readOptionalWriteable(AggregatorFactories.Builder::new); + if (in.getVersion().before(Version.V_7_0_0)) { + this.queryProvider = QueryProvider.fromParsedQuery(in.readOptionalNamedWriteable(QueryBuilder.class)); + this.aggProvider = AggProvider.fromParsedAggs(in.readOptionalWriteable(AggregatorFactories.Builder::new)); + } else { + this.queryProvider = in.readOptionalWriteable(QueryProvider::fromStream); + this.aggProvider = in.readOptionalWriteable(AggProvider::fromStream); + } if (in.readBoolean()) { this.scriptFields = in.readList(SearchSourceBuilder.ScriptField::new); } else { @@ -158,8 +166,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); out.writeStringCollection(Collections.emptyList()); } - out.writeOptionalNamedWriteable(query); - out.writeOptionalWriteable(aggregations); + if (out.getVersion().before(Version.V_7_0_0)) { + out.writeOptionalNamedWriteable(queryProvider == null ? null : queryProvider.getParsedQuery()); + out.writeOptionalWriteable(aggProvider == null ? 
null : aggProvider.getParsedAggs()); + } else { + out.writeOptionalWriteable(queryProvider); + out.writeOptionalWriteable(aggProvider); + } if (scriptFields != null) { out.writeBoolean(true); out.writeList(scriptFields); @@ -185,8 +198,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(DatafeedConfig.FREQUENCY.getPreferredName(), frequency.getStringRep()); } addOptionalField(builder, DatafeedConfig.INDICES, indices); - addOptionalField(builder, DatafeedConfig.QUERY, query); - addOptionalField(builder, DatafeedConfig.AGGREGATIONS, aggregations); + if (queryProvider != null) { + builder.field(DatafeedConfig.QUERY.getPreferredName(), queryProvider.getQuery()); + } + if (aggProvider != null) { + builder.field(DatafeedConfig.AGGREGATIONS.getPreferredName(), aggProvider.getAggs()); + } if (scriptFields != null) { builder.startObject(DatafeedConfig.SCRIPT_FIELDS.getPreferredName()); for (SearchSourceBuilder.ScriptField scriptField : scriptFields) { @@ -227,19 +244,22 @@ Integer getScrollSize() { return scrollSize; } - QueryBuilder getQuery() { - return query; + Map getQuery() { + return queryProvider == null ? null : queryProvider.getQuery(); } - AggregatorFactories.Builder getAggregations() { - return aggregations; + QueryBuilder getParsedQuery(NamedXContentRegistry namedXContentRegistry) throws IOException { + return XContentObjectTransformer.queryBuilderTransformer(namedXContentRegistry).fromMap(queryProvider.getQuery(), + new ArrayList<>()); } - /** - * Returns the histogram's interval as epoch millis. - */ - long getHistogramIntervalMillis() { - return ExtractorUtils.getHistogramIntervalMillis(aggregations); + Map getAggregations() { + return aggProvider == null ? null : aggProvider.getAggs(); + } + + AggregatorFactories.Builder getParsedAgg(NamedXContentRegistry namedXContentRegistry) throws IOException { + return XContentObjectTransformer.aggregatorTransformer(namedXContentRegistry).fromMap(aggProvider.getAggs(), + new ArrayList<>()); } /** @@ -247,7 +267,7 @@ long getHistogramIntervalMillis() { * otherwise */ boolean hasAggregations() { - return aggregations != null && aggregations.count() > 0; + return getAggregations() != null && getAggregations().size() > 0; } List getScriptFields() { @@ -284,12 +304,12 @@ public DatafeedConfig apply(DatafeedConfig datafeedConfig, Map h if (indices != null) { builder.setIndices(indices); } - if (query != null) { - builder.setParsedQuery(query); + if (queryProvider != null) { + builder.setQueryProvider(queryProvider); } - if (aggregations != null) { - DatafeedConfig.validateAggregations(aggregations); - builder.setParsedAggregations(aggregations); + if (aggProvider != null) { + DatafeedConfig.validateAggregations(aggProvider.getParsedAggs()); + builder.setAggProvider(aggProvider); } if (scriptFields != null) { builder.setScriptFields(scriptFields); @@ -337,9 +357,9 @@ public boolean equals(Object other) { && Objects.equals(this.frequency, that.frequency) && Objects.equals(this.queryDelay, that.queryDelay) && Objects.equals(this.indices, that.indices) - && Objects.equals(this.query, that.query) + && Objects.equals(this.queryProvider, that.queryProvider) && Objects.equals(this.scrollSize, that.scrollSize) - && Objects.equals(this.aggregations, that.aggregations) + && Objects.equals(this.aggProvider, that.aggProvider) && Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig) && Objects.equals(this.scriptFields, that.scriptFields) && Objects.equals(this.chunkingConfig, that.chunkingConfig); 
@@ -347,7 +367,7 @@ public boolean equals(Object other) { @Override public int hashCode() { - return Objects.hash(id, jobId, frequency, queryDelay, indices, query, scrollSize, aggregations, scriptFields, chunkingConfig, + return Objects.hash(id, jobId, frequency, queryDelay, indices, queryProvider, scrollSize, aggProvider, scriptFields, chunkingConfig, delayedDataCheckConfig); } @@ -360,9 +380,9 @@ boolean isNoop(DatafeedConfig datafeed) { return (frequency == null || Objects.equals(frequency, datafeed.getFrequency())) && (queryDelay == null || Objects.equals(queryDelay, datafeed.getQueryDelay())) && (indices == null || Objects.equals(indices, datafeed.getIndices())) - && (query == null || Objects.equals(query, datafeed.getParsedQuery())) + && (queryProvider == null || Objects.equals(queryProvider.getQuery(), datafeed.getQuery())) && (scrollSize == null || Objects.equals(scrollSize, datafeed.getQueryDelay())) - && (aggregations == null || Objects.equals(aggregations, datafeed.getParsedAggregations())) + && (aggProvider == null || Objects.equals(aggProvider.getAggs(), datafeed.getAggregations())) && (scriptFields == null || Objects.equals(scriptFields, datafeed.getScriptFields())) && (delayedDataCheckConfig == null || Objects.equals(delayedDataCheckConfig, datafeed.getDelayedDataCheckConfig())) && (chunkingConfig == null || Objects.equals(chunkingConfig, datafeed.getChunkingConfig())); @@ -375,8 +395,8 @@ public static class Builder { private TimeValue queryDelay; private TimeValue frequency; private List indices; - private QueryBuilder query; - private AggregatorFactories.Builder aggregations; + private QueryProvider queryProvider; + private AggProvider aggProvider; private List scriptFields; private Integer scrollSize; private ChunkingConfig chunkingConfig; @@ -395,8 +415,8 @@ public Builder(DatafeedUpdate config) { this.queryDelay = config.queryDelay; this.frequency = config.frequency; this.indices = config.indices; - this.query = config.query; - this.aggregations = config.aggregations; + this.queryProvider = config.queryProvider; + this.aggProvider = config.aggProvider; this.scriptFields = config.scriptFields; this.scrollSize = config.scrollSize; this.chunkingConfig = config.chunkingConfig; @@ -423,12 +443,19 @@ public void setFrequency(TimeValue frequency) { this.frequency = frequency; } - public void setQuery(QueryBuilder query) { - this.query = query; + public void setQuery(QueryProvider queryProvider) { + this.queryProvider = queryProvider; + } + + private void setAggregationsSafe(AggProvider aggProvider) { + if (this.aggProvider != null) { + throw ExceptionsHelper.badRequestException("Found two aggregation definitions: [aggs] and [aggregations]"); + } + setAggregations(aggProvider); } - public void setAggregations(AggregatorFactories.Builder aggregations) { - this.aggregations = aggregations; + public void setAggregations(AggProvider aggProvider) { + this.aggProvider = aggProvider; } public void setScriptFields(List scriptFields) { @@ -450,7 +477,7 @@ public void setChunkingConfig(ChunkingConfig chunkingConfig) { } public DatafeedUpdate build() { - return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, query, aggregations, scriptFields, scrollSize, + return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, queryProvider, aggProvider, scriptFields, scrollSize, chunkingConfig, delayedDataCheckConfig); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProvider.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProvider.java new file mode 100644 index 0000000000000..4e078b1e6440d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProvider.java @@ -0,0 +1,162 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.XContentObjectTransformer; + +import java.io.IOException; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Objects; + +class QueryProvider implements Writeable, ToXContentObject { + + private static final Logger logger = LogManager.getLogger(QueryProvider.class); + + private Exception parsingException; + private QueryBuilder parsedQuery; + private Map query; + + static QueryProvider defaultQuery() { + return new QueryProvider( + Collections.singletonMap(MatchAllQueryBuilder.NAME, Collections.emptyMap()), + QueryBuilders.matchAllQuery(), + null); + } + + static QueryProvider fromXContent(XContentParser parser, boolean lenient) throws IOException { + Map query = parser.mapOrdered(); + QueryBuilder parsedQuery = null; + Exception exception = null; + try { + parsedQuery = XContentObjectTransformer.queryBuilderTransformer(parser.getXContentRegistry()).fromMap(query); + } catch(Exception ex) { + if (ex.getCause() instanceof IllegalArgumentException) { + ex = (Exception)ex.getCause(); + } + exception = ex; + if (lenient) { + logger.warn(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, ex); + } else { + throw ExceptionsHelper.badRequestException(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, ex); + } + } + return new QueryProvider(query, parsedQuery, exception); + } + + static QueryProvider fromParsedQuery(QueryBuilder parsedQuery) throws IOException { + return parsedQuery == null ? 
+ null : + new QueryProvider( + XContentObjectTransformer.queryBuilderTransformer(NamedXContentRegistry.EMPTY).toMap(parsedQuery), + parsedQuery, + null); + } + + static QueryProvider fromStream(StreamInput in) throws IOException { + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { // Has our bug fix for query/agg providers + return new QueryProvider(in.readMap(), in.readOptionalNamedWriteable(QueryBuilder.class), in.readException()); + } else if (in.getVersion().onOrAfter(Version.V_6_6_0)) { // Has the bug, but supports lazy objects + return new QueryProvider(in.readMap(), null, null); + } else { // only supports eagerly parsed objects + return QueryProvider.fromParsedQuery(in.readNamedWriteable(QueryBuilder.class)); + } + } + + QueryProvider(Map query, QueryBuilder parsedQuery, Exception parsingException) { + this.query = Collections.unmodifiableMap(new LinkedHashMap<>(Objects.requireNonNull(query, "[query] must not be null"))); + this.parsedQuery = parsedQuery; + this.parsingException = parsingException; + } + + QueryProvider(QueryProvider other) { + this(other.query, other.parsedQuery, other.parsingException); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { // Has our bug fix for query/agg providers + out.writeMap(query); + out.writeOptionalNamedWriteable(parsedQuery); + out.writeException(parsingException); + } else if (out.getVersion().onOrAfter(Version.V_6_6_0)) { // Has the bug, but supports lazy objects + // We allow the lazy parsing nodes that have the bug throw any parsing errors themselves as + // they already have the ability to fully parse the passed Maps + out.writeMap(query); + } else { // only supports eagerly parsed objects + if (parsingException != null) { // Do we have a parsing error? Throw it + if (parsingException instanceof IOException) { + throw (IOException) parsingException; + } else { + throw new ElasticsearchException(parsingException); + } + } else if (parsedQuery == null) { // Do we have a query defined but not parsed? 
+ // This is an admittedly rare case but we should fail early instead of writing null when there + // actually is a query defined + throw new ElasticsearchException("Unsupported operation: parsed query is null"); + } + out.writeNamedWriteable(parsedQuery); + } + } + + public Exception getParsingException() { + return parsingException; + } + + public QueryBuilder getParsedQuery() { + return parsedQuery; + } + + public Map getQuery() { + return query; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + QueryProvider that = (QueryProvider) other; + + return Objects.equals(this.query, that.query) + && Objects.equals(this.parsedQuery, that.parsedQuery) + && Objects.equals(this.parsingException, that.parsingException); + } + + @Override + public int hashCode() { + return Objects.hash(query, parsedQuery, parsingException); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.map(query); + return builder; + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/DataExtractor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/DataExtractor.java index 20968b22425a2..ff2763833615e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/DataExtractor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/DataExtractor.java @@ -33,4 +33,9 @@ public interface DataExtractor { * Cancel the current search. */ void cancel(); + + /** + * @return the end time to which this extractor will search + */ + long getEndTime(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index a877b72bee0da..80542909efda9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -26,8 +26,8 @@ public final class Messages { "delayed_data_check_config: check_window [{0}] must be greater than the bucket_span [{1}]"; public static final String DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS = "delayed_data_check_config: check_window [{0}] must be less than 10,000x the bucket_span [{1}]"; - public static final String DATAFEED_CONFIG_QUERY_BAD_FORMAT = "Datafeed [{0}] query is not parsable: {1}"; - public static final String DATAFEED_CONFIG_AGG_BAD_FORMAT = "Datafeed [{0}] aggregations are not parsable: {1}"; + public static final String DATAFEED_CONFIG_QUERY_BAD_FORMAT = "Datafeed query is not parsable"; + public static final String DATAFEED_CONFIG_AGG_BAD_FORMAT = "Datafeed aggregations are not parsable"; public static final String DATAFEED_DOES_NOT_SUPPORT_JOB_WITH_LATENCY = "A job configured with datafeed cannot support latency"; public static final String DATAFEED_NOT_FOUND = "No datafeed with id [{0}] exists"; @@ -79,6 +79,7 @@ public final class Messages { public static final String JOB_AUDIT_DATAFEED_STARTED_FROM_TO = "Datafeed started (from: {0} to: {1}) with frequency [{2}]"; public static final String JOB_AUDIT_DATAFEED_STARTED_REALTIME = "Datafeed started in real-time"; public static final String JOB_AUDIT_DATAFEED_STOPPED = "Datafeed stopped"; + public 
static final String JOB_AUDIT_DATAFEED_ISOLATED = "Datafeed isolated"; public static final String JOB_AUDIT_DELETING = "Deleting job by task with id ''{0}''"; public static final String JOB_AUDIT_DELETING_FAILED = "Error deleting job: {0}"; public static final String JOB_AUDIT_DELETED = "Job deleted"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index d51a8f10e4a5a..986c352b9cd7b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -18,7 +18,7 @@ import org.elasticsearch.cluster.metadata.AliasOrIndex; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.Index; @@ -62,6 +62,7 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -86,8 +87,6 @@ */ public class ElasticsearchMappings { - public static final String DOC_TYPE = "doc"; - /** * String constants used in mappings */ @@ -137,7 +136,7 @@ private ElasticsearchMappings() { public static XContentBuilder configMapping() throws IOException { XContentBuilder builder = jsonBuilder(); builder.startObject(); - builder.startObject(DOC_TYPE); + builder.startObject(SINGLE_MAPPING_NAME); addMetaInformation(builder); addDefaultMapping(builder); builder.startObject(PROPERTIES); @@ -420,14 +419,14 @@ public static void addMetaInformation(XContentBuilder builder) throws IOExceptio .endObject(); } - public static XContentBuilder resultsMapping() throws IOException { - return resultsMapping(Collections.emptyList()); + public static XContentBuilder resultsMapping(String mappingType) throws IOException { + return resultsMapping(mappingType, Collections.emptyList()); } - public static XContentBuilder resultsMapping(Collection extraTermFields) throws IOException { + public static XContentBuilder resultsMapping(String mappingType, Collection extraTermFields) throws IOException { XContentBuilder builder = jsonBuilder(); builder.startObject(); - builder.startObject(DOC_TYPE); + builder.startObject(mappingType); addMetaInformation(builder); addDefaultMapping(builder); builder.startObject(PROPERTIES); @@ -456,11 +455,12 @@ public static XContentBuilder resultsMapping(Collection extraTermFields) // end properties builder.endObject(); - // end mapping + // end type builder.endObject(); - // end doc + // end mapping builder.endObject(); + return builder; } @@ -575,18 +575,25 @@ private static void addResultsMapping(XContentBuilder builder) throws IOExceptio addModelSizeStatsFieldsToMapping(builder); } - public static XContentBuilder termFieldsMapping(String type, Collection termFields) { + /** + * Generate a keyword mapping for {@code termFields} for the default type + * {@link 
org.elasticsearch.index.mapper.MapperService#SINGLE_MAPPING_NAME} + * + * If the returned mapping is used in index creation and the new index has a matching template + * then the mapping type ({@link org.elasticsearch.index.mapper.MapperService#SINGLE_MAPPING_NAME}) + * must match the mapping type of the template otherwise the mappings will not be merged correctly. + * + * @param termFields Fields to generate mapping for + * @return The mapping + */ + public static XContentBuilder termFieldsMapping(Collection termFields) { try { XContentBuilder builder = jsonBuilder().startObject(); - if (type != null) { - builder.startObject(type); - } + builder.startObject(SINGLE_MAPPING_NAME); builder.startObject(PROPERTIES); addTermFields(builder, termFields); builder.endObject(); - if (type != null) { - builder.endObject(); - } + builder.endObject(); return builder.endObject(); } catch (IOException e) { throw new RuntimeException(e); @@ -872,7 +879,7 @@ private static void addCategoryDefinitionMapping(XContentBuilder builder) throws public static XContentBuilder stateMapping() throws IOException { XContentBuilder builder = jsonBuilder(); builder.startObject(); - builder.startObject(DOC_TYPE); + builder.startObject(SINGLE_MAPPING_NAME); addMetaInformation(builder); builder.field(ENABLED, false); builder.endObject(); @@ -960,33 +967,34 @@ private static void addModelSizeStatsFieldsToMapping(XContentBuilder builder) th } public static XContentBuilder auditMessageMapping() throws IOException { - XContentBuilder builder = jsonBuilder().startObject() - .startObject(AuditMessage.TYPE.getPreferredName()); + XContentBuilder builder = jsonBuilder().startObject(); + builder.startObject(SINGLE_MAPPING_NAME); addMetaInformation(builder); builder.startObject(PROPERTIES) - .startObject(Job.ID.getPreferredName()) - .field(TYPE, KEYWORD) - .endObject() - .startObject(AuditMessage.LEVEL.getPreferredName()) - .field(TYPE, KEYWORD) - .endObject() - .startObject(AuditMessage.MESSAGE.getPreferredName()) - .field(TYPE, TEXT) - .startObject(FIELDS) - .startObject(RAW) - .field(TYPE, KEYWORD) - .endObject() - .endObject() - .endObject() - .startObject(AuditMessage.TIMESTAMP.getPreferredName()) - .field(TYPE, DATE) - .endObject() - .startObject(AuditMessage.NODE_NAME.getPreferredName()) - .field(TYPE, KEYWORD) - .endObject() + .startObject(Job.ID.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AuditMessage.LEVEL.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AuditMessage.MESSAGE.getPreferredName()) + .field(TYPE, TEXT) + .startObject(FIELDS) + .startObject(RAW) + .field(TYPE, KEYWORD) .endObject() .endObject() - .endObject(); + .endObject() + .startObject(AuditMessage.TIMESTAMP.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(AuditMessage.NODE_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .endObject() + .endObject() + .endObject(); + return builder; } @@ -994,12 +1002,12 @@ static String[] mappingRequiresUpdate(ClusterState state, String[] concreteIndic List indicesToUpdate = new ArrayList<>(); ImmutableOpenMap> currentMapping = state.metaData().findMappings(concreteIndices, - new String[] {DOC_TYPE}, MapperPlugin.NOOP_FIELD_FILTER); + new String[0], MapperPlugin.NOOP_FIELD_FILTER); for (String index : concreteIndices) { ImmutableOpenMap innerMap = currentMapping.get(index); if (innerMap != null) { - MappingMetaData metaData = innerMap.get(DOC_TYPE); + MappingMetaData metaData = innerMap.valuesIt().next(); try { 
@SuppressWarnings("unchecked") Map meta = (Map) metaData.sourceAsMap().get("_meta"); @@ -1038,7 +1046,8 @@ static String[] mappingRequiresUpdate(ClusterState state, String[] concreteIndic return indicesToUpdate.toArray(new String[indicesToUpdate.size()]); } - public static void addDocMappingIfMissing(String alias, CheckedSupplier mappingSupplier, + public static void addDocMappingIfMissing(String alias, + CheckedBiFunction, XContentBuilder, IOException> mappingSupplier, Client client, ClusterState state, ActionListener listener) { AliasOrIndex aliasOrIndex = state.metaData().getAliasAndIndexLookup().get(alias); if (aliasOrIndex == null) { @@ -1058,9 +1067,13 @@ public static void addDocMappingIfMissing(String alias, CheckedSupplier 0) { - try (XContentBuilder mapping = mappingSupplier.get()) { + // Use the mapping type of the first index in the update + IndexMetaData indexMetaData = state.metaData().index(indicesThatRequireAnUpdate[0]); + String mappingType = indexMetaData.mapping().type(); + + try (XContentBuilder mapping = mappingSupplier.apply(mappingType, Collections.emptyList())) { PutMappingRequest putMappingRequest = new PutMappingRequest(indicesThatRequireAnUpdate); - putMappingRequest.type(DOC_TYPE); + putMappingRequest.type(mappingType); putMappingRequest.source(mapping); executeAsyncWithOrigin(client, ML_ORIGIN, PutMappingAction.INSTANCE, putMappingRequest, ActionListener.wrap(response -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java index 08a6d8846f9a3..8d542ce25af93 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java @@ -24,7 +24,7 @@ * Job processed record counts. *

* The getInput... methods return the actual number of - * fields/records sent the the API including invalid records. + * fields/records sent the API including invalid records. * The getProcessed... methods are the number sent to the * Engine. *

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java index 1763006afbe91..fcb7ed479cf38 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java @@ -22,7 +22,7 @@ import java.util.Objects; public class AuditMessage implements ToXContentObject, Writeable { - public static final ParseField TYPE = new ParseField("audit_message"); + private static final ParseField TYPE = new ParseField("audit_message"); public static final ParseField MESSAGE = new ParseField("message"); public static final ParseField LEVEL = new ParseField("level"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformer.java index bbea101418329..74350c2406499 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformer.java @@ -7,7 +7,6 @@ import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -18,12 +17,10 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregatorFactories; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -38,15 +35,8 @@ public class XContentObjectTransformer { private final NamedXContentRegistry registry; private final CheckedFunction parserFunction; - // We need this registry for parsing out Aggregations and Searches - private static NamedXContentRegistry searchRegistry; - static { - SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); - searchRegistry = new NamedXContentRegistry(searchModule.getNamedXContents()); - } - - public static XContentObjectTransformer aggregatorTransformer() { - return new XContentObjectTransformer<>(searchRegistry, (p) -> { + public static XContentObjectTransformer aggregatorTransformer(NamedXContentRegistry registry) { + return new XContentObjectTransformer<>(registry, (p) -> { // Serializing a map creates an object, need to skip the start object for the aggregation parser XContentParser.Token token = p.nextToken(); assert(XContentParser.Token.START_OBJECT.equals(token)); @@ -54,8 +44,8 @@ public static XContentObjectTransformer aggregatorT }); } - public static XContentObjectTransformer queryBuilderTransformer() { - return new XContentObjectTransformer<>(searchRegistry, AbstractQueryBuilder::parseInnerQueryBuilder); + public static XContentObjectTransformer queryBuilderTransformer(NamedXContentRegistry registry) { + return new XContentObjectTransformer<>(registry, AbstractQueryBuilder::parseInnerQueryBuilder); } 
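With the SearchModule-backed static registry gone, XContentObjectTransformer no longer decides for itself which named x-content parsers are available; callers pass in the NamedXContentRegistry they already hold, normally the node's own. The snippet below is a usage sketch, assuming a caller such as a test that has no node registry and therefore rebuilds one the way the deleted static initializer did; the TransformerUsage class is invented for the example.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.xpack.core.ml.utils.XContentObjectTransformer;

import java.util.Collections;

public class TransformerUsage {
    public static void main(String[] args) {
        // Rebuild a search-aware registry the way the deleted static initializer did.
        SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
        NamedXContentRegistry registry = new NamedXContentRegistry(searchModule.getNamedXContents());

        // The factory methods now take that registry explicitly.
        XContentObjectTransformer<AggregatorFactories.Builder> aggTransformer =
                XContentObjectTransformer.aggregatorTransformer(registry);
        XContentObjectTransformer<QueryBuilder> queryTransformer =
                XContentObjectTransformer.queryBuilderTransformer(registry);
    }
}

Using the node's registry rather than a synthetic one also lets plugin-registered queries and aggregations be parsed, not just the default search set.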
XContentObjectTransformer(NamedXContentRegistry registry, CheckedFunction parserFunction) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java index adb6b04db35b2..12c4e0b7ee3cc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java @@ -7,10 +7,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.client.Requests; +import org.elasticsearch.action.bulk.BulkRequestParser; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -69,38 +66,32 @@ public MonitoringBulkRequest add(MonitoringBulkDoc doc) { * Parses a monitoring bulk request and builds the list of documents to be indexed. */ public MonitoringBulkRequest add(final MonitoredSystem system, - final String defaultType, final BytesReference content, final XContentType xContentType, final long timestamp, final long intervalMillis) throws IOException { - // MonitoringBulkRequest accepts a body request that has the same format as the BulkRequest: - // instead of duplicating the parsing logic here we use a new BulkRequest instance to parse the content. - final BulkRequest bulkRequest = Requests.bulkRequest().add(content, null, defaultType, xContentType); + // MonitoringBulkRequest accepts a body request that has the same format as the BulkRequest + new BulkRequestParser(false).parse(content, null, null, null, null, true, xContentType, + indexRequest -> { + // we no longer accept non-timestamped indexes from Kibana, LS, or Beats because we do not use the data + // and it was duplicated anyway; by simply dropping it, we allow BWC for older clients that still send it + if (MonitoringIndex.from(indexRequest.index()) != MonitoringIndex.TIMESTAMPED) { + return; + } + final BytesReference source = indexRequest.source(); + if (source.length() == 0) { + throw new IllegalArgumentException("source is missing for monitoring document [" + + indexRequest.index() + "][" + indexRequest.type() + "][" + indexRequest.id() + "]"); + } + + // builds a new monitoring document based on the index request + add(new MonitoringBulkDoc(system, indexRequest.type(), indexRequest.id(), timestamp, intervalMillis, source, + xContentType)); + }, + updateRequest -> { throw new IllegalArgumentException("monitoring bulk requests should only contain index requests"); }, + deleteRequest -> { throw new IllegalArgumentException("monitoring bulk requests should only contain index requests"); }); - for (DocWriteRequest request : bulkRequest.requests()) { - if (request instanceof IndexRequest) { - final IndexRequest indexRequest = (IndexRequest) request; - - // we no longer accept non-timestamped indexes from Kibana, LS, or Beats because we do not use the data - // and it was duplicated anyway; by simply dropping it, we allow BWC for older clients that still send it - if (MonitoringIndex.from(indexRequest.index()) != MonitoringIndex.TIMESTAMPED) { - continue; - } - - final 
BytesReference source = indexRequest.source(); - if (source.length() == 0) { - throw new IllegalArgumentException("source is missing for monitoring document [" - + indexRequest.index() + "][" + indexRequest.type() + "][" + indexRequest.id() + "]"); - } - - // builds a new monitoring document based on the index request - add(new MonitoringBulkDoc(system, indexRequest.type(), indexRequest.id(), timestamp, intervalMillis, source, xContentType)); - } else { - throw new IllegalArgumentException("monitoring bulk requests should only contain index requests"); - } - } return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequestBuilder.java index 576505a2d2eb1..904aac453fdc6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequestBuilder.java @@ -26,12 +26,11 @@ public MonitoringBulkRequestBuilder add(MonitoringBulkDoc doc) { } public MonitoringBulkRequestBuilder add(final MonitoredSystem system, - final String type, final BytesReference content, final XContentType xContentType, final long timestamp, final long intervalMillis) throws IOException { - request.add(system, type, content, xContentType, timestamp, intervalMillis); + request.add(system, content, xContentType, timestamp, intervalMillis); return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringDoc.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringDoc.java index 0642ed5955f77..21ceb6097ae3a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringDoc.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringDoc.java @@ -16,9 +16,7 @@ import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import java.io.IOException; -import java.time.Instant; import java.time.ZoneOffset; -import java.time.ZonedDateTime; import java.util.Objects; /** @@ -26,7 +24,7 @@ */ public abstract class MonitoringDoc implements ToXContentObject { - private static final DateFormatter dateTimeFormatter = DateFormatter.forPattern("strict_date_time"); + private static final DateFormatter dateTimeFormatter = DateFormatter.forPattern("strict_date_time").withZone(ZoneOffset.UTC); private final String cluster; private final long timestamp; private final long intervalMillis; @@ -126,9 +124,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * @return a string representing the timestamp */ public static String toUTC(final long timestamp) { - ZonedDateTime zonedDateTime = Instant.ofEpochMilli(timestamp).atZone(ZoneOffset.UTC); - return dateTimeFormatter.format(zonedDateTime); - + return dateTimeFormatter.formatMillis(timestamp); } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringTemplateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringTemplateUtils.java index 7666ac6ca72eb..9d4a941a24c2c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringTemplateUtils.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringTemplateUtils.java @@ -29,22 +29,23 @@ public final class MonitoringTemplateUtils { *

* It may be possible for this to diverge between templates and pipelines, but for now they're the same. */ - public static final int LAST_UPDATED_VERSION = Version.V_7_0_0.id; + public static final int LAST_UPDATED_VERSION = Version.V_7_0_1.id; /** * Current version of templates used in their name to differentiate from breaking changes (separate from product version). + * Version 7 has the same structure as version 6, but uses the `_doc` type. */ - public static final String TEMPLATE_VERSION = "6"; + public static final String TEMPLATE_VERSION = "7"; /** * The previous version of templates, which we still support via the REST /_monitoring/bulk endpoint because * nothing changed for those documents. */ - public static final String OLD_TEMPLATE_VERSION = "2"; + public static final String OLD_TEMPLATE_VERSION = "6"; /** * IDs of templates that can be used with {@linkplain #loadTemplate(String) loadTemplate}. */ - public static final String[] TEMPLATE_IDS = { "alerts", "es", "kibana", "logstash", "beats" }; + public static final String[] TEMPLATE_IDS = { "alerts-7", "es", "kibana", "logstash", "beats" }; /** * IDs of templates that can be used with {@linkplain #createEmptyTemplate(String) createEmptyTemplate} that are not managed by a @@ -54,7 +55,7 @@ public final class MonitoringTemplateUtils { * instances will attempt to create a named template based on the templates that they expect (e.g., ".monitoring-es-2") and not the * ones that we are creating. */ - public static final String[] OLD_TEMPLATE_IDS = { "data", "es", "kibana", "logstash", "alerts" }; + public static final String[] OLD_TEMPLATE_IDS = { "data", "es", "kibana", "logstash" }; //excluding alerts since 6.x watches use it /** * IDs of pipelines that can be used with @@ -99,7 +100,7 @@ public static String loadTemplate(final String id) { * @see #OLD_TEMPLATE_VERSION */ public static String createEmptyTemplate(final String id) { - // e.g., { "index_patterns": [ ".monitoring-data-2*" ], "version": 6000002 } + // e.g., { "index_patterns": [ ".monitoring-data-6*" ], "version": 6000002 } return "{\"index_patterns\":[\".monitoring-" + id + "-" + OLD_TEMPLATE_VERSION + "*\"],\"version\":" + LAST_UPDATED_VERSION + "}"; } @@ -120,7 +121,7 @@ public static String pipelineName(String id) { * The expectation is that you will call either {@link Strings#toString(XContentBuilder)} or * {@link BytesReference#bytes(XContentBuilder)}}. * - * @param id The API version (e.g., "2") to use + * @param id The API version (e.g., "6") to use * @param type The type of data you want to format for the request * @return Never {@code null}. Always an ended-object. * @throws IllegalArgumentException if {@code apiVersion} is unrecognized @@ -131,7 +132,7 @@ public static XContentBuilder loadPipeline(final String id, final XContentType t case TEMPLATE_VERSION: return emptyPipeline(type); case OLD_TEMPLATE_VERSION: - return pipelineForApiVersion2(type); + return pipelineForApiVersion6(type); } throw new IllegalArgumentException("unrecognized pipeline API version [" + id + "]"); @@ -139,13 +140,6 @@ public static XContentBuilder loadPipeline(final String id, final XContentType t /** * Create a pipeline to upgrade documents from {@link MonitoringTemplateUtils#OLD_TEMPLATE_VERSION} - *


-     * {
-     *   "description" : "This pipeline upgrades documents ...",
-     *   "version": 6000001,
-     *   "processors": [ ]
-     * }
-     * 
* The expectation is that you will call either {@link Strings#toString(XContentBuilder)} or * {@link BytesReference#bytes(XContentBuilder)}}. * @@ -153,81 +147,39 @@ public static XContentBuilder loadPipeline(final String id, final XContentType t * @return Never {@code null}. Always an ended-object. * @see #LAST_UPDATED_VERSION */ - static XContentBuilder pipelineForApiVersion2(final XContentType type) { + static XContentBuilder pipelineForApiVersion6(final XContentType type) { try { - // For now: We prepend the API version to the string so that it's easy to parse in the future; if we ever add metadata - // to pipelines, then it would better serve this use case return XContentBuilder.builder(type.xContent()).startObject() .field("description", "This pipeline upgrades documents from the older version of the Monitoring API to " + - "the newer version (" + TEMPLATE_VERSION + ") by fixing breaking " + - "changes in those older documents before they are indexed from the older version (" + - OLD_TEMPLATE_VERSION + ").") + "the newer version (" + TEMPLATE_VERSION + ") by fixing breaking " + + "changes in those older documents before they are indexed from the older version (" + + OLD_TEMPLATE_VERSION + ").") .field("version", LAST_UPDATED_VERSION) .startArray("processors") .startObject() - // Drop the .monitoring-data-2 index and effectively drop unnecessary data (duplicate or simply unused) + // remove the type .startObject("script") - .field("source", - "boolean legacyIndex = ctx._index == '.monitoring-data-2';" + - "if (legacyIndex || ctx._index.startsWith('.monitoring-es-2')) {" + - "if (ctx._type == 'cluster_info') {" + - "ctx._type = 'cluster_stats';" + - "ctx._id = null;" + - "} else if (legacyIndex || ctx._type == 'cluster_stats' || ctx._type == 'node') {" + - "String index = ctx._index;" + - "Object clusterUuid = ctx.cluster_uuid;" + - "Object timestamp = ctx.timestamp;" + - - "ctx.clear();" + - - "ctx._id = 'xpack_monitoring_2_drop_bucket';" + - "ctx._index = index;" + - "ctx._type = 'legacy_data';" + - "ctx.timestamp = timestamp;" + - "ctx.cluster_uuid = clusterUuid;" + - "}" + - "if (legacyIndex) {" + - "ctx._index = '<.monitoring-es-" + TEMPLATE_VERSION + "-{now}>';" + - "}" + - "}") - .endObject() - .endObject() - .startObject() - .startObject("rename") - .field("field", "_type") - .field("target_field", "type") - .endObject() - .endObject() - .startObject() - .startObject("set") - .field("field", "_type") - .field("value", "doc") + .field("source","ctx._type = null" ) .endObject() .endObject() .startObject() + // ensure the data lands in the correct index .startObject("gsub") .field("field", "_index") - .field("pattern", "(.monitoring-\\w+-)2(-.+)") + .field("pattern", "(.monitoring-\\w+-)6(-.+)") .field("replacement", "$1" + TEMPLATE_VERSION + "$2") .endObject() .endObject() .endArray() - .endObject(); + .endObject(); } catch (final IOException e) { throw new RuntimeException("Failed to create pipeline to upgrade from older version [" + OLD_TEMPLATE_VERSION + - "] to the newer version [" + TEMPLATE_VERSION + "].", e); + "] to the newer version [" + TEMPLATE_VERSION + "].", e); } } /** * Create an empty pipeline. - *

-     * {
-     *   "description" : "This is a placeholder pipeline ...",
-     *   "version": 6000001,
-     *   "processors": [ ]
-     * }
-     * 
* The expectation is that you will call either {@link Strings#toString(XContentBuilder)} or * {@link BytesReference#bytes(XContentBuilder)}}. * diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java index 66a2eb358986a..95dca09661978 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java @@ -126,8 +126,12 @@ public void start(Collection jobs) { public void stop() { scheduler.shutdownNow(); try { - scheduler.awaitTermination(5, TimeUnit.SECONDS); + final boolean terminated = scheduler.awaitTermination(5L, TimeUnit.SECONDS); + if (terminated == false) { + logger.warn("scheduler engine was not terminated after waiting 5s"); + } } catch (InterruptedException e) { + logger.warn("interrupted while waiting for scheduler engine termination"); Thread.currentThread().interrupt(); } } @@ -193,7 +197,7 @@ public void run() { } catch (final Throwable t) { /* * Allowing the throwable to escape here will lead to be it being caught in FutureTask#run and set as the outcome of this - * task; however, we never inspect the the outcomes of these scheduled tasks and so allowing the throwable to escape + * task; however, we never inspect the outcomes of these scheduled tasks and so allowing the throwable to escape * unhandled here could lead to us losing fatal errors. Instead, we rely on ExceptionsHelper#maybeDieOnAnotherThread to * appropriately dispatch any error to the uncaught exception handler. We should never see an exception here as these do * not escape from SchedulerEngine#notifyListeners. diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java index f615fbd0b5306..07d6872a44d03 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java @@ -90,6 +90,10 @@ protected void innerXContent(XContentBuilder builder, Params params) throws IOEx builder.field(AUDIT_XFIELD, auditUsage); builder.field(IP_FILTER_XFIELD, ipFilterUsage); builder.field(ANONYMOUS_XFIELD, anonymousUsage); + } else if (sslUsage.isEmpty() == false) { + // A trial (or basic) license can have SSL without security. + // This is because security defaults to disabled on that license, but that dynamic-default does not disable SSL. 
+ builder.field(SSL_XFIELD, sslUsage); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/NativeRoleMappingStoreField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/NativeRoleMappingStoreField.java index 28cb3d4fbbe66..ffe6d117fc689 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/NativeRoleMappingStoreField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/NativeRoleMappingStoreField.java @@ -10,7 +10,6 @@ public final class NativeRoleMappingStoreField { public static final String DOC_TYPE_FIELD = "doc_type"; public static final String DOC_TYPE_ROLE_MAPPING = "role-mapping"; public static final String ID_PREFIX = DOC_TYPE_ROLE_MAPPING + "_"; - public static final String SECURITY_GENERIC_TYPE = "doc"; private NativeRoleMappingStoreField() {} } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index d15bf966276a0..ec4afa5375932 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -138,27 +138,40 @@ public ResourcePrivilegesMap checkResourcePrivileges(Set checkForIndexPa final ResourcePrivilegesMap.Builder resourcePrivilegesMapBuilder = ResourcePrivilegesMap.builder(); final Map predicateCache = new HashMap<>(); for (String forIndexPattern : checkForIndexPatterns) { - final Automaton checkIndexAutomaton = IndicesPermission.Group.buildIndexMatcherAutomaton(allowRestrictedIndices, - forIndexPattern); - Automaton allowedIndexPrivilegesAutomaton = null; - for (Group group : groups) { - final Automaton groupIndexAutomaton = predicateCache.computeIfAbsent(group, - g -> IndicesPermission.Group.buildIndexMatcherAutomaton(g.allowRestrictedIndices(), g.indices())); - if (Operations.subsetOf(checkIndexAutomaton, groupIndexAutomaton)) { - if (allowedIndexPrivilegesAutomaton != null) { - allowedIndexPrivilegesAutomaton = Automatons - .unionAndMinimize(Arrays.asList(allowedIndexPrivilegesAutomaton, group.privilege().getAutomaton())); + Automaton checkIndexAutomaton = Automatons.patterns(forIndexPattern); + if (false == allowRestrictedIndices && false == RestrictedIndicesNames.RESTRICTED_NAMES.contains(forIndexPattern)) { + checkIndexAutomaton = Automatons.minusAndMinimize(checkIndexAutomaton, RestrictedIndicesNames.NAMES_AUTOMATON); + } + if (false == Operations.isEmpty(checkIndexAutomaton)) { + Automaton allowedIndexPrivilegesAutomaton = null; + for (Group group : groups) { + final Automaton groupIndexAutomaton = predicateCache.computeIfAbsent(group, + g -> IndicesPermission.Group.buildIndexMatcherAutomaton(g.allowRestrictedIndices(), g.indices())); + if (Operations.subsetOf(checkIndexAutomaton, groupIndexAutomaton)) { + if (allowedIndexPrivilegesAutomaton != null) { + allowedIndexPrivilegesAutomaton = Automatons + .unionAndMinimize(Arrays.asList(allowedIndexPrivilegesAutomaton, group.privilege().getAutomaton())); + } else { + allowedIndexPrivilegesAutomaton = group.privilege().getAutomaton(); + } + } + } + for (String privilege : checkForPrivileges) { + IndexPrivilege indexPrivilege = 
IndexPrivilege.get(Collections.singleton(privilege)); + if (allowedIndexPrivilegesAutomaton != null + && Operations.subsetOf(indexPrivilege.getAutomaton(), allowedIndexPrivilegesAutomaton)) { + resourcePrivilegesMapBuilder.addResourcePrivilege(forIndexPattern, privilege, Boolean.TRUE); } else { - allowedIndexPrivilegesAutomaton = group.privilege().getAutomaton(); + resourcePrivilegesMapBuilder.addResourcePrivilege(forIndexPattern, privilege, Boolean.FALSE); } } - } - for (String privilege : checkForPrivileges) { - IndexPrivilege indexPrivilege = IndexPrivilege.get(Collections.singleton(privilege)); - if (allowedIndexPrivilegesAutomaton != null - && Operations.subsetOf(indexPrivilege.getAutomaton(), allowedIndexPrivilegesAutomaton)) { - resourcePrivilegesMapBuilder.addResourcePrivilege(forIndexPattern, privilege, Boolean.TRUE); - } else { + } else { + // the index pattern produced the empty automaton, presumably because the requested pattern expands exclusively inside the + // restricted indices namespace - a namespace of indices that are normally hidden when granting/checking privileges - and + // the pattern was not marked as `allowRestrictedIndices`. We try to anticipate this by considering _explicit_ restricted + // indices even if `allowRestrictedIndices` is false. + // TODO The `false` result is a _safe_ default but this is actually an error. Make it an error. + for (String privilege : checkForPrivileges) { resourcePrivilegesMapBuilder.addResourcePrivilege(forIndexPattern, privilege, Boolean.FALSE); } } @@ -264,7 +277,7 @@ public static class Group { private final Set query; // by default certain restricted indices are exempted when granting privileges, as they should generally be hidden for ordinary // users. Setting this flag true eliminates the special status for the purpose of this permission - restricted indices still have - // to be covered by the the "indices" + // to be covered by the "indices" private final boolean allowRestrictedIndices; public Group(IndexPrivilege privilege, FieldPermissions fieldPermissions, @Nullable Set query, @@ -303,7 +316,7 @@ private boolean check(String action) { private boolean check(String action, String index) { assert index != null; return check(action) && indexNameMatcher.test(index) - && (allowRestrictedIndices || (false == RestrictedIndicesNames.NAMES_SET.contains(index))); + && (allowRestrictedIndices || (false == RestrictedIndicesNames.RESTRICTED_NAMES.contains(index))); } boolean hasQuery() { @@ -338,13 +351,13 @@ private static Predicate buildIndexMatcherPredicateForAction(String acti final Predicate predicate; if (restrictedIndices.isEmpty()) { predicate = indexMatcher(ordinaryIndices) - .and(index -> false == RestrictedIndicesNames.NAMES_SET.contains(index)); + .and(index -> false == RestrictedIndicesNames.RESTRICTED_NAMES.contains(index)); } else if (ordinaryIndices.isEmpty()) { predicate = indexMatcher(restrictedIndices); } else { predicate = indexMatcher(restrictedIndices) .or(indexMatcher(ordinaryIndices) - .and(index -> false == RestrictedIndicesNames.NAMES_SET.contains(index))); + .and(index -> false == RestrictedIndicesNames.RESTRICTED_NAMES.contains(index))); } return predicate; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java index 817a9e41eab71..207a5ab056709 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java @@ -49,7 +49,6 @@ public class Role { this.runAs = Objects.requireNonNull(runAs); } - public String[] names() { return names; } @@ -116,7 +115,7 @@ public boolean checkIndicesAction(String action) { * @return an instance of {@link ResourcePrivilegesMap} */ public ResourcePrivilegesMap checkIndicesPrivileges(Set checkForIndexPatterns, boolean allowRestrictedIndices, - Set checkForPrivileges) { + Set checkForPrivileges) { return indices.checkResourcePrivileges(checkForIndexPatterns, allowRestrictedIndices, checkForPrivileges); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java index d24863d6d53c4..e20e76ee47b37 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.xpack.core.ccr.action.ForgetFollowerAction; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import org.elasticsearch.xpack.core.ccr.action.UnfollowAction; import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; @@ -62,6 +63,7 @@ public final class IndexPrivilege extends Privilege { ExplainLifecycleAction.NAME); private static final Automaton MANAGE_FOLLOW_INDEX_AUTOMATON = patterns(PutFollowAction.NAME, UnfollowAction.NAME, CloseIndexAction.NAME + "*"); + private static final Automaton MANAGE_LEADER_INDEX_AUTOMATON = patterns(ForgetFollowerAction.NAME + "*"); private static final Automaton MANAGE_ILM_AUTOMATON = patterns("indices:admin/ilm/*"); public static final IndexPrivilege NONE = new IndexPrivilege("none", Automatons.EMPTY); @@ -78,6 +80,7 @@ public final class IndexPrivilege extends Privilege { public static final IndexPrivilege CREATE_INDEX = new IndexPrivilege("create_index", CREATE_INDEX_AUTOMATON); public static final IndexPrivilege VIEW_METADATA = new IndexPrivilege("view_index_metadata", VIEW_METADATA_AUTOMATON); public static final IndexPrivilege MANAGE_FOLLOW_INDEX = new IndexPrivilege("manage_follow_index", MANAGE_FOLLOW_INDEX_AUTOMATON); + public static final IndexPrivilege MANAGE_LEADER_INDEX = new IndexPrivilege("manage_leader_index", MANAGE_LEADER_INDEX_AUTOMATON); public static final IndexPrivilege MANAGE_ILM = new IndexPrivilege("manage_ilm", MANAGE_ILM_AUTOMATON); private static final Map VALUES = MapBuilder.newMapBuilder() @@ -95,6 +98,7 @@ public final class IndexPrivilege extends Privilege { .put("view_index_metadata", VIEW_METADATA) .put("read_cross_cluster", READ_CROSS_CLUSTER) .put("manage_follow_index", MANAGE_FOLLOW_INDEX) + .put("manage_leader_index", MANAGE_LEADER_INDEX) .put("manage_ilm", MANAGE_ILM) .immutableMap(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java index ec3305a963c55..dda81e6b86197 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.security.authz.privilege; +import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.index.seqno.RetentionLeaseBackgroundSyncAction; import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; import org.elasticsearch.transport.TransportActionProxy; @@ -29,6 +30,9 @@ public final class SystemPrivilege extends Privilege { "indices:admin/seq_no/global_checkpoint_sync*", // needed for global checkpoint syncs RetentionLeaseSyncAction.ACTION_NAME + "*", // needed for retention lease syncs RetentionLeaseBackgroundSyncAction.ACTION_NAME + "*", // needed for background retention lease syncs + RetentionLeaseActions.Add.ACTION_NAME + "*", // needed for CCR to add retention leases + RetentionLeaseActions.Remove.ACTION_NAME + "*", // needed for CCR to remove retention leases + RetentionLeaseActions.Renew.ACTION_NAME + "*", // needed for CCR to renew retention leases "indices:admin/settings/update" // needed for DiskThresholdMonitor.markIndicesReadOnly ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/RestrictedIndicesNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/RestrictedIndicesNames.java index fc03831d1445b..439168350fc7e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/RestrictedIndicesNames.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/RestrictedIndicesNames.java @@ -9,19 +9,20 @@ import org.apache.lucene.util.automaton.Automaton; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.xpack.core.security.support.Automatons; -import org.elasticsearch.xpack.core.upgrade.IndexUpgradeCheckVersion; import java.util.Collections; import java.util.Set; public final class RestrictedIndicesNames { - public static final String AUDIT_INDEX_NAME_PREFIX = ".security_audit_log"; - public static final String INTERNAL_SECURITY_INDEX = ".security-" + IndexUpgradeCheckVersion.UPRADE_VERSION; + public static final String INTERNAL_SECURITY_INDEX_6 = ".security-6"; + public static final String INTERNAL_SECURITY_INDEX_7 = ".security-7"; public static final String SECURITY_INDEX_NAME = ".security"; - public static final Set NAMES_SET = Collections.unmodifiableSet(Sets.newHashSet(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)); - public static final Automaton NAMES_AUTOMATON = Automatons.patterns(NAMES_SET); + public static final Set RESTRICTED_NAMES = Collections.unmodifiableSet( + Sets.newHashSet(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX_6, INTERNAL_SECURITY_INDEX_7)); + + public static final Automaton NAMES_AUTOMATON = Automatons.patterns(RESTRICTED_NAMES); private RestrictedIndicesNames() { } -} \ No newline at end of file +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertParsingUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertParsingUtils.java index 11843a40020a2..0a902aba22cb6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertParsingUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertParsingUtils.java @@ -92,6 +92,9 @@ public static Certificate[] readCertificates(List certPaths) throws 
Certif for (Path path : certPaths) { try (InputStream input = Files.newInputStream(path)) { certificates.addAll((Collection) certFactory.generateCertificates(input)); + if (certificates.isEmpty()) { + throw new CertificateException("failed to parse any certificates from [" + path.toAbsolutePath() + "]"); + } } } return certificates.toArray(new Certificate[0]); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java index 421b30baac7b6..1e67e15a33c59 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PemUtils.java @@ -374,7 +374,7 @@ private static byte[] possiblyDecryptPKCS1Key(Map pemHeaders, St * defined in RFC 1423. RFC 1423 only defines DES-CBS and triple DES (EDE) in CBC mode. AES in CBC mode is also widely used though ( 3 * different variants of 128, 192, 256 bit keys ) * - * @param dekHeaderValue The value of the the DEK-Info PEM header + * @param dekHeaderValue The value of the DEK-Info PEM header * @param password The password with which the key is encrypted * @return a cipher of the appropriate algorithm and parameters to be used for decryption * @throws GeneralSecurityException if the algorithm is not available in the used security provider, or if the key is inappropriate diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index 3611b6663a38f..772644c7e9981 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -99,7 +99,7 @@ public class SSLService { private final Map sslConfigurations; /** - * A mapping from a SSLConfiguration to a pre-built context. + * A mapping from an SSLConfiguration to a pre-built context. *

* This is managed separately to the {@link #sslConfigurations} map, so that a single configuration (by object equality) * always maps to the same {@link SSLContextHolder}, even if it is being used within a different context-name. @@ -251,7 +251,7 @@ public SSLEngine createSSLEngine(SSLConfiguration configuration, String host, in String[] supportedProtocols = configuration.supportedProtocols().toArray(Strings.EMPTY_ARRAY); SSLParameters parameters = new SSLParameters(ciphers, supportedProtocols); if (configuration.verificationMode().isHostnameVerificationEnabled() && host != null) { - // By default, a SSLEngine will not perform hostname verification. In order to perform hostname verification + // By default, an SSLEngine will not perform hostname verification. In order to perform hostname verification // we need to specify a EndpointIdentificationAlgorithm. We use the HTTPS algorithm to prevent against // man in the middle attacks for all of our connections. parameters.setEndpointIdentificationAlgorithm("HTTPS"); @@ -301,7 +301,7 @@ SSLContextHolder sslContextHolder(SSLConfiguration sslConfiguration) { Objects.requireNonNull(sslConfiguration, "SSL Configuration cannot be null"); SSLContextHolder holder = sslContexts.get(sslConfiguration); if (holder == null) { - throw new IllegalArgumentException("did not find a SSLContext for [" + sslConfiguration.toString() + "]"); + throw new IllegalArgumentException("did not find an SSLContext for [" + sslConfiguration.toString() + "]"); } return holder; } @@ -627,7 +627,7 @@ static void invalidateSessions(SSLSessionContext sslSessionContext) { while (sessionIds.hasMoreElements()) { byte[] sessionId = sessionIds.nextElement(); SSLSession session = sslSessionContext.getSession(sessionId); - // a SSLSession could be null as there is no lock while iterating, the session cache + // an SSLSession could be null as there is no lock while iterating, the session cache // could have evicted a value, the session could be timed out, or the session could // have already been invalidated, which removes the value from the session cache in the // sun implementation diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java index 6f6592bbdfca2..a042aeb4a2359 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java @@ -9,6 +9,7 @@ import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.XPackSettings; /** @@ -19,10 +20,11 @@ public final class TLSLicenseBootstrapCheck implements BootstrapCheck { public BootstrapCheckResult check(BootstrapContext context) { if (XPackSettings.TRANSPORT_SSL_ENABLED.get(context.settings()) == false) { License license = LicenseService.getLicense(context.metaData()); - if (license != null && license.isProductionLicense()) { - return BootstrapCheckResult.failure("Transport SSL must be enabled for setups with production licenses. 
Please set " + - "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting [xpack.security.enabled] " + - "to [false]"); + if (XPackLicenseState.isTransportTlsRequired(license, context.settings())) { + return BootstrapCheckResult.failure("Transport SSL must be enabled if security is enabled on a [" + + license.operationMode().description() + "] license. " + + "Please set [xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + + "[xpack.security.enabled] to [false]"); } } return BootstrapCheckResult.success(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/UpgradeField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/UpgradeField.java index 48060c5772550..cedb09db49527 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/UpgradeField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/UpgradeField.java @@ -10,7 +10,7 @@ public final class UpgradeField { // this is the required index.format setting for 6.0 services (watcher and security) to start up // this index setting is set by the upgrade API or automatically when a 6.0 index template is created - private static final int EXPECTED_INDEX_FORMAT_VERSION = 6; + public static final int EXPECTED_INDEX_FORMAT_VERSION = 6; private UpgradeField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/TriggeredWatchStoreField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/TriggeredWatchStoreField.java index ac4a0c16c9c4e..dba835bb2f03f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/TriggeredWatchStoreField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/TriggeredWatchStoreField.java @@ -8,5 +8,4 @@ public final class TriggeredWatchStoreField { public static final String INDEX_NAME = ".triggered_watches"; - public static final String DOC_TYPE = "doc"; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java index b42506b81b3d4..4007b06ee7eca 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java @@ -17,10 +17,14 @@ public final class WatcherIndexTemplateRegistryField { // Note: if you change this, also inform the kibana team around the watcher-ui public static final String INDEX_TEMPLATE_VERSION = "9"; public static final String HISTORY_TEMPLATE_NAME = ".watch-history-" + INDEX_TEMPLATE_VERSION; + public static final String HISTORY_TEMPLATE_NAME_NO_ILM = ".watch-history-no-ilm-" + INDEX_TEMPLATE_VERSION; public static final String TRIGGERED_TEMPLATE_NAME = ".triggered_watches"; public static final String WATCHES_TEMPLATE_NAME = ".watches"; public static final String[] TEMPLATE_NAMES = new String[] { - HISTORY_TEMPLATE_NAME, TRIGGERED_TEMPLATE_NAME, WATCHES_TEMPLATE_NAME + HISTORY_TEMPLATE_NAME, TRIGGERED_TEMPLATE_NAME, WATCHES_TEMPLATE_NAME + }; + public static final String[] TEMPLATE_NAMES_NO_ILM = new String[] { + HISTORY_TEMPLATE_NAME_NO_ILM, TRIGGERED_TEMPLATE_NAME, WATCHES_TEMPLATE_NAME }; private 
WatcherIndexTemplateRegistryField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java index f7942ec165e82..29366574134fb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java @@ -26,7 +26,6 @@ public class Watch implements ToXContentObject { public static final String INCLUDE_STATUS_KEY = "include_status"; public static final String INDEX = ".watches"; - public static final String DOC_TYPE = "doc"; private final String id; private final Trigger trigger; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java index 0da93fe9ab2d7..df63022aa5734 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java @@ -80,6 +80,10 @@ public ZonedDateTime lastChecked() { return lastChecked; } + public ZonedDateTime lastMetCondition() { + return lastMetCondition; + } + public ActionStatus actionStatus(String actionId) { return actions.get(actionId); } @@ -252,10 +256,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Field.STATE.getPreferredName(), state, params); } if (lastChecked != null) { - builder.timeField(Field.LAST_CHECKED.getPreferredName(), lastChecked); + writeDate(Field.LAST_CHECKED.getPreferredName(), builder, lastChecked); } if (lastMetCondition != null) { - builder.timeField(Field.LAST_MET_CONDITION.getPreferredName(), lastMetCondition); + writeDate(Field.LAST_MET_CONDITION.getPreferredName(), builder, lastMetCondition); } if (actions != null) { builder.startObject(Field.ACTIONS.getPreferredName()); diff --git a/x-pack/plugin/core/src/main/resources/logstash-management.json b/x-pack/plugin/core/src/main/resources/logstash-management.json index 1456b2d7b5e08..d9528238dc0fb 100644 --- a/x-pack/plugin/core/src/main/resources/logstash-management.json +++ b/x-pack/plugin/core/src/main/resources/logstash-management.json @@ -8,7 +8,7 @@ } }, "mappings" : { - "doc" : { + "_doc" : { "_meta": { "logstash-version": "${logstash.template.version}" }, diff --git a/x-pack/plugin/core/src/main/resources/monitoring-alerts.json b/x-pack/plugin/core/src/main/resources/monitoring-alerts-7.json similarity index 95% rename from x-pack/plugin/core/src/main/resources/monitoring-alerts.json rename to x-pack/plugin/core/src/main/resources/monitoring-alerts-7.json index 5ab085b453075..f458ae6ad85ff 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-alerts.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-alerts-7.json @@ -1,17 +1,17 @@ { "index_patterns": [ ".monitoring-alerts-${monitoring.template.version}" ], - "version": 7000099, + "version": 7000199, "settings": { "index": { "number_of_shards": 1, "number_of_replicas": 0, "auto_expand_replicas": "0-1", - "format": 6, + "format": 7, "codec": "best_compression" } }, "mappings": { - "doc": { + "_doc": { "dynamic": false, "properties": { "timestamp": { diff --git a/x-pack/plugin/core/src/main/resources/monitoring-beats.json b/x-pack/plugin/core/src/main/resources/monitoring-beats.json index 43ccfedd923dd..2f9bc84238ee9 100644 --- 
a/x-pack/plugin/core/src/main/resources/monitoring-beats.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-beats.json @@ -5,13 +5,13 @@ "settings": { "index.auto_expand_replicas": "0-1", "index.codec": "best_compression", - "index.format": 6, + "index.format": 7, "index.number_of_replicas": 0, "index.number_of_shards": 1 }, - "version": 7000099, + "version": 7000199, "mappings": { - "doc": { + "_doc": { "dynamic": false, "properties": { "beats_state": { diff --git a/x-pack/plugin/core/src/main/resources/monitoring-es.json b/x-pack/plugin/core/src/main/resources/monitoring-es.json index 426262cd48c03..326b8f3c7a8c7 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-es.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-es.json @@ -1,15 +1,15 @@ { "index_patterns": [ ".monitoring-es-${monitoring.template.version}-*" ], - "version": 7000099, + "version": 7000199, "settings": { "index.number_of_shards": 1, "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", - "index.format": 6, + "index.format": 7, "index.codec": "best_compression" }, "mappings": { - "doc": { + "_doc": { "date_detection": false, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/src/main/resources/monitoring-kibana.json b/x-pack/plugin/core/src/main/resources/monitoring-kibana.json index c220e95e182e3..6ae61f6d6c64b 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-kibana.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-kibana.json @@ -1,15 +1,15 @@ { "index_patterns": [ ".monitoring-kibana-${monitoring.template.version}-*" ], - "version": 7000099, + "version": 7000199, "settings": { "index.number_of_shards": 1, "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", - "index.format": 6, + "index.format": 7, "index.codec": "best_compression" }, "mappings": { - "doc": { + "_doc": { "dynamic": false, "properties": { "cluster_uuid": { @@ -50,6 +50,13 @@ }, "kibana_stats": { "properties": { + "usage": { + "properties": { + "index": { + "type": "keyword" + } + } + }, "kibana": { "properties": { "uuid": { diff --git a/x-pack/plugin/core/src/main/resources/monitoring-logstash.json b/x-pack/plugin/core/src/main/resources/monitoring-logstash.json index 5ae3e9a9eb85a..40f5b2ca217b6 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-logstash.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-logstash.json @@ -1,15 +1,15 @@ { "index_patterns": [ ".monitoring-logstash-${monitoring.template.version}-*" ], - "version": 7000099, + "version": 7000199, "settings": { "index.number_of_shards": 1, "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", - "index.format": 6, + "index.format": 7, "index.codec": "best_compression" }, "mappings": { - "doc": { + "_doc": { "dynamic": false, "properties": { "cluster_uuid": { diff --git a/x-pack/plugin/core/src/main/resources/security-index-template.json b/x-pack/plugin/core/src/main/resources/security-index-template.json index 183ffff4ea534..2f7cfe5704755 100644 --- a/x-pack/plugin/core/src/main/resources/security-index-template.json +++ b/x-pack/plugin/core/src/main/resources/security-index-template.json @@ -33,7 +33,7 @@ } }, "mappings" : { - "doc" : { + "_doc" : { "_meta": { "security-version": "${security.template.version}" }, @@ -59,7 +59,7 @@ }, "metadata" : { "type" : "object", - "dynamic" : true + "dynamic" : false }, "enabled": { "type": "boolean" @@ -179,7 +179,7 @@ }, "metadata" : { "type" : "object", - "dynamic" : true + "dynamic" : false }, "realm" : { "type" : 
"keyword" @@ -188,7 +188,7 @@ }, "rules" : { "type" : "object", - "dynamic" : true + "dynamic" : false }, "refresh_token" : { "type" : "object", @@ -236,7 +236,7 @@ }, "metadata" : { "type" : "object", - "dynamic" : true + "dynamic" : false }, "authentication" : { "type" : "binary" diff --git a/x-pack/plugin/core/src/main/resources/security_audit_log.json b/x-pack/plugin/core/src/main/resources/security_audit_log.json deleted file mode 100644 index 75c25ff53e250..0000000000000 --- a/x-pack/plugin/core/src/main/resources/security_audit_log.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "index_patterns": [ ".security_audit_log*" ], - "order": 1000, - "settings": { - "index.format": 6 - }, - "mappings": { - "doc": { - "_meta": { - "security-version": "${security.template.version}" - }, - "dynamic" : "strict", - "properties": { - "@timestamp": { - "type": "date", - "format": "date_time", - "doc_values": true - }, - "node_name": { - "type": "keyword" - }, - "node_host_name": { - "type": "keyword" - }, - "node_host_address": { - "type": "keyword" - }, - "layer": { - "type": "keyword" - }, - "event_type": { - "type": "keyword" - }, - "origin_address": { - "type": "keyword" - }, - "origin_type": { - "type": "keyword" - }, - "principal": { - "type": "keyword" - }, - "roles": { - "type": "keyword" - }, - "run_by_principal": { - "type": "keyword" - }, - "run_as_principal": { - "type": "keyword" - }, - "action": { - "type": "keyword" - }, - "indices": { - "type": "keyword" - }, - "request": { - "type": "keyword" - }, - "request_body": { - "type": "keyword", - "index": false, - "doc_values": false - }, - "uri": { - "type": "keyword" - }, - "realm": { - "type": "keyword" - }, - "run_by_realm": { - "type": "keyword" - }, - "run_as_realm": { - "type": "keyword" - }, - "transport_profile": { - "type": "keyword" - }, - "rule": { - "type": "keyword" - }, - "opaque_id": { - "type": "keyword" - } - } - } - } -} diff --git a/x-pack/plugin/core/src/main/resources/triggered-watches.json b/x-pack/plugin/core/src/main/resources/triggered-watches.json index fb345260008ca..c57d3c23a3197 100644 --- a/x-pack/plugin/core/src/main/resources/triggered-watches.json +++ b/x-pack/plugin/core/src/main/resources/triggered-watches.json @@ -9,7 +9,7 @@ "index.priority": 900 }, "mappings": { - "doc": { + "_doc": { "dynamic" : "strict", "properties": { "trigger_event": { diff --git a/x-pack/plugin/core/src/main/resources/watch-history-no-ilm.json b/x-pack/plugin/core/src/main/resources/watch-history-no-ilm.json new file mode 100644 index 0000000000000..8e91e02549c3c --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/watch-history-no-ilm.json @@ -0,0 +1,615 @@ +{ + "index_patterns": [ ".watcher-history-${xpack.watcher.template.version}*" ], + "order": 2147483646, + "settings": { + "index.number_of_shards": 1, + "index.number_of_replicas": 0, + "index.auto_expand_replicas": "0-1", + "index.format": 6 + }, + "mappings": { + "doc": { + "_meta": { + "watcher-history-version": "${xpack.watcher.template.version}" + }, + "dynamic_templates": [ + { + "disabled_payload_fields": { + "path_match": "result\\.(input(\\..+)*|(transform(\\..+)*)|(actions\\.transform(\\..+)*))\\.payload", + "match_pattern": "regex", + "mapping": { + "type": "object", + "enabled": false + } + } + }, + { + "disabled_search_request_body_fields": { + "path_match": "result\\.(input(\\..+)*|(transform(\\..+)*)|(actions\\.transform(\\..+)*))\\.search\\.request\\.(body|template)", + "match_pattern": "regex", + "mapping": { + "type": "object", + "enabled": false + } + } + 
}, + { + "disabled_exception_fields": { + "path_match": "result\\.(input(\\..+)*|(transform(\\..+)*)|(actions\\.transform(\\..+)*)|actions)\\.error", + "match_pattern": "regex", + "mapping": { + "type": "object", + "enabled": false + } + } + }, + { + "disabled_jira_custom_fields": { + "path_match": "result.actions.jira.fields.customfield_*", + "mapping": { + "type": "object", + "enabled": false + } + } + } + ], + "dynamic": false, + "properties": { + "watch_id": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "trigger_event": { + "type": "object", + "dynamic": true, + "properties": { + "type" : { + "type" : "keyword" + }, + "triggered_time": { + "type": "date" + }, + "manual": { + "type": "object", + "dynamic": true, + "properties": { + "schedule": { + "type": "object", + "dynamic": true, + "properties": { + "scheduled_time": { + "type": "date" + } + } + } + } + }, + "schedule": { + "type": "object", + "dynamic": true, + "properties": { + "scheduled_time": { + "type": "date" + } + } + } + } + }, + "vars" : { + "type" : "object", + "enabled" : false + }, + "input": { + "type": "object", + "enabled": false + }, + "condition": { + "type": "object", + "enabled": false + }, + "state": { + "type": "keyword" + }, + "status": { + "type": "object", + "enabled" : false, + "dynamic" : true + }, + "messages": { + "type": "text" + }, + "user": { + "type": "text" + }, + "exception" : { + "type" : "object", + "enabled" : false + }, + "result": { + "type": "object", + "dynamic": true, + "properties": { + "execution_time": { + "type": "date" + }, + "execution_duration": { + "type": "long" + }, + "input": { + "type": "object", + "dynamic": true, + "properties": { + "type" : { + "type" : "keyword" + }, + "status" : { + "type" : "keyword" + }, + "payload" : { + "type" : "object", + "enabled" : false + }, + "search": { + "type": "object", + "dynamic": true, + "properties": { + "request": { + "type": "object", + "dynamic": true, + "properties": { + "search_type": { + "type": "keyword" + }, + "indices": { + "type": "keyword" + }, + "types": { + "type": "keyword" + } + } + } + } + }, + "http": { + "type": "object", + "dynamic": true, + "properties": { + "request": { + "type": "object", + "dynamic": true, + "properties": { + "path": { + "type": "keyword" + }, + "host": { + "type": "keyword" + } + } + } + } + } + } + }, + "condition" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "type" : { + "type" : "keyword" + }, + "status" : { + "type" : "keyword" + }, + "met" : { + "type" : "boolean" + }, + "compare" : { + "type" : "object", + "enabled" : false + }, + "array_compare" : { + "type" : "object", + "enabled" : false + }, + "script" : { + "type" : "object", + "enabled" : false + } + } + }, + "transform" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "type" : { + "type" : "keyword" + }, + "search" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "request" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "indices" : { + "type" : "keyword" + }, + "types" : { + "type" : "keyword" + } + } + } + } + } + } + }, + "actions": { + "type": "nested", + "include_in_parent": true, + "dynamic": true, + "properties": { + "id" : { + "type" : "keyword" + }, + "type" : { + "type" : "keyword" + }, + "status" : { + "type" : "keyword" + }, + "reason" : { + "type" : "keyword" + }, + "email": { + "type": "object", + "dynamic": true, + "properties": { + "message": { + "type": "object", + "dynamic": true, + "properties": { + "id": { + "type": "keyword" 
+ }, + "from": { + "type": "keyword" + }, + "reply_to": { + "type": "keyword" + }, + "to": { + "type": "keyword" + }, + "cc": { + "type": "keyword" + }, + "bcc": { + "type": "keyword" + } + } + } + } + }, + "webhook": { + "type": "object", + "dynamic": true, + "properties": { + "request": { + "type": "object", + "dynamic": true, + "properties": { + "path": { + "type": "keyword" + }, + "host": { + "type": "keyword" + } + } + } + } + }, + "index": { + "type": "object", + "dynamic": true, + "properties": { + "response": { + "type": "object", + "dynamic": true, + "properties": { + "index": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "id": { + "type": "keyword" + } + } + } + } + }, + "hipchat" : { + "type": "object", + "dynamic": true, + "properties": { + "account": { + "type": "keyword" + }, + "sent_messages": { + "type": "nested", + "include_in_parent": true, + "dynamic": true, + "properties": { + "status": { + "type": "keyword" + }, + "reason": { + "type": "text" + }, + "request" : { + "type" : "object", + "enabled" : false + }, + "response" : { + "type" : "object", + "enabled" : false + }, + "room" : { + "type": "keyword" + }, + "user" : { + "type": "keyword" + }, + "message" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "message_format" : { + "type" : "keyword" + }, + "color" : { + "type" : "keyword" + }, + "notify" : { + "type" : "boolean" + }, + "message" : { + "type" : "text" + }, + "from" : { + "type" : "text" + } + } + } + } + } + } + }, + "jira" : { + "type": "object", + "dynamic": true, + "properties": { + "account": { + "type": "keyword" + }, + "reason": { + "type": "text" + }, + "request" : { + "type" : "object", + "enabled" : false + }, + "response" : { + "type" : "object", + "enabled" : false + }, + "fields": { + "type": "object", + "dynamic": true, + "properties": { + "summary": { + "type": "text" + }, + "description": { + "type": "text" + }, + "labels" : { + "type": "text" + }, + "project" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "key" : { + "type" : "keyword" + }, + "id" : { + "type" : "keyword" + } + } + }, + "issuetype" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "name" : { + "type": "keyword" + }, + "id" : { + "type" : "keyword" + } + } + } + } + }, + "result": { + "type": "object", + "dynamic": true, + "properties" : { + "id" : { + "type" : "keyword" + }, + "key" : { + "type" : "keyword" + }, + "self" : { + "type" : "keyword" + } + } + } + } + }, + "slack" : { + "type": "object", + "dynamic": true, + "properties": { + "account": { + "type": "keyword" + }, + "sent_messages": { + "type": "nested", + "include_in_parent": true, + "dynamic": true, + "properties": { + "status": { + "type": "keyword" + }, + "reason": { + "type": "text" + }, + "request" : { + "type" : "object", + "enabled" : false + }, + "response" : { + "type" : "object", + "enabled" : false + }, + "to" : { + "type": "keyword" + }, + "message" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "from" : { + "type" : "text" + }, + "icon" : { + "type" : "keyword" + }, + "text" : { + "type" : "text" + }, + "attachments" : { + "type" : "nested", + "include_in_parent": true, + "dynamic" : true, + "properties" : { + "color" : { + "type" : "keyword" + }, + "fields" : { + "properties" : { + "value" : { + "type" : "text" + } + } + } + } + } + } + } + } + } + } + }, + "pagerduty" : { + "type": "object", + "dynamic": true, + "properties": { + "account": { + "type": "keyword" + }, + "sent_event": { + "type": "nested", + 
"include_in_parent": true, + "dynamic": true, + "properties": { + "reason": { + "type": "text" + }, + "request" : { + "type" : "object", + "enabled" : false + }, + "response" : { + "type" : "object", + "enabled" : false + }, + "event" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "type" : { + "type" : "keyword" + }, + "client" : { + "type" : "text" + }, + "client_url" : { + "type" : "keyword" + }, + "account" : { + "type" : "keyword" + }, + "attach_payload" : { + "type" : "boolean" + }, + "incident_key" : { + "type" : "keyword" + }, + "description" : { + "type" : "text" + }, + "context" : { + "type" : "nested", + "include_in_parent": true, + "dynamic" : true, + "properties" : { + "type" : { + "type" : "keyword" + }, + "href" : { + "type" : "keyword" + }, + "src" : { + "type" : "keyword" + }, + "alt" : { + "type" : "text" + } + } + } + } + } + } + } + } + } + } + } + } + }, + "metadata": { + "type": "object", + "dynamic": true + } + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/watch-history.json b/x-pack/plugin/core/src/main/resources/watch-history.json index db6fd4aff950a..9c5919f13a171 100644 --- a/x-pack/plugin/core/src/main/resources/watch-history.json +++ b/x-pack/plugin/core/src/main/resources/watch-history.json @@ -9,7 +9,7 @@ "index.format": 6 }, "mappings": { - "doc": { + "_doc": { "_meta": { "watcher-history-version": "${xpack.watcher.template.version}" }, @@ -333,63 +333,6 @@ } } }, - "hipchat" : { - "type": "object", - "dynamic": true, - "properties": { - "account": { - "type": "keyword" - }, - "sent_messages": { - "type": "nested", - "include_in_parent": true, - "dynamic": true, - "properties": { - "status": { - "type": "keyword" - }, - "reason": { - "type": "text" - }, - "request" : { - "type" : "object", - "enabled" : false - }, - "response" : { - "type" : "object", - "enabled" : false - }, - "room" : { - "type": "keyword" - }, - "user" : { - "type": "keyword" - }, - "message" : { - "type" : "object", - "dynamic" : true, - "properties" : { - "message_format" : { - "type" : "keyword" - }, - "color" : { - "type" : "keyword" - }, - "notify" : { - "type" : "boolean" - }, - "message" : { - "type" : "text" - }, - "from" : { - "type" : "text" - } - } - } - } - } - } - }, "jira" : { "type": "object", "dynamic": true, diff --git a/x-pack/plugin/core/src/main/resources/watches.json b/x-pack/plugin/core/src/main/resources/watches.json index ad744c44f1119..d02c2ffa24c5d 100644 --- a/x-pack/plugin/core/src/main/resources/watches.json +++ b/x-pack/plugin/core/src/main/resources/watches.json @@ -9,7 +9,7 @@ "index.priority": 800 }, "mappings": { - "doc": { + "_doc": { "dynamic" : "strict", "properties": { "status": { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java index 094c79efb50a9..c71cf35960d67 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java @@ -9,11 +9,13 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.IndicesOptions; 
import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -26,6 +28,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchService; @@ -48,8 +51,10 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; public class FrozenIndexTests extends ESSingleNodeTestCase { @@ -370,4 +375,36 @@ public void testFreezeEmptyIndexWithTranslogOps() throws Exception { assertAcked(new XPackClient(client()).freeze(new TransportFreezeIndexAction.FreezeRequest(indexName))); assertIndexFrozen(indexName); } + + public void testRecoveryState() throws ExecutionException, InterruptedException { + final String indexName = "index_recovery_state"; + createIndex(indexName, Settings.builder() + .put("index.number_of_replicas", 0) + .build()); + + final long nbDocs = randomIntBetween(0, 50); + for (long i = 0; i < nbDocs; i++) { + final IndexResponse indexResponse = client().prepareIndex(indexName, "_doc", Long.toString(i)).setSource("field", i).get(); + assertThat(indexResponse.status(), is(RestStatus.CREATED)); + } + + assertAcked(new XPackClient(client()).freeze(new TransportFreezeIndexAction.FreezeRequest(indexName))); + assertIndexFrozen(indexName); + + final IndexMetaData indexMetaData = client().admin().cluster().prepareState().get().getState().metaData().index(indexName); + final IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(indexMetaData.getIndex()); + for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) { + final IndexShard indexShard = indexService.getShardOrNull(i); + assertThat("Shard [" + i + "] is missing for index " + indexMetaData.getIndex(), indexShard, notNullValue()); + final RecoveryState recoveryState = indexShard.recoveryState(); + assertThat(recoveryState.getRecoverySource(), is(RecoverySource.ExistingStoreRecoverySource.INSTANCE)); + assertThat(recoveryState.getStage(), is(RecoveryState.Stage.DONE)); + assertThat(recoveryState.getTargetNode(), notNullValue()); + assertThat(recoveryState.getIndex().totalFileCount(), greaterThan(0)); + assertThat(recoveryState.getIndex().reusedFileCount(), greaterThan(0)); + assertThat(recoveryState.getTranslog().recoveredOperations(), equalTo(0)); + assertThat(recoveryState.getTranslog().totalOperations(), equalTo(0)); + assertThat(recoveryState.getTranslog().recoveredPercent(), equalTo(100.0f)); + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java new file mode 100644 index 0000000000000..1f65efc630915 --- /dev/null +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + + +import org.elasticsearch.bootstrap.JavaVersion; +import org.elasticsearch.test.ESTestCase; + +import java.time.LocalDate; +import java.time.ZoneOffset; + +import static org.hamcrest.Matchers.startsWith; + +/** + * Due to changes in JDK9 where locale data is used from CLDR, the licence message will differ in jdk 8 and jdk9+ + * https://openjdk.java.net/jeps/252 + */ +public class LicenseServiceTests extends ESTestCase { + + public void testLogExpirationWarningOnJdk9AndNewer() { + assumeTrue("this is for JDK9+", JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0); + + long time = LocalDate.of(2018, 11, 15).atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli(); + final boolean expired = randomBoolean(); + final String message = LicenseService.buildExpirationMessage(time, expired).toString(); + if (expired) { + assertThat(message, startsWith("LICENSE [EXPIRED] ON [THU, NOV 15, 2018].\n")); + } else { + assertThat(message, startsWith("License [will expire] on [Thu, Nov 15, 2018].\n")); + } + } + + public void testLogExpirationWarningOnJdk8() { + assumeTrue("this is for JDK8 only", JavaVersion.current().equals(JavaVersion.parse("8"))); + + long time = LocalDate.of(2018, 11, 15).atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli(); + final boolean expired = randomBoolean(); + final String message = LicenseService.buildExpirationMessage(time, expired).toString(); + if (expired) { + assertThat(message, startsWith("LICENSE [EXPIRED] ON [THURSDAY, NOVEMBER 15, 2018].\n")); + } else { + assertThat(message, startsWith("License [will expire] on [Thursday, November 15, 2018].\n")); + } + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index 8ad42d5afe636..bc8d7817f4d69 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -62,6 +62,10 @@ public static OperationMode randomTrialOrPlatinumMode() { return randomFrom(TRIAL, PLATINUM); } + public static OperationMode randomTrialGoldOrPlatinumMode() { + return randomFrom(TRIAL, GOLD, PLATINUM); + } + public static OperationMode randomTrialBasicStandardGoldOrPlatinumMode() { return randomFrom(TRIAL, BASIC, STANDARD, GOLD, PLATINUM); } @@ -92,17 +96,41 @@ public void testTransportSslDoesNotAutomaticallyEnableSecurityOnTrialLicense() { assertSecurityNotAllowed(licenseState); } - public void testSecurityBasic() { - XPackLicenseState licenseState = new XPackLicenseState(randomFrom(Settings.EMPTY, - Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build())); + public void testSecurityBasicWithoutExplicitSecurityEnabled() { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); licenseState.update(BASIC, true, null); - assertSecurityNotAllowed(licenseState); + assertThat(licenseState.isAuthAllowed(), is(false)); + assertThat(licenseState.isIpFilteringAllowed(), is(false)); + assertThat(licenseState.isAuditingAllowed(), is(false)); + 
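The two LicenseServiceTests cases added above exist because JDK 9 switched the default locale data to CLDR (JEP 252), so the same expiration timestamp can render differently on JDK 8 and on JDK 9+. A minimal, self-contained sketch of that effect follows; the formatter style and locale are assumptions for illustration, not the exact ones LicenseService uses.

```java
import java.time.LocalDate;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.time.format.FormatStyle;
import java.util.Locale;

// Illustrative only: locale-sensitive date formatting depends on the JDK's
// locale data provider (legacy COMPAT data on JDK 8, CLDR on JDK 9+ per JEP 252),
// which is why the tests assert a different message prefix per JDK.
public final class LocaleDataDemo {
    public static void main(String[] args) {
        LocalDate date = LocalDate.of(2018, 11, 15); // same date the tests assert on
        DateTimeFormatter formatter = DateTimeFormatter
            .ofLocalizedDate(FormatStyle.FULL)
            .withLocale(Locale.ENGLISH);
        // The rendered day and month names can differ between JDKs.
        System.out.println(formatter.format(date.atStartOfDay(ZoneOffset.UTC)));
    }
}
```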
assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NONE)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); + + assertThat(licenseState.isSecurityAvailable(), is(true)); + assertThat(licenseState.isSecurityDisabledByLicenseDefaults(), is(true)); } - public void testSecurityBasicExpired() { - XPackLicenseState licenseState = new XPackLicenseState(randomFrom(Settings.EMPTY, - Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build())); + public void testSecurityBasicWithExplicitSecurityEnabled() { + final Settings settings = Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build(); + XPackLicenseState licenseState = new XPackLicenseState(settings); + licenseState.update(BASIC, true, null); + + assertThat(licenseState.isAuthAllowed(), is(true)); + assertThat(licenseState.isIpFilteringAllowed(), is(false)); + assertThat(licenseState.isAuditingAllowed(), is(false)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NATIVE)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); + + assertThat(licenseState.isSecurityAvailable(), is(true)); + assertThat(licenseState.isSecurityDisabledByLicenseDefaults(), is(false)); + } + + public void testSecurityDefaultBasicExpired() { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); licenseState.update(BASIC, false, null); assertThat(licenseState.isAuthAllowed(), is(false)); @@ -114,6 +142,20 @@ public void testSecurityBasicExpired() { assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); } + public void testSecurityEnabledBasicExpired() { + XPackLicenseState licenseState = new XPackLicenseState( + Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build()); + licenseState.update(BASIC, false, null); + + assertThat(licenseState.isAuthAllowed(), is(true)); + assertThat(licenseState.isIpFilteringAllowed(), is(false)); + assertThat(licenseState.isAuditingAllowed(), is(false)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(false)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NATIVE)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); + } + public void testSecurityStandard() { XPackLicenseState licenseState = new XPackLicenseState(randomFrom(Settings.EMPTY, Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build())); @@ -202,7 +244,7 @@ public void testNewTrialDefaultsSecurityOff() { XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); licenseState.update(TRIAL, true, VersionUtils.randomVersionBetween(random(), Version.V_6_3_0, Version.CURRENT)); - assertThat(licenseState.isSecurityDisabledByTrialLicense(), is(true)); + assertThat(licenseState.isSecurityDisabledByLicenseDefaults(), is(true)); assertSecurityNotAllowed(licenseState); } @@ -225,8 +267,12 @@ public void testSecurityAckAnyToTrialOrPlatinum() { assertAckMesssages(XPackField.SECURITY, randomMode(), randomTrialOrPlatinumMode(), 0); } - public void testSecurityAckTrialStandardGoldOrPlatinumToBasic() { - assertAckMesssages(XPackField.SECURITY, 
randomTrialStandardGoldOrPlatinumMode(), BASIC, 4); + public void testSecurityAckTrialGoldOrPlatinumToBasic() { + assertAckMesssages(XPackField.SECURITY, randomTrialGoldOrPlatinumMode(), BASIC, 7); + } + + public void testSecurityAckStandardToBasic() { + assertAckMesssages(XPackField.SECURITY, STANDARD, BASIC, 1); } public void testSecurityAckAnyToStandard() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java deleted file mode 100644 index 0e09a05fb967a..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoRequestTests.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.protocol.xpack.migration; - -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.AbstractWireSerializingTestCase; - -public class IndexUpgradeInfoRequestTests extends AbstractWireSerializingTestCase { - @Override - protected IndexUpgradeInfoRequest createTestInstance() { - int indexCount = randomInt(4); - String[] indices = new String[indexCount]; - for (int i = 0; i < indexCount; i++) { - indices[i] = randomAlphaOfLength(10); - } - IndexUpgradeInfoRequest request = new IndexUpgradeInfoRequest(indices); - if (randomBoolean()) { - request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); - } - return request; - } - - @Override - protected Writeable.Reader instanceReader() { - return IndexUpgradeInfoRequest::new; - } - - public void testNullIndices() { - expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest((String[])null)); - expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest().indices((String[])null)); - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java deleted file mode 100644 index 77ad986f0c355..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/migration/IndexUpgradeInfoResponseTests.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.protocol.xpack.migration; - -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.protocol.AbstractHlrcStreamableXContentTestCase; - -import java.util.AbstractMap; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.stream.Collectors; - -public class IndexUpgradeInfoResponseTests extends - AbstractHlrcStreamableXContentTestCase { - - @Override - public org.elasticsearch.client.migration.IndexUpgradeInfoResponse doHlrcParseInstance(XContentParser parser) { - return org.elasticsearch.client.migration.IndexUpgradeInfoResponse.fromXContent(parser); - } - - @Override - public IndexUpgradeInfoResponse convertHlrcToInternal(org.elasticsearch.client.migration.IndexUpgradeInfoResponse instance) { - final Map actions = instance.getActions(); - return new IndexUpgradeInfoResponse(actions.entrySet().stream().map( - e -> new AbstractMap.SimpleEntry<>(e.getKey(), UpgradeActionRequired.valueOf(e.getValue().name())) - ).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))); - } - - @Override - protected IndexUpgradeInfoResponse createBlankInstance() { - return new IndexUpgradeInfoResponse(); - } - - @Override - protected IndexUpgradeInfoResponse createTestInstance() { - return randomIndexUpgradeInfoResponse(randomIntBetween(0, 10)); - } - - private static IndexUpgradeInfoResponse randomIndexUpgradeInfoResponse(int numIndices) { - Map actions = new HashMap<>(); - for (int i = 0; i < numIndices; i++) { - actions.put(randomAlphaOfLength(5), randomFrom(UpgradeActionRequired.values())); - } - return new IndexUpgradeInfoResponse(actions); - } - - @Override - protected IndexUpgradeInfoResponse mutateInstance(IndexUpgradeInfoResponse instance) { - if (instance.getActions().size() == 0) { - return randomIndexUpgradeInfoResponse(1); - } - Map actions = new HashMap<>(instance.getActions()); - if (randomBoolean()) { - Iterator> iterator = actions.entrySet().iterator(); - iterator.next(); - iterator.remove(); - } else { - actions.put(randomAlphaOfLength(5), randomFrom(UpgradeActionRequired.values())); - } - return new IndexUpgradeInfoResponse(actions); - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java index 28244b523e129..ec1f002d05ba8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java @@ -48,6 +48,7 @@ import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; @@ -208,7 +209,8 @@ public void testRestoreMinmal() throws IOException { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("src_only", snapshotId), Version.CURRENT, indexId.getId())); IndexMetaData metaData = runAsSnapshot(threadPool, () -> repository.getSnapshotIndexMetaData(snapshotId, indexId)); - IndexShard restoredShard = newShard(shardRouting, metaData, null, SourceOnlySnapshotRepository.getEngineFactory(), () -> {}); + IndexShard restoredShard = newShard( + shardRouting, metaData, null, 
SourceOnlySnapshotRepository.getEngineFactory(), () -> {}, RetentionLeaseSyncer.EMPTY); restoredShard.mapperService().merge(shard.indexSettings().getIndexMetaData(), MapperService.MergeReason.MAPPING_RECOVERY); DiscoveryNode discoveryNode = new DiscoveryNode("node_g", buildNewFakeTransportAddress(), Version.CURRENT); restoredShard.markAsRecovering("test from snap", new RecoveryState(restoredShard.routingEntry(), discoveryNode, null)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java index 59ed1dcd17bbc..2eff0384b6f92 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.test.AbstractStreamableTestCase; @@ -28,10 +29,12 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.util.Collections.emptyList; import static org.hamcrest.Matchers.empty; import static org.hamcrest.core.IsEqual.equalTo; @@ -91,9 +94,9 @@ public void testFrom() throws IOException { Collections.unmodifiableList(Arrays.asList( (idx) -> indexIssueFound ? foundIssue : null )); - List> mlSettingsChecks = + List> mlSettingsChecks = Collections.unmodifiableList(Arrays.asList( - (idx) -> mlIssueFound ? foundIssue : null + (idx, unused) -> mlIssueFound ? foundIssue : null )); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( @@ -101,10 +104,10 @@ public void testFrom() throws IOException { nodeIssueFound ? 
Collections.singletonList( new NodesDeprecationCheckAction.NodeResponse(discoveryNode, Collections.singletonList(foundIssue))) - : Collections.emptyList(), - Collections.emptyList()); + : emptyList(), + emptyList()); - DeprecationInfoAction.Response response = DeprecationInfoAction.Response.from(state, + DeprecationInfoAction.Response response = DeprecationInfoAction.Response.from(state, NamedXContentRegistry.EMPTY, resolver, Strings.EMPTY_ARRAY, indicesOptions, datafeeds, nodeDeprecationIssues, indexSettingsChecks, clusterSettingsChecks, mlSettingsChecks); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepTests.java index ae0551020fbd1..2a549c2a32812 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SegmentCountStepTests.java @@ -138,7 +138,7 @@ public void onFailure(Exception e) { assertEquals(new SegmentCountStep.Info(0L), conditionInfo.get()); } - public void testIsConditionFails() { + public void testIsConditionIsTrueEvenWhenMoreSegments() { int maxNumSegments = randomIntBetween(3, 10); Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); Client client = Mockito.mock(Client.class); @@ -191,8 +191,8 @@ public void onFailure(Exception e) { } }); - assertFalse(conditionMetResult.get()); - assertEquals(new SegmentCountStep.Info(1L), conditionInfo.get()); + assertTrue(conditionMetResult.get()); + assertEquals(new SegmentCountStep.Info(0L), conditionInfo.get()); } public void testThrowsException() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java index 525744d68af10..794e7b2edaa0f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/SetSingleNodeAllocateStepTests.java @@ -202,6 +202,7 @@ public void testPerformActionAttrsNoNodesValid() { assertNoValidNode(indexMetaData, index, nodes); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42932") public void testPerformActionAttrsRequestFails() { int numAttrs = randomIntBetween(1, 10); String[][] validAttrs = new String[numAttrs][2]; @@ -213,7 +214,7 @@ public void testPerformActionAttrsRequestFails() { indexSettings.put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + attr[0], attr[1]); } IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10)).settings(indexSettings) - .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); Index index = indexMetaData.getIndex(); Set validNodeIds = new HashSet<>(); Settings validNodeSettings = Settings.EMPTY; @@ -225,17 +226,17 @@ public void testPerformActionAttrsRequestFails() { int nodePort = 9300 + i; String[] nodeAttr = randomFrom(validAttrs); Settings nodeSettings = Settings.builder().put(validNodeSettings).put(Node.NODE_NAME_SETTING.getKey(), nodeName) - .put(Node.NODE_ATTRIBUTES.getKey() + nodeAttr[0], nodeAttr[1]).build(); + 
.put(Node.NODE_ATTRIBUTES.getKey() + nodeAttr[0], nodeAttr[1]).build(); nodes.add(DiscoveryNode.createLocal(nodeSettings, new TransportAddress(TransportAddress.META_ADDRESS, nodePort), nodeId)); validNodeIds.add(nodeId); } ImmutableOpenMap.Builder indices = ImmutableOpenMap. builder().fPut(index.getName(), - indexMetaData); + indexMetaData); IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) - .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node_id_0", true, ShardRoutingState.STARTED)); + .addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node_id_0", true, ShardRoutingState.STARTED)); ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metaData(MetaData.builder().indices(indices.build())) - .nodes(nodes).routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build(); + .nodes(nodes).routingTable(RoutingTable.builder().add(indexRoutingTable).build()).build(); SetSingleNodeAllocateStep step = createRandomInstance(); Exception exception = new RuntimeException(); @@ -253,8 +254,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; assertSettingsRequestContainsValueFrom(request, - IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", validNodeIds, true, - indexMetaData.getIndex().getName()); + IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", validNodeIds, true, + indexMetaData.getIndex().getName()); listener.onFailure(exception); return null; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowFollowIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowFollowIndexStepTests.java index e92f1dce6a477..61c19a1d7d19a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowFollowIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/UnfollowFollowIndexStepTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.indexlifecycle; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -16,6 +17,8 @@ import org.mockito.Mockito; import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.xpack.core.indexlifecycle.UnfollowAction.CCR_METADATA_KEY; import static org.hamcrest.Matchers.equalTo; @@ -112,4 +115,47 @@ public void onFailure(Exception e) { assertThat(completed[0], nullValue()); assertThat(failure[0], sameInstance(error)); } + + public void testFailureToReleaseRetentionLeases() { + IndexMetaData indexMetadata = IndexMetaData.builder("follower-index") + .settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")) + .putCustom(CCR_METADATA_KEY, Collections.emptyMap()) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + + Client client = Mockito.mock(Client.class); + AdminClient adminClient = Mockito.mock(AdminClient.class); + Mockito.when(client.admin()).thenReturn(adminClient); + IndicesAdminClient indicesClient = Mockito.mock(IndicesAdminClient.class); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + + // Mock unfollow api call: + 
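The tests in this area stub the client with Mockito's doAnswer so the ActionListener handed to the call can be completed synchronously on the test thread. A stripped-down sketch of that stubbing pattern follows, using a hypothetical Service interface and a plain Consumer callback instead of the real client API.

```java
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.function.Consumer;

// Stripped-down sketch of the callback-stubbing pattern used in these tests.
// Service and its execute method are hypothetical stand-ins for the client.
public final class CallbackStubDemo {

    interface Service {
        void execute(String request, Consumer<Exception> onFailure);
    }

    public static void main(String[] args) {
        Service service = mock(Service.class);

        // Capture the callback the code under test passes in and complete it
        // synchronously with a failure, mirroring how the doAnswer stubs in
        // these tests complete the ActionListener.
        doAnswer(invocation -> {
            @SuppressWarnings("unchecked")
            Consumer<Exception> onFailure = (Consumer<Exception>) invocation.getArguments()[1];
            onFailure.accept(new IllegalStateException("simulated failure"));
            return null;
        }).when(service).execute(anyString(), any());

        service.execute("request", e -> System.out.println("failed with: " + e.getMessage()));
    }
}
```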
ElasticsearchException error = new ElasticsearchException("text exception"); + error.addMetadata("es.failed_to_remove_retention_leases", randomAlphaOfLength(10)); + Mockito.doAnswer(invocation -> { + UnfollowAction.Request request = (UnfollowAction.Request) invocation.getArguments()[1]; + assertThat(request.getFollowerIndex(), equalTo("follower-index")); + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + listener.onFailure(error); + return null; + }).when(client).execute(Mockito.same(UnfollowAction.INSTANCE), Mockito.any(), Mockito.any()); + + AtomicBoolean completed = new AtomicBoolean(false); + AtomicReference failure = new AtomicReference<>(); + UnfollowFollowIndexStep step = new UnfollowFollowIndexStep(randomStepKey(), randomStepKey(), client); + step.performAction(indexMetadata, null, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + completed.set(complete); + } + + @Override + public void onFailure(Exception e) { + failure.set(e); + } + }); + assertThat(completed.get(), equalTo(true)); + assertThat(failure.get(), nullValue()); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStepTests.java index f1f3c053e2345..6953455489d1a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStepTests.java @@ -132,6 +132,42 @@ public void onFailure(Exception e) { containsString("this index is a leader index; waiting for all following indices to cease following before proceeding")); } + public void testNoShardStats() { + WaitForNoFollowersStep step = createRandomInstance(); + + String indexName = randomAlphaOfLengthBetween(5,10); + + int numberOfShards = randomIntBetween(1, 100); + final IndexMetaData indexMetaData = IndexMetaData.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(randomIntBetween(1, 10)) + .build(); + + ShardStats sStats = new ShardStats(null, mockShardPath(), null, null, null, null); + ShardStats[] shardStats = new ShardStats[1]; + shardStats[0] = sStats; + mockIndexStatsCall(step.getClient(), indexName, new IndexStats(indexName, "uuid", shardStats)); + + final SetOnce conditionMetHolder = new SetOnce<>(); + final SetOnce stepInfoHolder = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean conditionMet, ToXContentObject infomationContext) { + conditionMetHolder.set(conditionMet); + stepInfoHolder.set(infomationContext); + } + + @Override + public void onFailure(Exception e) { + fail("onFailure should not be called in this test, called with exception: " + e.getMessage()); + } + }); + + assertTrue(conditionMetHolder.get()); + assertNull(stepInfoHolder.get()); + } + public void testFailure() { WaitForNoFollowersStep step = createRandomInstance(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java index 05209628542fe..6508ee5cb2054 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java @@ -18,6 +18,7 @@ import java.io.IOException; import java.time.Clock; +import java.time.Instant; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.EnumSet; @@ -28,7 +29,7 @@ public class ScheduledEventTests extends AbstractSerializingTestCase { public static ScheduledEvent createScheduledEvent(String calendarId) { - ZonedDateTime start = Clock.systemUTC().instant().atZone(ZoneOffset.UTC); + ZonedDateTime start = nowWithMillisResolution(); return new ScheduledEvent(randomAlphaOfLength(10), start, start.plusSeconds(randomIntBetween(1, 10000)), calendarId, null); } @@ -119,4 +120,8 @@ public void testLenientParser() throws IOException { ScheduledEvent.LENIENT_PARSER.apply(parser, null); } } + + private static ZonedDateTime nowWithMillisResolution() { + return Instant.ofEpochMilli(Clock.systemUTC().millis()).atZone(ZoneOffset.UTC); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java new file mode 100644 index 0000000000000..b616158dae451 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderTests.java @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.utils.XContentObjectTransformer; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + + +public class AggProviderTests extends AbstractSerializingTestCase { + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + + @Override + protected NamedWriteableRegistry writableRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } + + 
@Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return writableRegistry(); + } + + @Override + protected AggProvider createTestInstance() { + return createRandomValidAggProvider(); + } + + @Override + protected Writeable.Reader instanceReader() { + return AggProvider::fromStream; + } + + @Override + protected AggProvider doParseInstance(XContentParser parser) throws IOException { + return AggProvider.fromXContent(parser, false); + } + + public static AggProvider createRandomValidAggProvider() { + return createRandomValidAggProvider(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)); + } + + public static AggProvider createRandomValidAggProvider(String name, String field) { + Map agg = Collections.singletonMap(name, + Collections.singletonMap("avg", Collections.singletonMap("field", field))); + try { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + AggregatorFactories.Builder aggs = + XContentObjectTransformer.aggregatorTransformer(new NamedXContentRegistry(searchModule.getNamedXContents())) + .fromMap(agg); + return new AggProvider(agg, aggs, null); + } catch (IOException ex) { + fail(ex.getMessage()); + } + return null; + } + + public void testEmptyAggMap() throws IOException { + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{}"); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> AggProvider.fromXContent(parser, false)); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), equalTo("Datafeed aggregations are not parsable")); + } + + public void testSerializationBetweenBugVersion() throws IOException { + AggProvider tempAggProvider = createRandomValidAggProvider(); + AggProvider aggProviderWithEx = new AggProvider(tempAggProvider.getAggs(), tempAggProvider.getParsedAggs(), new IOException("ex")); + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(Version.V_6_6_0); + aggProviderWithEx.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { + in.setVersion(Version.V_6_6_0); + AggProvider streamedAggProvider = AggProvider.fromStream(in); + assertThat(streamedAggProvider.getAggs(), equalTo(aggProviderWithEx.getAggs())); + assertThat(streamedAggProvider.getParsingException(), is(nullValue())); + + AggregatorFactories.Builder streamedParsedAggs = XContentObjectTransformer.aggregatorTransformer(xContentRegistry()) + .fromMap(streamedAggProvider.getAggs()); + assertThat(streamedParsedAggs, equalTo(aggProviderWithEx.getParsedAggs())); + assertThat(streamedAggProvider.getParsedAggs(), is(nullValue())); + } + } + } + + public void testSerializationBetweenEagerVersion() throws IOException { + AggProvider validAggProvider = createRandomValidAggProvider(); + + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(Version.V_6_0_0); + validAggProvider.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { + in.setVersion(Version.V_6_0_0); + AggProvider streamedAggProvider = AggProvider.fromStream(in); + assertThat(streamedAggProvider.getAggs(), equalTo(validAggProvider.getAggs())); + assertThat(streamedAggProvider.getParsingException(), is(nullValue())); + assertThat(streamedAggProvider.getParsedAggs(), 
equalTo(validAggProvider.getParsedAggs())); + } + } + + try (BytesStreamOutput output = new BytesStreamOutput()) { + AggProvider aggProviderWithEx = new AggProvider(validAggProvider.getAggs(), + validAggProvider.getParsedAggs(), + new IOException("bad parsing")); + output.setVersion(Version.V_6_0_0); + IOException ex = expectThrows(IOException.class, () -> aggProviderWithEx.writeTo(output)); + assertThat(ex.getMessage(), equalTo("bad parsing")); + } + + try (BytesStreamOutput output = new BytesStreamOutput()) { + AggProvider aggProviderWithEx = new AggProvider(validAggProvider.getAggs(), + validAggProvider.getParsedAggs(), + new ElasticsearchException("bad parsing")); + output.setVersion(Version.V_6_0_0); + ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> aggProviderWithEx.writeTo(output)); + assertNotNull(ex.getCause()); + assertThat(ex.getCause().getMessage(), equalTo("bad parsing")); + } + + try (BytesStreamOutput output = new BytesStreamOutput()) { + AggProvider aggProviderWithOutParsed = new AggProvider(validAggProvider.getAggs(), null, null); + output.setVersion(Version.V_6_0_0); + ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> aggProviderWithOutParsed.writeTo(output)); + assertThat(ex.getMessage(), equalTo("Unsupported operation: parsed aggregations are null")); + } + } + + @Override + protected AggProvider mutateInstance(AggProvider instance) throws IOException { + Exception parsingException = instance.getParsingException(); + AggregatorFactories.Builder parsedAggs = instance.getParsedAggs(); + switch (between(0, 1)) { + case 0: + parsingException = parsingException == null ? new IOException("failed parsing") : null; + break; + case 1: + parsedAggs = parsedAggs == null ? + XContentObjectTransformer.aggregatorTransformer(xContentRegistry()).fromMap(instance.getAggs()) : + null; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new AggProvider(instance.getAggs(), parsedAggs, parsingException); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index 788870013885e..71491c9227728 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.script.Script; @@ -58,17 +57,16 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.core.ml.datafeed.QueryProviderTests.createRandomValidQueryProvider; +import static org.elasticsearch.xpack.core.ml.job.messages.Messages.DATAFEED_AGGREGATIONS_INTERVAL_MUST_BE_GREATER_THAN_ZERO; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static 
org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; public class DatafeedConfigTests extends AbstractSerializingTestCase { @@ -89,7 +87,7 @@ private static DatafeedConfig.Builder createRandomizedDatafeedConfigBuilder(Stri DatafeedConfig.Builder builder = new DatafeedConfig.Builder(randomValidDatafeedId(), jobId); builder.setIndices(randomStringList(1, 10)); if (randomBoolean()) { - builder.setParsedQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))); + builder.setQueryProvider(createRandomValidQueryProvider(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); } boolean addScriptFields = randomBoolean(); if (addScriptFields) { @@ -214,9 +212,44 @@ protected DatafeedConfig doParseInstance(XContentParser parser) { " }\n" + "}"; + private static final String MULTIPLE_AGG_DEF_DATAFEED = "{\n" + + " \"datafeed_id\": \"farequote-datafeed\",\n" + + " \"job_id\": \"farequote\",\n" + + " \"frequency\": \"1h\",\n" + + " \"indices\": [\"farequote1\", \"farequote2\"],\n" + + " \"aggregations\": {\n" + + " \"buckets\": {\n" + + " \"date_histogram\": {\n" + + " \"field\": \"time\",\n" + + " \"interval\": \"360s\",\n" + + " \"time_zone\": \"UTC\"\n" + + " },\n" + + " \"aggregations\": {\n" + + " \"time\": {\n" + + " \"max\": {\"field\": \"time\"}\n" + + " }\n" + + " }\n" + + " }\n" + + " }," + + " \"aggs\": {\n" + + " \"buckets2\": {\n" + + " \"date_histogram\": {\n" + + " \"field\": \"time\",\n" + + " \"interval\": \"360s\",\n" + + " \"time_zone\": \"UTC\"\n" + + " },\n" + + " \"aggregations\": {\n" + + " \"time\": {\n" + + " \"max\": {\"field\": \"time\"}\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; + public void testFutureConfigParse() throws IOException { XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED); + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED); XContentParseException e = expectThrows(XContentParseException.class, () -> DatafeedConfig.STRICT_PARSER.apply(parser, null).build()); assertEquals("[6:5] [datafeed_config] unknown field [tomorrows_technology_today], parser not found", e.getMessage()); @@ -224,49 +257,66 @@ public void testFutureConfigParse() throws IOException { public void testPastQueryConfigParse() throws IOException { try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_QUERY_DATAFEED)) { + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_QUERY_DATAFEED)) { DatafeedConfig config = DatafeedConfig.LENIENT_PARSER.apply(parser, null).build(); - ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> config.getParsedQuery()); - assertEquals("[match] query doesn't support multiple fields, found [query] and [type]", e.getMessage()); + assertThat(config.getQueryParsingException().getMessage(), + equalTo("[match] query doesn't support multiple fields, found [query] and [type]")); } try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_QUERY_DATAFEED)) { + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, 
ANACHRONISTIC_QUERY_DATAFEED)) { XContentParseException e = expectThrows(XContentParseException.class, () -> DatafeedConfig.STRICT_PARSER.apply(parser, null).build()); - assertEquals("[6:25] [datafeed_config] failed to parse field [query]", e.getMessage()); + assertEquals("[6:64] [datafeed_config] failed to parse field [query]", e.getMessage()); } } public void testPastAggConfigParse() throws IOException { try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_AGG_DATAFEED)) { + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_AGG_DATAFEED)) { - DatafeedConfig.Builder configBuilder = DatafeedConfig.LENIENT_PARSER.apply(parser, null); - ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> configBuilder.build()); - assertEquals( - "Datafeed [farequote-datafeed] aggregations are not parsable: [size] must be greater than 0. Found [0] in [airline]", - e.getMessage()); + DatafeedConfig datafeedConfig = DatafeedConfig.LENIENT_PARSER.apply(parser, null).build(); + assertThat(datafeedConfig.getAggParsingException().getMessage(), + equalTo("[size] must be greater than 0. Found [0] in [airline]")); } try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_AGG_DATAFEED)) { + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_AGG_DATAFEED)) { XContentParseException e = expectThrows(XContentParseException.class, () -> DatafeedConfig.STRICT_PARSER.apply(parser, null).build()); - assertEquals("[8:25] [datafeed_config] failed to parse field [aggregations]", e.getMessage()); + assertEquals("[25:3] [datafeed_config] failed to parse field [aggregations]", e.getMessage()); } } public void testFutureMetadataParse() throws IOException { XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED); + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED); // Unlike the config version of this test, the metadata parser should tolerate the unknown future field assertNotNull(DatafeedConfig.LENIENT_PARSER.apply(parser, null).build()); } + public void testMultipleDefinedAggParse() throws IOException { + try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, MULTIPLE_AGG_DEF_DATAFEED)) { + XContentParseException ex = expectThrows(XContentParseException.class, + () -> DatafeedConfig.LENIENT_PARSER.apply(parser, null)); + assertThat(ex.getMessage(), equalTo("[32:3] [datafeed_config] failed to parse field [aggs]")); + assertNotNull(ex.getCause()); + assertThat(ex.getCause().getMessage(), equalTo("Found two aggregation definitions: [aggs] and [aggregations]")); + } + try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, MULTIPLE_AGG_DEF_DATAFEED)) { + XContentParseException ex = expectThrows(XContentParseException.class, + () -> DatafeedConfig.STRICT_PARSER.apply(parser, null)); + assertThat(ex.getMessage(), equalTo("[32:3] [datafeed_config] failed to parse field [aggs]")); + assertNotNull(ex.getCause()); + 
assertThat(ex.getCause().getMessage(), equalTo("Found two aggregation definitions: [aggs] and [aggregations]")); + } + } + public void testToXContentForInternalStorage() throws IOException { DatafeedConfig.Builder builder = createRandomizedDatafeedConfigBuilder("foo", 300); @@ -443,7 +493,7 @@ public void testBuild_GivenHistogramWithDefaultInterval() { ElasticsearchException e = expectThrows(ElasticsearchException.class, builder::build); - assertThat(e.getMessage(), containsString("[interval] must be >0 for histogram aggregation [time]")); + assertThat(e.getMessage(), containsString(DATAFEED_AGGREGATIONS_INTERVAL_MUST_BE_GREATER_THAN_ZERO)); } public void testBuild_GivenDateHistogramWithInvalidTimeZone() { @@ -466,16 +516,16 @@ public void testBuild_GivenDateHistogramWithDefaultInterval() { public void testBuild_GivenValidDateHistogram() { long millisInDay = 24 * 3600000L; - assertThat(createDatafeedWithDateHistogram("1s").getHistogramIntervalMillis(), equalTo(1000L)); - assertThat(createDatafeedWithDateHistogram("2s").getHistogramIntervalMillis(), equalTo(2000L)); - assertThat(createDatafeedWithDateHistogram("1m").getHistogramIntervalMillis(), equalTo(60000L)); - assertThat(createDatafeedWithDateHistogram("2m").getHistogramIntervalMillis(), equalTo(120000L)); - assertThat(createDatafeedWithDateHistogram("1h").getHistogramIntervalMillis(), equalTo(3600000L)); - assertThat(createDatafeedWithDateHistogram("2h").getHistogramIntervalMillis(), equalTo(7200000L)); - assertThat(createDatafeedWithDateHistogram("1d").getHistogramIntervalMillis(), equalTo(millisInDay)); - assertThat(createDatafeedWithDateHistogram("7d").getHistogramIntervalMillis(), equalTo(7 * millisInDay)); + assertThat(createDatafeedWithDateHistogram("1s").getHistogramIntervalMillis(xContentRegistry()), equalTo(1000L)); + assertThat(createDatafeedWithDateHistogram("2s").getHistogramIntervalMillis(xContentRegistry()), equalTo(2000L)); + assertThat(createDatafeedWithDateHistogram("1m").getHistogramIntervalMillis(xContentRegistry()), equalTo(60000L)); + assertThat(createDatafeedWithDateHistogram("2m").getHistogramIntervalMillis(xContentRegistry()), equalTo(120000L)); + assertThat(createDatafeedWithDateHistogram("1h").getHistogramIntervalMillis(xContentRegistry()), equalTo(3600000L)); + assertThat(createDatafeedWithDateHistogram("2h").getHistogramIntervalMillis(xContentRegistry()), equalTo(7200000L)); + assertThat(createDatafeedWithDateHistogram("1d").getHistogramIntervalMillis(xContentRegistry()), equalTo(millisInDay)); + assertThat(createDatafeedWithDateHistogram("7d").getHistogramIntervalMillis(xContentRegistry()), equalTo(7 * millisInDay)); - assertThat(createDatafeedWithDateHistogram(7 * millisInDay + 1).getHistogramIntervalMillis(), + assertThat(createDatafeedWithDateHistogram(7 * millisInDay + 1).getHistogramIntervalMillis(xContentRegistry()), equalTo(7 * millisInDay + 1)); } @@ -529,7 +579,8 @@ public void testValidateAggregations_GivenMulitpleHistogramAggs() { public void testDefaultFrequency_GivenNegative() { DatafeedConfig datafeed = createTestInstance(); - ESTestCase.expectThrows(IllegalArgumentException.class, () -> datafeed.defaultFrequency(TimeValue.timeValueSeconds(-1))); + ESTestCase.expectThrows(IllegalArgumentException.class, + () -> datafeed.defaultFrequency(TimeValue.timeValueSeconds(-1), xContentRegistry())); } public void testDefaultFrequency_GivenNoAggregations() { @@ -537,106 +588,79 @@ public void testDefaultFrequency_GivenNoAggregations() { datafeedBuilder.setIndices(Collections.singletonList("my_index")); 
DatafeedConfig datafeed = datafeedBuilder.build(); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(1))); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(30))); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(60))); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(90))); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(120))); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(121))); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(1), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(30), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(60), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(90), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(120), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(121), xContentRegistry())); - assertEquals(TimeValue.timeValueSeconds(61), datafeed.defaultFrequency(TimeValue.timeValueSeconds(122))); - assertEquals(TimeValue.timeValueSeconds(75), datafeed.defaultFrequency(TimeValue.timeValueSeconds(150))); - assertEquals(TimeValue.timeValueSeconds(150), datafeed.defaultFrequency(TimeValue.timeValueSeconds(300))); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueSeconds(1200))); + assertEquals(TimeValue.timeValueSeconds(61), datafeed.defaultFrequency(TimeValue.timeValueSeconds(122), xContentRegistry())); + assertEquals(TimeValue.timeValueSeconds(75), datafeed.defaultFrequency(TimeValue.timeValueSeconds(150), xContentRegistry())); + assertEquals(TimeValue.timeValueSeconds(150), datafeed.defaultFrequency(TimeValue.timeValueSeconds(300), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueSeconds(1200), xContentRegistry())); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueSeconds(1201))); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueSeconds(1800))); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(1))); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(2))); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(12))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueSeconds(1201), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueSeconds(1800), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(1), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(2), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(12), xContentRegistry())); 
- assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(12 * 3600 + 1))); - assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(13))); - assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(24))); - assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(48))); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(12 * 3600 + 1), xContentRegistry())); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(13), xContentRegistry())); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(24), xContentRegistry())); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(48), xContentRegistry())); } public void testDefaultFrequency_GivenAggregationsWithHistogramInterval_1_Second() { DatafeedConfig datafeed = createDatafeedWithDateHistogram("1s"); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(60))); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(90))); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(120))); - assertEquals(TimeValue.timeValueSeconds(125), datafeed.defaultFrequency(TimeValue.timeValueSeconds(250))); - assertEquals(TimeValue.timeValueSeconds(250), datafeed.defaultFrequency(TimeValue.timeValueSeconds(500))); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(60), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(90), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(120), xContentRegistry())); + assertEquals(TimeValue.timeValueSeconds(125), datafeed.defaultFrequency(TimeValue.timeValueSeconds(250), xContentRegistry())); + assertEquals(TimeValue.timeValueSeconds(250), datafeed.defaultFrequency(TimeValue.timeValueSeconds(500), xContentRegistry())); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(1))); - assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(13))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(1), xContentRegistry())); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(13), xContentRegistry())); } public void testDefaultFrequency_GivenAggregationsWithHistogramInterval_1_Minute() { DatafeedConfig datafeed = createDatafeedWithDateHistogram("1m"); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(60))); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(90))); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(120))); - assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(180))); - assertEquals(TimeValue.timeValueMinutes(2), datafeed.defaultFrequency(TimeValue.timeValueSeconds(240))); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(20))); + assertEquals(TimeValue.timeValueMinutes(1), 
datafeed.defaultFrequency(TimeValue.timeValueSeconds(60), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(90), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(120), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(180), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(2), datafeed.defaultFrequency(TimeValue.timeValueSeconds(240), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(20), xContentRegistry())); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueSeconds(20 * 60 + 1))); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(6))); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(12))); + assertEquals(TimeValue.timeValueMinutes(10), + datafeed.defaultFrequency(TimeValue.timeValueSeconds(20 * 60 + 1), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(6), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(12), xContentRegistry())); - assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(13))); - assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(72))); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(13), xContentRegistry())); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(72), xContentRegistry())); } public void testDefaultFrequency_GivenAggregationsWithHistogramInterval_10_Minutes() { DatafeedConfig datafeed = createDatafeedWithDateHistogram("10m"); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(10))); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(20))); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(30))); - assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(12 * 60))); - assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueMinutes(13 * 60))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(10), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(20), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(30), xContentRegistry())); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(12 * 60), xContentRegistry())); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueMinutes(13 * 60), xContentRegistry())); } public void testDefaultFrequency_GivenAggregationsWithHistogramInterval_1_Hour() { DatafeedConfig datafeed = createDatafeedWithDateHistogram("1h"); - assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(1))); - assertEquals(TimeValue.timeValueHours(1), 
datafeed.defaultFrequency(TimeValue.timeValueSeconds(3601))); - assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(2))); - assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(12))); - } - - public void testGetAggDeprecations() { - DatafeedConfig datafeed = createDatafeedWithDateHistogram("1h"); - String deprecationWarning = "Warning"; - List deprecations = datafeed.getAggDeprecations((map, id, deprecationlist) -> { - deprecationlist.add(deprecationWarning); - return new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder("field").field("field")); - }); - assertThat(deprecations, hasItem(deprecationWarning)); - - DatafeedConfig spiedConfig = spy(datafeed); - spiedConfig.getAggDeprecations(); - verify(spiedConfig).getAggDeprecations(DatafeedConfig.lazyAggParser); - } - - public void testGetQueryDeprecations() { - DatafeedConfig datafeed = createDatafeedWithDateHistogram("1h"); - String deprecationWarning = "Warning"; - List deprecations = datafeed.getQueryDeprecations((map, id, deprecationlist) -> { - deprecationlist.add(deprecationWarning); - return new BoolQueryBuilder(); - }); - assertThat(deprecations, hasItem(deprecationWarning)); - - DatafeedConfig spiedConfig = spy(datafeed); - spiedConfig.getQueryDeprecations(); - verify(spiedConfig).getQueryDeprecations(DatafeedConfig.lazyQueryParser); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(1), xContentRegistry())); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(3601), xContentRegistry())); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(2), xContentRegistry())); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(12), xContentRegistry())); } public void testSerializationOfComplexAggs() throws IOException { @@ -656,9 +680,8 @@ public void testSerializationOfComplexAggs() throws IOException { .subAggregation(derivativePipelineAggregationBuilder) .subAggregation(bucketScriptPipelineAggregationBuilder); DatafeedConfig.Builder datafeedConfigBuilder = createDatafeedBuilderWithDateHistogram(dateHistogram); - QueryBuilder terms = - new BoolQueryBuilder().filter(new TermQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); - datafeedConfigBuilder.setParsedQuery(terms); + datafeedConfigBuilder.setQueryProvider( + createRandomValidQueryProvider(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); DatafeedConfig datafeedConfig = datafeedConfigBuilder.build(); AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder().addAggregator(dateHistogram); @@ -674,18 +697,20 @@ public void testSerializationOfComplexAggs() throws IOException { assertEquals(datafeedConfig, parsedDatafeedConfig); // Assert that the parsed versions of our aggs and queries work as well - assertEquals(aggBuilder, parsedDatafeedConfig.getParsedAggregations()); - assertEquals(terms, parsedDatafeedConfig.getParsedQuery()); + assertEquals(aggBuilder, parsedDatafeedConfig.getParsedAggregations(xContentRegistry())); + assertEquals(datafeedConfig.getQuery(), parsedDatafeedConfig.getQuery()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); try(BytesStreamOutput output = new 
BytesStreamOutput()) { datafeedConfig.writeTo(output); - try(StreamInput streamInput = output.bytes().streamInput()) { + try(StreamInput streamInput = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { DatafeedConfig streamedDatafeedConfig = new DatafeedConfig(streamInput); assertEquals(datafeedConfig, streamedDatafeedConfig); // Assert that the parsed versions of our aggs and queries work as well - assertEquals(aggBuilder, streamedDatafeedConfig.getParsedAggregations()); - assertEquals(terms, streamedDatafeedConfig.getParsedQuery()); + assertEquals(aggBuilder, streamedDatafeedConfig.getParsedAggregations(xContentRegistry())); + assertEquals(datafeedConfig.getQuery(), streamedDatafeedConfig.getQuery()); } } } @@ -707,9 +732,13 @@ public void testSerializationOfComplexAggsBetweenVersions() throws IOException { .subAggregation(derivativePipelineAggregationBuilder) .subAggregation(bucketScriptPipelineAggregationBuilder); DatafeedConfig.Builder datafeedConfigBuilder = createDatafeedBuilderWithDateHistogram(dateHistogram); - QueryBuilder terms = - new BoolQueryBuilder().filter(new TermQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); - datafeedConfigBuilder.setParsedQuery(terms); + // So equality check between the streamed and current passes + // Streamed DatafeedConfigs when they are before 6.6.0 require a parsed object for aggs and queries, consequently all the default + // values are added between them + datafeedConfigBuilder.setQueryProvider( + QueryProvider + .fromParsedQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))))); DatafeedConfig datafeedConfig = datafeedConfigBuilder.build(); SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); @@ -725,8 +754,8 @@ public void testSerializationOfComplexAggsBetweenVersions() throws IOException { // Assert that the parsed versions of our aggs and queries work as well assertEquals(new AggregatorFactories.Builder().addAggregator(dateHistogram), - streamedDatafeedConfig.getParsedAggregations()); - assertEquals(terms, streamedDatafeedConfig.getParsedQuery()); + streamedDatafeedConfig.getParsedAggregations(xContentRegistry())); + assertEquals(datafeedConfig.getParsedQuery(xContentRegistry()), streamedDatafeedConfig.getParsedQuery(xContentRegistry())); } } } @@ -801,15 +830,15 @@ protected DatafeedConfig mutateInstance(DatafeedConfig instance) throws IOExcept break; case 5: BoolQueryBuilder query = new BoolQueryBuilder(); - if (instance.getParsedQuery() != null) { - query.must(instance.getParsedQuery()); + if (instance.getParsedQuery(xContentRegistry()) != null) { + query.must(instance.getParsedQuery(xContentRegistry())); } query.filter(new TermQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); builder.setParsedQuery(query); break; case 6: if (instance.hasAggregations()) { - builder.setAggregations(null); + builder.setAggProvider(null); } else { AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); String timeField = randomAlphaOfLength(10); @@ -826,7 +855,7 @@ protected DatafeedConfig mutateInstance(DatafeedConfig instance) throws IOExcept ArrayList scriptFields = new ArrayList<>(instance.getScriptFields()); scriptFields.add(new ScriptField(randomAlphaOfLengthBetween(1, 10), new Script("foo"), true)); builder.setScriptFields(scriptFields); - builder.setAggregations(null); + 
builder.setAggProvider(null); break; case 8: builder.setScrollSize(instance.getScrollSize() + between(1, 100)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java index 302bfefc7c42a..816d33cfb1e30 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java @@ -5,13 +5,21 @@ */ package org.elasticsearch.xpack.core.ml.datafeed; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; @@ -19,18 +27,27 @@ import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.PipelineAggregatorBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.BucketScriptPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig.Mode; import org.elasticsearch.xpack.core.ml.job.config.JobTests; +import org.elasticsearch.xpack.core.ml.utils.XContentObjectTransformer; +import java.io.IOException; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import static org.elasticsearch.xpack.core.ml.datafeed.AggProviderTests.createRandomValidAggProvider; +import static org.elasticsearch.xpack.core.ml.datafeed.QueryProviderTests.createRandomValidQueryProvider; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -61,7 +78,7 @@ public static DatafeedUpdate createRandomized(String datafeedId, @Nullable Dataf builder.setIndices(DatafeedConfigTests.randomStringList(1, 10)); } if (randomBoolean()) { - builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))); + 
builder.setQuery(createRandomValidQueryProvider(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); } if (randomBoolean()) { int scriptsSize = randomInt(3); @@ -75,10 +92,8 @@ public static DatafeedUpdate createRandomized(String datafeedId, @Nullable Dataf if (randomBoolean() && datafeed == null) { // can only test with a single agg as the xcontent order gets randomized by test base class and then // the actual xcontent isn't the same and test fail. - // Testing with a single agg is ok as we don't have special list writeable / xconent logic - AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); - aggs.addAggregator(AggregationBuilders.avg(randomAlphaOfLength(10)).field(randomAlphaOfLength(10))); - builder.setAggregations(aggs); + // Testing with a single agg is ok as we don't have special list writeable / xcontent logic + builder.setAggregations(createRandomValidAggProvider(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); } if (randomBoolean()) { builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE)); @@ -114,6 +129,52 @@ protected NamedXContentRegistry xContentRegistry() { return new NamedXContentRegistry(searchModule.getNamedXContents()); } + private static final String MULTIPLE_AGG_DEF_DATAFEED = "{\n" + + " \"datafeed_id\": \"farequote-datafeed\",\n" + + " \"job_id\": \"farequote\",\n" + + " \"frequency\": \"1h\",\n" + + " \"indices\": [\"farequote1\", \"farequote2\"],\n" + + " \"aggregations\": {\n" + + " \"buckets\": {\n" + + " \"date_histogram\": {\n" + + " \"field\": \"time\",\n" + + " \"interval\": \"360s\",\n" + + " \"time_zone\": \"UTC\"\n" + + " },\n" + + " \"aggregations\": {\n" + + " \"time\": {\n" + + " \"max\": {\"field\": \"time\"}\n" + + " }\n" + + " }\n" + + " }\n" + + " }," + + " \"aggs\": {\n" + + " \"buckets2\": {\n" + + " \"date_histogram\": {\n" + + " \"field\": \"time\",\n" + + " \"interval\": \"360s\",\n" + + " \"time_zone\": \"UTC\"\n" + + " },\n" + + " \"aggregations\": {\n" + + " \"time\": {\n" + + " \"max\": {\"field\": \"time\"}\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; + + public void testMultipleDefinedAggParse() throws IOException { + try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, MULTIPLE_AGG_DEF_DATAFEED)) { + XContentParseException ex = expectThrows(XContentParseException.class, + () -> DatafeedUpdate.PARSER.apply(parser, null)); + assertThat(ex.getMessage(), equalTo("[32:3] [datafeed_update] failed to parse field [aggs]")); + assertNotNull(ex.getCause()); + assertThat(ex.getCause().getMessage(), equalTo("Found two aggregation definitions: [aggs] and [aggregations]")); + } + } + public void testApply_failBecauseTargetDatafeedHasDifferentId() { DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig("foo"); expectThrows(IllegalArgumentException.class, () -> createRandomized(datafeed.getId() + "_2").apply(datafeed, null)); @@ -143,13 +204,13 @@ public void testApply_givenFullUpdateNoAggregations() { DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder("foo", "foo-feed"); datafeedBuilder.setIndices(Collections.singletonList("i_1")); DatafeedConfig datafeed = datafeedBuilder.build(); - + QueryProvider queryProvider = createRandomValidQueryProvider("a", "b"); DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeed.getId()); update.setJobId("bar"); update.setIndices(Collections.singletonList("i_2")); 
update.setQueryDelay(TimeValue.timeValueSeconds(42)); update.setFrequency(TimeValue.timeValueSeconds(142)); - update.setQuery(QueryBuilders.termQuery("a", "b")); + update.setQuery(queryProvider); update.setScriptFields(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false))); update.setScrollSize(8000); update.setChunkingConfig(ChunkingConfig.newManual(TimeValue.timeValueHours(1))); @@ -161,7 +222,7 @@ public void testApply_givenFullUpdateNoAggregations() { assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_2"))); assertThat(updatedDatafeed.getQueryDelay(), equalTo(TimeValue.timeValueSeconds(42))); assertThat(updatedDatafeed.getFrequency(), equalTo(TimeValue.timeValueSeconds(142))); - assertThat(updatedDatafeed.getParsedQuery(), equalTo(QueryBuilders.termQuery("a", "b"))); + assertThat(updatedDatafeed.getQuery(), equalTo(queryProvider.getQuery())); assertThat(updatedDatafeed.hasAggregations(), is(false)); assertThat(updatedDatafeed.getScriptFields(), equalTo(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false)))); @@ -171,22 +232,22 @@ public void testApply_givenFullUpdateNoAggregations() { assertThat(updatedDatafeed.getDelayedDataCheckConfig().getCheckWindow(), equalTo(TimeValue.timeValueHours(1))); } - public void testApply_givenAggregations() { + public void testApply_givenAggregations() throws IOException { DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder("foo", "foo-feed"); datafeedBuilder.setIndices(Collections.singletonList("i_1")); DatafeedConfig datafeed = datafeedBuilder.build(); DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeed.getId()); MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); - update.setAggregations(new AggregatorFactories.Builder().addAggregator( - AggregationBuilders.histogram("a").interval(300000).field("time").subAggregation(maxTime))); + AggProvider aggProvider = AggProvider.fromParsedAggs(new AggregatorFactories.Builder().addAggregator( + AggregationBuilders.histogram("a").interval(300000).field("time").subAggregation(maxTime))); + update.setAggregations(aggProvider); DatafeedConfig updatedDatafeed = update.build().apply(datafeed, Collections.emptyMap()); assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_1"))); - assertThat(updatedDatafeed.getParsedAggregations(), - equalTo(new AggregatorFactories.Builder().addAggregator( - AggregationBuilders.histogram("a").interval(300000).field("time").subAggregation(maxTime)))); + assertThat(updatedDatafeed.getParsedAggregations(xContentRegistry()), equalTo(aggProvider.getParsedAggs())); + assertThat(updatedDatafeed.getAggregations(), equalTo(aggProvider.getAggs())); } public void testApply_GivenRandomUpdates_AssertImmutability() { @@ -194,7 +255,7 @@ public void testApply_GivenRandomUpdates_AssertImmutability() { DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig(JobTests.randomValidJobId()); if (datafeed.getAggregations() != null) { DatafeedConfig.Builder withoutAggs = new DatafeedConfig.Builder(datafeed); - withoutAggs.setAggregations(null); + withoutAggs.setAggProvider(null); datafeed = withoutAggs.build(); } DatafeedUpdate update = createRandomized(datafeed.getId(), datafeed); @@ -208,8 +269,57 @@ public void testApply_GivenRandomUpdates_AssertImmutability() { } } + public void testSerializationOfComplexAggsBetweenVersions() throws IOException { + MaxAggregationBuilder maxTime = 
AggregationBuilders.max("timestamp").field("timestamp"); + AvgAggregationBuilder avgAggregationBuilder = AggregationBuilders.avg("bytes_in_avg").field("system.network.in.bytes"); + DerivativePipelineAggregationBuilder derivativePipelineAggregationBuilder = + PipelineAggregatorBuilders.derivative("bytes_in_derivative", "bytes_in_avg"); + BucketScriptPipelineAggregationBuilder bucketScriptPipelineAggregationBuilder = + PipelineAggregatorBuilders.bucketScript("non_negative_bytes", + Collections.singletonMap("bytes", "bytes_in_derivative"), + new Script("params.bytes > 0 ? params.bytes : null")); + DateHistogramAggregationBuilder dateHistogram = + AggregationBuilders.dateHistogram("histogram_buckets") + .field("timestamp").interval(300000).timeZone(ZoneOffset.UTC) + .subAggregation(maxTime) + .subAggregation(avgAggregationBuilder) + .subAggregation(derivativePipelineAggregationBuilder) + .subAggregation(bucketScriptPipelineAggregationBuilder); + AggregatorFactories.Builder aggs = new AggregatorFactories.Builder().addAggregator(dateHistogram); + DatafeedUpdate.Builder datafeedUpdateBuilder = new DatafeedUpdate.Builder("df-update-past-serialization-test"); + datafeedUpdateBuilder.setAggregations(new AggProvider( + XContentObjectTransformer.aggregatorTransformer(xContentRegistry()).toMap(aggs), + aggs, + null)); + // So equality check between the streamed and current passes + // Streamed DatafeedConfigs when they are before 6.6.0 require a parsed object for aggs and queries, consequently all the default + // values are added between them + datafeedUpdateBuilder.setQuery( + QueryProvider + .fromParsedQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))))); + DatafeedUpdate datafeedUpdate = datafeedUpdateBuilder.build(); + + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); + + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(Version.V_6_7_0); + datafeedUpdate.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + in.setVersion(Version.V_6_7_0); + DatafeedUpdate streamedDatafeedUpdate = new DatafeedUpdate(in); + assertEquals(datafeedUpdate, streamedDatafeedUpdate); + + // Assert that the parsed versions of our aggs and queries work as well + assertEquals(aggs, streamedDatafeedUpdate.getParsedAgg(xContentRegistry())); + assertEquals(datafeedUpdate.getParsedQuery(xContentRegistry()), streamedDatafeedUpdate.getParsedQuery(xContentRegistry())); + } + } + } + @Override - protected DatafeedUpdate mutateInstance(DatafeedUpdate instance) { + protected DatafeedUpdate mutateInstance(DatafeedUpdate instance) throws IOException { DatafeedUpdate.Builder builder = new DatafeedUpdate.Builder(instance); switch (between(0, 9)) { case 0: @@ -245,10 +355,10 @@ protected DatafeedUpdate mutateInstance(DatafeedUpdate instance) { case 5: BoolQueryBuilder query = new BoolQueryBuilder(); if (instance.getQuery() != null) { - query.must(instance.getQuery()); + query.must(instance.getParsedQuery(xContentRegistry())); } query.filter(new TermQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); - builder.setQuery(query); + builder.setQuery(QueryProvider.fromParsedQuery(query)); break; case 6: if (instance.hasAggregations()) { @@ -257,8 +367,8 @@ 
protected DatafeedUpdate mutateInstance(DatafeedUpdate instance) { AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); String timeField = randomAlphaOfLength(10); aggBuilder.addAggregator(new DateHistogramAggregationBuilder(timeField).field(timeField).interval(between(10000, 3600000)) - .subAggregation(new MaxAggregationBuilder(timeField).field(timeField))); - builder.setAggregations(aggBuilder); + .subAggregation(new MaxAggregationBuilder(timeField).field(timeField))); + builder.setAggregations(AggProvider.fromParsedAggs(aggBuilder)); if (instance.getScriptFields().isEmpty() == false) { builder.setScriptFields(Collections.emptyList()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProviderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProviderTests.java new file mode 100644 index 0000000000000..9b0c17ae8e101 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/QueryProviderTests.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.utils.XContentObjectTransformer; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + + +public class QueryProviderTests extends AbstractSerializingTestCase { + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + + @Override + protected NamedWriteableRegistry writableRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return writableRegistry(); + } + + @Override + protected QueryProvider 
createTestInstance() { + return createRandomValidQueryProvider(); + } + + @Override + protected Writeable.Reader instanceReader() { + return QueryProvider::fromStream; + } + + @Override + protected QueryProvider doParseInstance(XContentParser parser) throws IOException { + return QueryProvider.fromXContent(parser, false); + } + + public static QueryProvider createRandomValidQueryProvider() { + return createRandomValidQueryProvider(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)); + } + + public static QueryProvider createRandomValidQueryProvider(String field, String value) { + Map terms = Collections.singletonMap(BoolQueryBuilder.NAME, + Collections.singletonMap("filter", + Collections.singletonList( + Collections.singletonMap(TermQueryBuilder.NAME, + Collections.singletonMap(field, value))))); + return new QueryProvider( + terms, + QueryBuilders.boolQuery().filter(QueryBuilders.termQuery(field, value)), + null); + } + + public void testEmptyQueryMap() throws IOException { + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{}"); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> QueryProvider.fromXContent(parser, false)); + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), equalTo("Datafeed query is not parsable")); + } + + public void testSerializationBetweenBugVersion() throws IOException { + QueryProvider tempQueryProvider = createRandomValidQueryProvider(); + QueryProvider queryProviderWithEx = new QueryProvider(tempQueryProvider.getQuery(), + tempQueryProvider.getParsedQuery(), + new IOException("ex")); + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(Version.V_6_6_0); + queryProviderWithEx.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { + in.setVersion(Version.V_6_6_0); + QueryProvider streamedQueryProvider = QueryProvider.fromStream(in); + assertThat(streamedQueryProvider.getQuery(), equalTo(queryProviderWithEx.getQuery())); + assertThat(streamedQueryProvider.getParsingException(), is(nullValue())); + + QueryBuilder streamedParsedQuery = XContentObjectTransformer.queryBuilderTransformer(xContentRegistry()) + .fromMap(streamedQueryProvider.getQuery()); + assertThat(streamedParsedQuery, equalTo(queryProviderWithEx.getParsedQuery())); + assertThat(streamedQueryProvider.getParsedQuery(), is(nullValue())); + } + } + } + + public void testSerializationBetweenEagerVersion() throws IOException { + QueryProvider validQueryProvider = createRandomValidQueryProvider(); + + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(Version.V_6_0_0); + validQueryProvider.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { + in.setVersion(Version.V_6_0_0); + + QueryProvider streamedQueryProvider = QueryProvider.fromStream(in); + XContentObjectTransformer transformer = XContentObjectTransformer.queryBuilderTransformer(xContentRegistry()); + Map sourceQueryMapWithDefaults = transformer.toMap(transformer.fromMap(validQueryProvider.getQuery())); + + assertThat(streamedQueryProvider.getQuery(), equalTo(sourceQueryMapWithDefaults)); + assertThat(streamedQueryProvider.getParsingException(), is(nullValue())); + assertThat(streamedQueryProvider.getParsedQuery(), 
equalTo(validQueryProvider.getParsedQuery())); + } + } + + try (BytesStreamOutput output = new BytesStreamOutput()) { + QueryProvider queryProviderWithEx = new QueryProvider(validQueryProvider.getQuery(), + validQueryProvider.getParsedQuery(), + new IOException("bad parsing")); + output.setVersion(Version.V_6_0_0); + IOException ex = expectThrows(IOException.class, () -> queryProviderWithEx.writeTo(output)); + assertThat(ex.getMessage(), equalTo("bad parsing")); + } + + try (BytesStreamOutput output = new BytesStreamOutput()) { + QueryProvider queryProviderWithEx = new QueryProvider(validQueryProvider.getQuery(), + validQueryProvider.getParsedQuery(), + new ElasticsearchException("bad parsing")); + output.setVersion(Version.V_6_0_0); + ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> queryProviderWithEx.writeTo(output)); + assertNotNull(ex.getCause()); + assertThat(ex.getCause().getMessage(), equalTo("bad parsing")); + } + + try (BytesStreamOutput output = new BytesStreamOutput()) { + QueryProvider queryProviderWithOutParsed = new QueryProvider(validQueryProvider.getQuery(), null, null); + output.setVersion(Version.V_6_0_0); + ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> queryProviderWithOutParsed.writeTo(output)); + assertThat(ex.getMessage(), equalTo("Unsupported operation: parsed query is null")); + } + } + + @Override + protected QueryProvider mutateInstance(QueryProvider instance) throws IOException { + Exception parsingException = instance.getParsingException(); + QueryBuilder parsedQuery = instance.getParsedQuery(); + switch (between(0, 1)) { + case 0: + parsingException = parsingException == null ? new IOException("failed parsing") : null; + break; + case 1: + parsedQuery = parsedQuery == null ? 
+ XContentObjectTransformer.queryBuilderTransformer(xContentRegistry()).fromMap(instance.getQuery()) : + null; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new QueryProvider(instance.getQuery(), parsedQuery, parsingException); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java index e87515afadd1d..f5461a1abf3f8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java @@ -45,6 +45,8 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; + public class ElasticsearchMappingsTests extends ESTestCase { @@ -117,11 +119,12 @@ private void compareFields(Set expected, Set reserved) { @SuppressWarnings("unchecked") public void testTermFieldMapping() throws IOException { - XContentBuilder builder = ElasticsearchMappings.termFieldsMapping(null, Arrays.asList("apple", "strawberry", + XContentBuilder builder = ElasticsearchMappings.termFieldsMapping(Arrays.asList("apple", "strawberry", AnomalyRecord.BUCKET_SPAN.getPreferredName())); XContentParser parser = createParser(builder); - Map properties = (Map) parser.map().get(ElasticsearchMappings.PROPERTIES); + Map mapping = (Map) parser.map().get(SINGLE_MAPPING_NAME); + Map properties = (Map) mapping.get(ElasticsearchMappings.PROPERTIES); Map instanceMapping = (Map) properties.get("apple"); assertNotNull(instanceMapping); @@ -217,7 +220,7 @@ private ClusterState getClusterStateWithMappingsWithMetaData(Map } mapping.put("_meta", meta); - indexMetaData.putMapping(new MappingMetaData(ElasticsearchMappings.DOC_TYPE, mapping)); + indexMetaData.putMapping(new MappingMetaData("_doc", mapping)); metaDataBuilder.put(indexMetaData); } @@ -230,7 +233,7 @@ private ClusterState getClusterStateWithMappingsWithMetaData(Map private Set collectResultsDocFieldNames() throws IOException { // Only the mappings for the results index should be added below. Do NOT add mappings for other indexes here. 
- return collectFieldNames(ElasticsearchMappings.resultsMapping()); + return collectFieldNames(ElasticsearchMappings.resultsMapping("_doc")); } private Set<String> collectConfigDocFieldNames() throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformerTests.java index 1f61168c4200f..2e3a6e056ae3f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformerTests.java @@ -7,6 +7,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentFactory; @@ -16,6 +17,7 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; @@ -37,12 +39,19 @@ public class XContentObjectTransformerTests extends ESTestCase { + @Override + public NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + public void testFromMap() throws IOException { Map<String, Object> aggMap = Collections.singletonMap("fieldName", Collections.singletonMap("max", Collections.singletonMap("field", "fieldName"))); - XContentObjectTransformer<AggregatorFactories.Builder> aggTransformer = XContentObjectTransformer.aggregatorTransformer(); + XContentObjectTransformer<AggregatorFactories.Builder> aggTransformer = + XContentObjectTransformer.aggregatorTransformer(xContentRegistry()); assertXContentAreEqual(aggTransformer.fromMap(aggMap), aggMap); assertXContentAreEqual(aggTransformer.fromMap(aggMap), aggTransformer.toMap(aggTransformer.fromMap(aggMap))); @@ -60,7 +69,8 @@ public void testFromMap() throws IOException { put("boost",1.0); }})); - XContentObjectTransformer<QueryBuilder> queryBuilderTransformer = XContentObjectTransformer.queryBuilderTransformer(); + XContentObjectTransformer<QueryBuilder> queryBuilderTransformer = + XContentObjectTransformer.queryBuilderTransformer(xContentRegistry()); assertXContentAreEqual(queryBuilderTransformer.fromMap(queryMap), queryMap); assertXContentAreEqual(queryBuilderTransformer.fromMap(queryMap), queryBuilderTransformer.toMap(queryBuilderTransformer.fromMap(queryMap))); @@ -73,7 +83,8 @@ public void testFromMapWithBadMaps() { put("type", "phrase"); //phrase stopped being supported for match in 6.x }})); - XContentObjectTransformer<QueryBuilder> queryBuilderTransformer = XContentObjectTransformer.queryBuilderTransformer(); + XContentObjectTransformer<QueryBuilder> queryBuilderTransformer = + XContentObjectTransformer.queryBuilderTransformer(xContentRegistry()); ParsingException exception = expectThrows(ParsingException.class, () -> queryBuilderTransformer.fromMap(queryMap)); @@ -85,14 +96,17 @@ public void testFromMapWithBadMaps() { put("field", "myField"); }})); - XContentObjectTransformer<AggregatorFactories.Builder> aggTransformer =
XContentObjectTransformer.aggregatorTransformer(); + XContentObjectTransformer aggTransformer = + XContentObjectTransformer.aggregatorTransformer(xContentRegistry()); XContentParseException xContentParseException = expectThrows(XContentParseException.class, () -> aggTransformer.fromMap(aggMap)); assertThat(xContentParseException.getMessage(), containsString("[terms] failed to parse field [size]")); } public void testToMap() throws IOException { - XContentObjectTransformer aggTransformer = XContentObjectTransformer.aggregatorTransformer(); - XContentObjectTransformer queryBuilderTransformer = XContentObjectTransformer.queryBuilderTransformer(); + XContentObjectTransformer aggTransformer = + XContentObjectTransformer.aggregatorTransformer(xContentRegistry()); + XContentObjectTransformer queryBuilderTransformer = + XContentObjectTransformer.queryBuilderTransformer(xContentRegistry()); AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); long aggHistogramInterval = randomNonNegativeLong(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java index 906b00ccab0fc..4af7dd2e57d62 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java @@ -133,6 +133,12 @@ public void testSystem() throws Exception { assertThat(predicate.test("indices:admin/seq_no/retention_lease_background_sync"), is(true)); assertThat(predicate.test("indices:admin/seq_no/retention_lease_background_sync[p]"), is(true)); assertThat(predicate.test("indices:admin/seq_no/retention_lease_background_sync[r]"), is(true)); + assertThat(predicate.test("indices:admin/seq_no/add_retention_lease"), is(true)); + assertThat(predicate.test("indices:admin/seq_no/add_retention_lease[s]"), is(true)); + assertThat(predicate.test("indices:admin/seq_no/remove_retention_lease"), is(true)); + assertThat(predicate.test("indices:admin/seq_no/remove_retention_lease[s]"), is(true)); + assertThat(predicate.test("indices:admin/seq_no/renew_retention_lease"), is(true)); + assertThat(predicate.test("indices:admin/seq_no/renew_retention_lease[s]"), is(true)); assertThat(predicate.test("indices:admin/settings/update"), is(true)); assertThat(predicate.test("indices:admin/settings/foo"), is(false)); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 42144535a2f06..d7c44eaad165e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -225,12 +225,14 @@ public void testSnapshotUserRole() { assertThat(snapshotUserRole.indices().allowedIndicesMatcher(GetIndexAction.NAME) .test(randomAlphaOfLengthBetween(8, 24)), is(true)); - assertThat(snapshotUserRole.indices().allowedIndicesMatcher(GetIndexAction.NAME) - .test(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX), is(true)); - assertThat(snapshotUserRole.indices().allowedIndicesMatcher(GetIndexAction.NAME) - .test(RestrictedIndicesNames.SECURITY_INDEX_NAME), is(true)); - 
assertNoAccessAllowed(snapshotUserRole, RestrictedIndicesNames.NAMES_SET); + for (String index : RestrictedIndicesNames.RESTRICTED_NAMES) { + // This test might cease to be true if we ever have non-security restricted names + // but that depends on how users are supposed to perform snapshots of those new indices. + assertThat(snapshotUserRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(index), is(true)); + } + + assertNoAccessAllowed(snapshotUserRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testIngestAdminRole() { @@ -258,7 +260,7 @@ public void testIngestAdminRole() { assertThat(ingestAdminRole.indices().allowedIndicesMatcher(GetAction.NAME).test(randomAlphaOfLengthBetween(8, 24)), is(false)); - assertNoAccessAllowed(ingestAdminRole, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(ingestAdminRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testKibanaSystemRole() { @@ -359,7 +361,7 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(index), is(false)); - assertNoAccessAllowed(kibanaRole, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(kibanaRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testKibanaUserRole() { @@ -396,7 +398,7 @@ public void testKibanaUserRole() { assertThat(kibanaUserRole.application().grants(new ApplicationPrivilege(applicationWithRandomIndex, "app-random-index", "all"), "*"), is(false)); - assertNoAccessAllowed(kibanaUserRole, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(kibanaUserRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testMonitoringUserRole() { @@ -440,7 +442,7 @@ public void testMonitoringUserRole() { assertThat(monitoringUserRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); assertThat(monitoringUserRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(index), is(true)); - assertNoAccessAllowed(monitoringUserRole, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(monitoringUserRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testRemoteMonitoringAgentRole() { @@ -499,7 +501,7 @@ public void testRemoteMonitoringAgentRole() { assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(metricbeatIndex), is(false)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME).test(metricbeatIndex), is(false)); - assertNoAccessAllowed(remoteMonitoringAgentRole, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(remoteMonitoringAgentRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testRemoteMonitoringCollectorRole() { @@ -547,36 +549,41 @@ public void testRemoteMonitoringCollectorRole() { assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(index), is(false)); }); + // These tests might need to change if we add new non-security restricted indices that the monitoring user isn't supposed to see + // (but ideally, the monitoring user should see all indices). 
assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetSettingsAction.NAME) - .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(true)); + .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(true)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndicesShardStoresAction.NAME) - .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(true)); + .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(true)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(UpgradeStatusAction.NAME) - .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(true)); + .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(true)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(RecoveryAction.NAME) - .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(true)); + .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(true)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndicesStatsAction.NAME) - .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(true)); + .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(true)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndicesSegmentsAction.NAME) - .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(true)); + .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(true)); + assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(SearchAction.NAME) - .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(false)); + .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(false)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(GetAction.NAME) - .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(false)); + .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(false)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(DeleteAction.NAME) - .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(false)); + .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(false)); assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndexAction.NAME) - .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(false)); + .test(randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES)), is(false)); assertMonitoringOnRestrictedIndices(remoteMonitoringAgentRole); - assertNoAccessAllowed(remoteMonitoringAgentRole, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(remoteMonitoringAgentRole, RestrictedIndicesNames.RESTRICTED_NAMES); } private void assertMonitoringOnRestrictedIndices(Role role) { final Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); + final String internalSecurityIndex = randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_6, + RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_7); final MetaData metaData = new MetaData.Builder() - .put(new 
IndexMetaData.Builder(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX) + .put(new IndexMetaData.Builder(internalSecurityIndex) .settings(indexSettings) .numberOfShards(1) .numberOfReplicas(0) @@ -588,9 +595,9 @@ private void assertMonitoringOnRestrictedIndices(Role role) { GetSettingsAction.NAME, IndicesShardStoresAction.NAME, UpgradeStatusAction.NAME, RecoveryAction.NAME); for (final String indexMonitoringActionName : indexMonitoringActionNamesList) { final Map authzMap = role.indices().authorize(indexMonitoringActionName, - Sets.newHashSet(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME), + Sets.newHashSet(internalSecurityIndex, RestrictedIndicesNames.SECURITY_INDEX_NAME), metaData.getAliasAndIndexLookup(), fieldPermissionsCache); - assertThat(authzMap.get(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX).isGranted(), is(true)); + assertThat(authzMap.get(internalSecurityIndex).isGranted(), is(true)); assertThat(authzMap.get(RestrictedIndicesNames.SECURITY_INDEX_NAME).isGranted(), is(true)); } } @@ -632,7 +639,7 @@ public void testReportingUserRole() { assertThat(reportingUserRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(false)); assertThat(reportingUserRole.indices().allowedIndicesMatcher(BulkAction.NAME).test(index), is(false)); - assertNoAccessAllowed(reportingUserRole, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(reportingUserRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testKibanaDashboardOnlyUserRole() { @@ -666,7 +673,7 @@ public void testKibanaDashboardOnlyUserRole() { assertThat(dashboardsOnlyUserRole.application().grants( new ApplicationPrivilege(applicationWithRandomIndex, "app-random-index", "all"), "*"), is(false)); - assertNoAccessAllowed(dashboardsOnlyUserRole, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(dashboardsOnlyUserRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testSuperuserRole() { @@ -685,6 +692,8 @@ public void testSuperuserRole() { assertThat(superuserRole.cluster().check("internal:admin/foo", request), is(false)); final Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); + final String internalSecurityIndex = randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_6, + RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_7); final MetaData metaData = new MetaData.Builder() .put(new IndexMetaData.Builder("a1").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) .put(new IndexMetaData.Builder("a2").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) @@ -697,7 +706,7 @@ public void testSuperuserRole() { .putAlias(new AliasMetaData.Builder("ab").build()) .putAlias(new AliasMetaData.Builder("ba").build()) .build(), true) - .put(new IndexMetaData.Builder(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX) + .put(new IndexMetaData.Builder(internalSecurityIndex) .settings(indexSettings) .numberOfShards(1) .numberOfReplicas(0) @@ -725,7 +734,7 @@ public void testSuperuserRole() { authzMap = superuserRole.indices().authorize(randomFrom(IndexAction.NAME, DeleteIndexAction.NAME, SearchAction.NAME), Sets.newHashSet(RestrictedIndicesNames.SECURITY_INDEX_NAME), lookup, fieldPermissionsCache); assertThat(authzMap.get(RestrictedIndicesNames.SECURITY_INDEX_NAME).isGranted(), is(true)); - assertThat(authzMap.get(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX).isGranted(), is(true)); + assertThat(authzMap.get(internalSecurityIndex).isGranted(), is(true)); 
assertTrue(superuserRole.indices().check(SearchAction.NAME)); assertFalse(superuserRole.indices().check("unknown")); @@ -734,7 +743,7 @@ public void testSuperuserRole() { assertThat(superuserRole.indices().allowedIndicesMatcher(randomFrom(IndexAction.NAME, DeleteIndexAction.NAME, SearchAction.NAME)) .test(RestrictedIndicesNames.SECURITY_INDEX_NAME), is(true)); assertThat(superuserRole.indices().allowedIndicesMatcher(randomFrom(IndexAction.NAME, DeleteIndexAction.NAME, SearchAction.NAME)) - .test(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX), is(true)); + .test(internalSecurityIndex), is(true)); } public void testLogstashSystemRole() { @@ -760,7 +769,7 @@ public void testLogstashSystemRole() { assertThat(logstashSystemRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), is(false)); - assertNoAccessAllowed(logstashSystemRole, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(logstashSystemRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testBeatsAdminRole() { @@ -798,7 +807,7 @@ public void testBeatsAdminRole() { assertThat(beatsAdminRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true)); assertThat(beatsAdminRole.indices().allowedIndicesMatcher(GetAction.NAME).test(index), is(true)); - assertNoAccessAllowed(beatsAdminRole, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(beatsAdminRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testBeatsSystemRole() { @@ -824,7 +833,7 @@ public void testBeatsSystemRole() { assertThat(logstashSystemRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), is(false)); - assertNoAccessAllowed(logstashSystemRole, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(logstashSystemRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testAPMSystemRole() { @@ -850,7 +859,7 @@ public void testAPMSystemRole() { assertThat(APMSystemRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), is(false)); - assertNoAccessAllowed(APMSystemRole, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(APMSystemRole, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testAPMUserRole() { @@ -938,7 +947,7 @@ public void testMachineLearningAdminRole() { assertOnlyReadAllowed(role, AuditorField.NOTIFICATIONS_INDEX); assertReadWriteDocsButNotDeleteIndexAllowed(role, AnnotationIndex.INDEX_NAME); - assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testMachineLearningUserRole() { @@ -1009,7 +1018,7 @@ public void testMachineLearningUserRole() { assertOnlyReadAllowed(role, AuditorField.NOTIFICATIONS_INDEX); assertReadWriteDocsButNotDeleteIndexAllowed(role, AnnotationIndex.INDEX_NAME); - assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testWatcherAdminRole() { @@ -1038,7 +1047,7 @@ public void testWatcherAdminRole() { assertOnlyReadAllowed(role, index); } - assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); } public void testWatcherUserRole() { @@ -1068,7 +1077,7 @@ public void testWatcherUserRole() { assertOnlyReadAllowed(role, index); } - assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); } private void 
assertReadWriteDocsButNotDeleteIndexAllowed(Role role, String index) { @@ -1092,7 +1101,7 @@ private void assertOnlyReadAllowed(Role role, String index) { assertThat(role.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(false)); assertThat(role.indices().allowedIndicesMatcher(BulkAction.NAME).test(index), is(false)); - assertNoAccessAllowed(role, RestrictedIndicesNames.NAMES_SET); + assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); } private void assertNoAccessAllowed(Role role, Collection indices) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java index 32f75f56da2a9..109722c37c086 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java @@ -5,15 +5,11 @@ */ package org.elasticsearch.xpack.core.ssl; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Description; import org.hamcrest.TypeSafeMatcher; -import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; -import org.junit.BeforeClass; import javax.net.ssl.X509ExtendedTrustManager; @@ -32,7 +28,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.regex.Pattern; @@ -45,34 +40,6 @@ public class RestrictedTrustManagerTests extends ESTestCase { private int numberOfClusters; private int numberOfNodes; - private static Locale restoreLocale; - - @BeforeClass - public static void ensureSupportedLocale() throws Exception { - Logger logger = LogManager.getLogger(RestrictedTrustManagerTests.class); - if (isUnusableLocale()) { - // See: https://github.com/elastic/elasticsearch/issues/33081 - logger.warn("Attempting to run RestrictedTrustManagerTests tests in an unusable locale in a FIPS JVM. 
Certificate expiration " + - "validation will fail, switching to English"); - restoreLocale = Locale.getDefault(); - Locale.setDefault(Locale.ENGLISH); - } - } - - private static boolean isUnusableLocale() { - return inFipsJvm() && (Locale.getDefault().toLanguageTag().equals("th-TH") - || Locale.getDefault().toLanguageTag().equals("ja-JP-u-ca-japanese-x-lvariant-JP") - || Locale.getDefault().toLanguageTag().equals("th-TH-u-nu-thai-x-lvariant-TH")); - } - - @AfterClass - public static void restoreLocale() throws Exception { - if (restoreLocale != null) { - Locale.setDefault(restoreLocale); - restoreLocale = null; - } - } - @Before public void readCertificates() throws GeneralSecurityException, IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java index 674e14ca0e196..83b1d80f563a5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java @@ -5,9 +5,24 @@ */ package org.elasticsearch.xpack.core.ssl; +import org.apache.http.HttpConnectionMetrics; +import org.apache.http.HttpEntityEnclosingRequest; +import org.apache.http.HttpException; +import org.apache.http.HttpRequest; +import org.apache.http.HttpResponse; import org.apache.http.client.methods.HttpGet; +import org.apache.http.config.RegistryBuilder; +import org.apache.http.conn.HttpConnectionFactory; +import org.apache.http.conn.ManagedHttpClientConnection; +import org.apache.http.conn.routing.HttpRoute; +import org.apache.http.conn.socket.ConnectionSocketFactory; +import org.apache.http.conn.socket.PlainConnectionSocketFactory; +import org.apache.http.conn.ssl.DefaultHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; +import org.apache.http.impl.conn.ManagedHttpClientConnectionFactory; +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import org.apache.http.ssl.SSLContextBuilder; import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.settings.MockSecureSettings; @@ -26,9 +41,13 @@ import javax.net.ssl.SSLContext; import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocket; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.net.InetAddress; +import java.net.Socket; import java.nio.file.AtomicMoveNotSupportedException; import java.nio.file.Files; import java.nio.file.Path; @@ -47,6 +66,8 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import static org.hamcrest.Matchers.containsString; @@ -91,7 +112,6 @@ public void testReloadingKeyStore() throws Exception { final Settings settings = Settings.builder() .put("path.home", createTempDir()) .put("xpack.security.transport.ssl.keystore.path", keystorePath) - .put("xpack.security.transport.ssl.supported_protocols", "TLSv1.2") .setSecureSettings(secureSettings) .build(); final Environment env = randomBoolean() ? 
null : TestEnvironment.newEnvironment(settings); @@ -150,7 +170,6 @@ public void testPEMKeyConfigReloading() throws Exception { .put("xpack.security.transport.ssl.key", keyPath) .put("xpack.security.transport.ssl.certificate", certPath) .putList("xpack.security.transport.ssl.certificate_authorities", certPath.toString()) - .put("xpack.security.transport.ssl.supported_protocols", "TLSv1.2") .setSecureSettings(secureSettings) .build(); final Environment env = randomBoolean() ? null : @@ -207,7 +226,6 @@ public void testReloadingTrustStore() throws Exception { secureSettings.setString("xpack.security.transport.ssl.truststore.secure_password", "testnode"); Settings settings = Settings.builder() .put("xpack.security.transport.ssl.truststore.path", trustStorePath) - .put("xpack.security.transport.ssl.supported_protocols", "TLSv1.2") .put("path.home", createTempDir()) .setSecureSettings(secureSettings) .build(); @@ -215,7 +233,7 @@ public void testReloadingTrustStore() throws Exception { // Create the MockWebServer once for both pre and post checks try (MockWebServer server = getSslServer(trustStorePath, "testnode")) { final Consumer trustMaterialPreChecks = (context) -> { - try (CloseableHttpClient client = HttpClients.custom().setSSLContext(context).build()) { + try (CloseableHttpClient client = createHttpClient(context)) { privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close()); } catch (Exception e) { throw new RuntimeException("Error connecting to the mock server", e); @@ -232,7 +250,7 @@ public void testReloadingTrustStore() throws Exception { // Client's truststore doesn't contain the server's certificate anymore so SSLHandshake should fail final Consumer trustMaterialPostChecks = (updatedContext) -> { - try (CloseableHttpClient client = HttpClients.custom().setSSLContext(updatedContext).build()) { + try (CloseableHttpClient client = createHttpClient(updatedContext)) { SSLHandshakeException sslException = expectThrows(SSLHandshakeException.class, () -> privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close())); assertThat(sslException.getCause().getMessage(), containsString("PKIX path building failed")); @@ -259,14 +277,13 @@ public void testReloadingPEMTrustConfig() throws Exception { Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_updated.crt"), updatedCert); Settings settings = Settings.builder() .putList("xpack.security.transport.ssl.certificate_authorities", serverCertPath.toString()) - .put("xpack.security.transport.ssl.supported_protocols", "TLSv1.2") .put("path.home", createTempDir()) .build(); Environment env = randomBoolean() ? 
null : TestEnvironment.newEnvironment(settings); // Create the MockWebServer once for both pre and post checks try (MockWebServer server = getSslServer(serverKeyPath, serverCertPath, "testnode")) { final Consumer trustMaterialPreChecks = (context) -> { - try (CloseableHttpClient client = HttpClients.custom().setSSLContext(context).build()) { + try (CloseableHttpClient client = createHttpClient(context)) { privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())));//.close()); } catch (Exception e) { throw new RuntimeException("Exception connecting to the mock server", e); @@ -283,7 +300,7 @@ public void testReloadingPEMTrustConfig() throws Exception { // Client doesn't trust the Server certificate anymore so SSLHandshake should fail final Consumer trustMaterialPostChecks = (updatedContext) -> { - try (CloseableHttpClient client = HttpClients.custom().setSSLContext(updatedContext).build()) { + try (CloseableHttpClient client = createHttpClient(updatedContext)) { SSLHandshakeException sslException = expectThrows(SSLHandshakeException.class, () -> privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close())); assertThat(sslException.getCause().getMessage(), containsString("PKIX path validation failed")); @@ -308,27 +325,37 @@ public void testReloadingKeyStoreException() throws Exception { secureSettings.setString("xpack.security.transport.ssl.keystore.secure_password", "testnode"); Settings settings = Settings.builder() .put("xpack.security.transport.ssl.keystore.path", keystorePath) - .put("xpack.security.transport.ssl.supported_protocols", "TLSv1.2") .setSecureSettings(secureSettings) .put("path.home", createTempDir()) .build(); Environment env = randomBoolean() ? null : TestEnvironment.newEnvironment(settings); final SSLService sslService = new SSLService(settings, env); final SSLConfiguration config = sslService.getSSLConfiguration("xpack.security.transport.ssl."); + final AtomicReference exceptionRef = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); new SSLConfigurationReloader(env, sslService, resourceWatcherService) { @Override void reloadSSLContext(SSLConfiguration configuration) { - fail("reload should not be called! 
[keystore reload exception]"); + try { + super.reloadSSLContext(configuration); + } catch (Exception e) { + exceptionRef.set(e); + throw e; + } finally { + latch.countDown(); + } } }; final SSLContext context = sslService.sslContextHolder(config).sslContext(); // truncate the keystore - try (OutputStream out = Files.newOutputStream(keystorePath, StandardOpenOption.TRUNCATE_EXISTING)) { + try (OutputStream ignore = Files.newOutputStream(keystorePath, StandardOpenOption.TRUNCATE_EXISTING)) { } - // we intentionally don't wait here as we rely on concurrency to catch a failure + latch.await(); + assertNotNull(exceptionRef.get()); + assertThat(exceptionRef.get().getMessage(), containsString("failed to initialize a KeyManagerFactory")); assertThat(sslService.sslContextHolder(config).sslContext(), sameInstance(context)); } @@ -350,27 +377,37 @@ public void testReloadingPEMKeyConfigException() throws Exception { .put("xpack.security.transport.ssl.key", keyPath) .put("xpack.security.transport.ssl.certificate", certPath) .putList("xpack.security.transport.ssl.certificate_authorities", certPath.toString(), clientCertPath.toString()) - .put("xpack.security.transport.ssl.supported_protocols", "TLSv1.2") .put("path.home", createTempDir()) .setSecureSettings(secureSettings) .build(); Environment env = randomBoolean() ? null : TestEnvironment.newEnvironment(settings); final SSLService sslService = new SSLService(settings, env); final SSLConfiguration config = sslService.getSSLConfiguration("xpack.security.transport.ssl."); + final AtomicReference exceptionRef = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); new SSLConfigurationReloader(env, sslService, resourceWatcherService) { @Override void reloadSSLContext(SSLConfiguration configuration) { - fail("reload should not be called! [pem key reload exception]"); + try { + super.reloadSSLContext(configuration); + } catch (Exception e) { + exceptionRef.set(e); + throw e; + } finally { + latch.countDown(); + } } }; final SSLContext context = sslService.sslContextHolder(config).sslContext(); // truncate the file - try (OutputStream os = Files.newOutputStream(keyPath, StandardOpenOption.TRUNCATE_EXISTING)) { + try (OutputStream ignore = Files.newOutputStream(keyPath, StandardOpenOption.TRUNCATE_EXISTING)) { } - // we intentionally don't wait here as we rely on concurrency to catch a failure + latch.await(); + assertNotNull(exceptionRef.get()); + assertThat(exceptionRef.get().getMessage(), containsString("Error parsing Private Key")); assertThat(sslService.sslContextHolder(config).sslContext(), sameInstance(context)); } @@ -386,27 +423,37 @@ public void testTrustStoreReloadException() throws Exception { secureSettings.setString("xpack.security.transport.ssl.truststore.secure_password", "testnode"); Settings settings = Settings.builder() .put("xpack.security.transport.ssl.truststore.path", trustStorePath) - .put("xpack.security.transport.ssl.supported_protocols", "TLSv1.2") .put("path.home", createTempDir()) .setSecureSettings(secureSettings) .build(); Environment env = randomBoolean() ? 
null : TestEnvironment.newEnvironment(settings); final SSLService sslService = new SSLService(settings, env); final SSLConfiguration config = sslService.getSSLConfiguration("xpack.security.transport.ssl."); + final AtomicReference exceptionRef = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); new SSLConfigurationReloader(env, sslService, resourceWatcherService) { @Override void reloadSSLContext(SSLConfiguration configuration) { - fail("reload should not be called! [truststore reload exception]"); + try { + super.reloadSSLContext(configuration); + } catch (Exception e) { + exceptionRef.set(e); + throw e; + } finally { + latch.countDown(); + } } }; final SSLContext context = sslService.sslContextHolder(config).sslContext(); // truncate the truststore - try (OutputStream os = Files.newOutputStream(trustStorePath, StandardOpenOption.TRUNCATE_EXISTING)) { + try (OutputStream ignore = Files.newOutputStream(trustStorePath, StandardOpenOption.TRUNCATE_EXISTING)) { } - // we intentionally don't wait here as we rely on concurrency to catch a failure + latch.await(); + assertNotNull(exceptionRef.get()); + assertThat(exceptionRef.get().getMessage(), containsString("failed to initialize a TrustManagerFactory")); assertThat(sslService.sslContextHolder(config).sslContext(), sameInstance(context)); } @@ -420,16 +467,24 @@ public void testPEMTrustReloadException() throws Exception { Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt"), clientCertPath); Settings settings = Settings.builder() .putList("xpack.security.transport.ssl.certificate_authorities", clientCertPath.toString()) - .put("xpack.security.transport.ssl.supported_protocols", "TLSv1.2") .put("path.home", createTempDir()) .build(); Environment env = randomBoolean() ? null : TestEnvironment.newEnvironment(settings); final SSLService sslService = new SSLService(settings, env); final SSLConfiguration config = sslService.sslConfiguration(settings.getByPrefix("xpack.security.transport.ssl.")); + final AtomicReference exceptionRef = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); new SSLConfigurationReloader(env, sslService, resourceWatcherService) { @Override void reloadSSLContext(SSLConfiguration configuration) { - fail("reload should not be called! 
[pem trust reload exception]"); + try { + super.reloadSSLContext(configuration); + } catch (Exception e) { + exceptionRef.set(e); + throw e; + } finally { + latch.countDown(); + } } }; @@ -442,9 +497,10 @@ void reloadSSLContext(SSLConfiguration configuration) { } atomicMoveIfPossible(updatedCert, clientCertPath); - // we intentionally don't wait here as we rely on concurrency to catch a failure + latch.await(); + assertNotNull(exceptionRef.get()); + assertThat(exceptionRef.get().getMessage(), containsString("failed to initialize a TrustManagerFactory")); assertThat(sslService.sslContextHolder(config).sslContext(), sameInstance(context)); - } private void validateSSLConfigurationIsReloaded(Settings settings, Environment env, Consumer preChecks, @@ -490,7 +546,6 @@ private static MockWebServer getSslServer(Path keyStorePath, String keyStorePass } final SSLContext sslContext = new SSLContextBuilder() .loadKeyMaterial(keyStore, keyStorePass.toCharArray()) - .setProtocol("TLSv1.2") .build(); MockWebServer server = new MockWebServer(sslContext, false); server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); @@ -506,7 +561,6 @@ private static MockWebServer getSslServer(Path keyPath, Path certPath, String pa CertParsingUtils.readCertificates(Collections.singletonList(certPath))); final SSLContext sslContext = new SSLContextBuilder() .loadKeyMaterial(keyStore, password.toCharArray()) - .setProtocol("TLSv1.2") .build(); MockWebServer server = new MockWebServer(sslContext, false); server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); @@ -523,9 +577,8 @@ private static CloseableHttpClient getSSLClient(Path trustStorePath, String trus } final SSLContext sslContext = new SSLContextBuilder() .loadTrustMaterial(trustStore, null) - .setProtocol("TLSv1.2") .build(); - return HttpClients.custom().setSSLContext(sslContext).build(); + return createHttpClient(sslContext); } /** @@ -543,9 +596,138 @@ private static CloseableHttpClient getSSLClient(List trustedCertificatePat } final SSLContext sslContext = new SSLContextBuilder() .loadTrustMaterial(trustStore, null) - .setProtocol("TLSv1.2") .build(); - return HttpClients.custom().setSSLContext(sslContext).build(); + return createHttpClient(sslContext); + } + + private static CloseableHttpClient createHttpClient(SSLContext sslContext) { + return HttpClients.custom() + .setConnectionManager(new PoolingHttpClientConnectionManager( + RegistryBuilder.create() + .register("http", PlainConnectionSocketFactory.getSocketFactory()) + .register("https", new SSLConnectionSocketFactory(sslContext, null, null, new DefaultHostnameVerifier())) + .build(), getHttpClientConnectionFactory(), null, null, -1, TimeUnit.MILLISECONDS)) + .build(); + } + + /** + * Creates our own HttpConnectionFactory that changes how the connection is closed to prevent issues with + * the MockWebServer going into an endless loop based on the way that HttpClient closes its connection. 
+ */ + private static HttpConnectionFactory getHttpClientConnectionFactory() { + return (route, config) -> { + ManagedHttpClientConnection delegate = ManagedHttpClientConnectionFactory.INSTANCE.create(route, config); + return new ManagedHttpClientConnection() { + @Override + public String getId() { + return delegate.getId(); + } + + @Override + public void bind(Socket socket) throws IOException { + delegate.bind(socket); + } + + @Override + public Socket getSocket() { + return delegate.getSocket(); + } + + @Override + public SSLSession getSSLSession() { + return delegate.getSSLSession(); + } + + @Override + public boolean isResponseAvailable(int timeout) throws IOException { + return delegate.isResponseAvailable(timeout); + } + + @Override + public void sendRequestHeader(HttpRequest request) throws HttpException, IOException { + delegate.sendRequestHeader(request); + } + + @Override + public void sendRequestEntity(HttpEntityEnclosingRequest request) throws HttpException, IOException { + delegate.sendRequestEntity(request); + } + + @Override + public HttpResponse receiveResponseHeader() throws HttpException, IOException { + return delegate.receiveResponseHeader(); + } + + @Override + public void receiveResponseEntity(HttpResponse response) throws HttpException, IOException { + delegate.receiveResponseEntity(response); + } + + @Override + public void flush() throws IOException { + delegate.flush(); + } + + @Override + public InetAddress getLocalAddress() { + return delegate.getLocalAddress(); + } + + @Override + public int getLocalPort() { + return delegate.getLocalPort(); + } + + @Override + public InetAddress getRemoteAddress() { + return delegate.getRemoteAddress(); + } + + @Override + public int getRemotePort() { + return delegate.getRemotePort(); + } + + @Override + public void close() throws IOException { + if (delegate.getSocket() instanceof SSLSocket) { + try (SSLSocket socket = (SSLSocket) delegate.getSocket()) { + } + } + delegate.close(); + } + + @Override + public boolean isOpen() { + return delegate.isOpen(); + } + + @Override + public boolean isStale() { + return delegate.isStale(); + } + + @Override + public void setSocketTimeout(int timeout) { + delegate.setSocketTimeout(timeout); + } + + @Override + public int getSocketTimeout() { + return delegate.getSocketTimeout(); + } + + @Override + public void shutdown() throws IOException { + delegate.shutdown(); + } + + @Override + public HttpConnectionMetrics getMetrics() { + return delegate.getMetrics(); + } + }; + }; } private static void privilegedConnect(CheckedRunnable runnable) throws Exception { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java index ae48de463e8f3..4dcefd05aff36 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java @@ -707,7 +707,7 @@ public void testThatSSLContextWithoutSettingsWorks() throws Exception { SSLContext sslContext = sslService.sslContext(sslService.sslConfiguration(Settings.EMPTY)); try (CloseableHttpClient client = HttpClients.custom().setSSLContext(sslContext).build()) { // Execute a GET on a site known to have a valid certificate signed by a trusted public CA - // This will result in a SSLHandshakeException if the SSLContext does not trust the CA, but the default + // This will result in an SSLHandshakeException if the 
SSLContext does not trust the CA, but the default // truststore trusts all common public CAs so the handshake will succeed privilegedConnect(() -> client.execute(new HttpGet("https://www.elastic.co/")).close()); } @@ -740,7 +740,7 @@ public void testThatSSLIOSessionStrategyWithoutSettingsWorks() throws Exception client.start(); // Execute a GET on a site known to have a valid certificate signed by a trusted public CA - // This will result in a SSLHandshakeException if the SSLContext does not trust the CA, but the default + // This will result in an SSLHandshakeException if the SSLContext does not trust the CA, but the default // truststore trusts all common public CAs so the handshake will succeed client.execute(new HttpHost("elastic.co", 443, "https"), new HttpGet("/"), new AssertionCallback()).get(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheckTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheckTests.java index ac73418800c77..3cb14180930d3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheckTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheckTests.java @@ -5,40 +5,115 @@ */ package org.elasticsearch.xpack.core.ssl; +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.license.License; +import org.elasticsearch.license.License.OperationMode; import org.elasticsearch.license.TestUtils; import org.elasticsearch.test.AbstractBootstrapCheckTestCase; -import java.util.EnumSet; - public class TLSLicenseBootstrapCheckTests extends AbstractBootstrapCheckTestCase { - public void testBootstrapCheck() throws Exception { + public void testBootstrapCheckOnEmptyMetadata() { assertTrue(new TLSLicenseBootstrapCheck().check(emptyContext).isSuccess()); assertTrue(new TLSLicenseBootstrapCheck().check(createTestContext(Settings.builder().put("xpack.security.transport.ssl.enabled" - , randomBoolean()).build(), MetaData.EMPTY_META_DATA)).isSuccess()); - int numIters = randomIntBetween(1,10); - for (int i = 0; i < numIters; i++) { - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); - EnumSet productionModes = EnumSet.of(License.OperationMode.GOLD, License.OperationMode.PLATINUM, - License.OperationMode.STANDARD); - MetaData.Builder builder = MetaData.builder(); - TestUtils.putLicense(builder, license); - MetaData build = builder.build(); - if (productionModes.contains(license.operationMode()) == false) { - assertTrue(new TLSLicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.transport.ssl.enabled", true).build(), build)).isSuccess()); - } else { - assertTrue(new TLSLicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.transport.ssl.enabled", false).build(), build)).isFailure()); - assertEquals("Transport SSL must be enabled for setups with production licenses. 
Please set " + - "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + - "[xpack.security.enabled] to [false]", - new TLSLicenseBootstrapCheck().check(createTestContext( - Settings.builder().put("xpack.security.transport.ssl.enabled", false).build(), build)).getMessage()); - } + , randomBoolean()).build(), MetaData.EMPTY_META_DATA)).isSuccess()); + } + + public void testBootstrapCheckFailureOnPremiumLicense() throws Exception { + final OperationMode mode = randomFrom(OperationMode.PLATINUM, OperationMode.GOLD, OperationMode.STANDARD); + final Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + // randomise between default-false & explicit-false + settings.put("xpack.security.transport.ssl.enabled", false); + } + if (randomBoolean()) { + // randomise between default-true & explicit-true + settings.put("xpack.security.enabled", true); + } + + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(mode, settings); + assertTrue("Expected bootstrap failure", result.isFailure()); + assertEquals("Transport SSL must be enabled if security is enabled on a [" + mode.description() + "] license. Please set " + + "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + + "[xpack.security.enabled] to [false]", + result.getMessage()); + } + + public void testBootstrapCheckSucceedsWithTlsEnabledOnPremiumLicense() throws Exception { + final OperationMode mode = randomFrom(OperationMode.PLATINUM, OperationMode.GOLD, OperationMode.STANDARD); + final Settings.Builder settings = Settings.builder().put("xpack.security.transport.ssl.enabled", true); + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(mode, settings); + assertSuccess(result); + } + + public void testBootstrapCheckFailureOnBasicLicense() throws Exception { + final Settings.Builder settings = Settings.builder().put("xpack.security.enabled", true); + if (randomBoolean()) { + // randomise between default-false & explicit-false + settings.put("xpack.security.transport.ssl.enabled", false); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.BASIC, settings); + assertTrue("Expected bootstrap failure", result.isFailure()); + assertEquals("Transport SSL must be enabled if security is enabled on a [basic] license. Please set " + + "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting " + + "[xpack.security.enabled] to [false]", + result.getMessage()); + } + + public void testBootstrapSucceedsIfSecurityIsNotEnabledOnBasicLicense() throws Exception { + final Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + // randomise between default-false & explicit-false + settings.put("xpack.security.enabled", false); + } + if (randomBoolean()) { + // it does not matter whether or not this is set, as security is not enabled. + settings.put("xpack.security.transport.ssl.enabled", randomBoolean()); } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.BASIC, settings); + assertSuccess(result); } + + public void testBootstrapSucceedsIfTlsIsEnabledOnBasicLicense() throws Exception { + final Settings.Builder settings = Settings.builder().put("xpack.security.transport.ssl.enabled", true); + if (randomBoolean()) { + // it does not matter whether or not this is set, as TLS is enabled. 
+ settings.put("xpack.security.enabled", randomBoolean()); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.BASIC, settings); + assertSuccess(result); + } + + public void testBootstrapCheckAlwaysSucceedsOnTrialLicense() throws Exception { + final Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + // it does not matter whether this is set, or to which value. + settings.put("xpack.security.enabled", randomBoolean()); + } + if (randomBoolean()) { + // it does not matter whether this is set, or to which value. + settings.put("xpack.security.transport.ssl.enabled", randomBoolean()); + } + final BootstrapCheck.BootstrapCheckResult result = runBootstrapCheck(OperationMode.TRIAL, settings); + assertSuccess(result); + } + + public BootstrapCheck.BootstrapCheckResult runBootstrapCheck(OperationMode mode, Settings.Builder settings) throws Exception { + final License license = TestUtils.generateSignedLicense(mode.description(), TimeValue.timeValueHours(24)); + MetaData.Builder builder = MetaData.builder(); + TestUtils.putLicense(builder, license); + MetaData metaData = builder.build(); + final BootstrapContext context = createTestContext(settings.build(), metaData); + return new TLSLicenseBootstrapCheck().check(context); + } + + public void assertSuccess(BootstrapCheck.BootstrapCheckResult result) { + if (result.isFailure()) { + fail("Bootstrap check failed unexpectedly: " + result.getMessage()); + } + } + } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java index 863908ab7fa72..28d0ccd682f52 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; @@ -27,6 +26,8 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.test.rest.ESRestTestCase.allowTypesRemovalWarnings; + public final class XPackRestTestHelper { public static final List ML_PRE_V660_TEMPLATES = Collections.unmodifiableList( @@ -78,7 +79,7 @@ public static void waitForTemplates(RestClient client, List templateName Map response; try { final Request getRequest = new Request("GET", "_template/" + template); - getRequest.setOptions(ESRestTestCase.allowTypeRemovalWarnings()); + getRequest.setOptions(allowTypesRemovalWarnings()); String string = EntityUtils.toString(client.performRequest(getRequest).getEntity()); response = XContentHelper.convertToMap(JsonXContent.jsonXContent, string, false); } catch (ResponseException e) { diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java new file mode 100644 index 0000000000000..1a71f094fc132 --- /dev/null +++ 
b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.deprecation; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.PipelineConfiguration; +import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static org.elasticsearch.search.SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING; + +public class ClusterDeprecationChecks { + private static final Logger logger = LogManager.getLogger(ClusterDeprecationChecks.class); + + @SuppressWarnings("unchecked") + static DeprecationIssue checkUserAgentPipelines(ClusterState state) { + List pipelines = IngestService.getPipelines(state); + + List pipelinesWithDeprecatedEcsConfig = pipelines.stream() + .filter(Objects::nonNull) + .filter(pipeline -> { + Map pipelineConfig = pipeline.getConfigAsMap(); + + List>> processors = + (List>>) pipelineConfig.get("processors"); + return processors.stream() + .filter(Objects::nonNull) + .filter(processor -> processor.containsKey("user_agent")) + .map(processor -> processor.get("user_agent")) + .anyMatch(processorConfig -> processorConfig.containsKey("ecs")); + }) + .map(PipelineConfiguration::getId) + .sorted() // Make the warning consistent for testing purposes + .collect(Collectors.toList()); + if (pipelinesWithDeprecatedEcsConfig.isEmpty() == false) { + return new DeprecationIssue(DeprecationIssue.Level.WARNING, + "User-Agent ingest plugin will always use ECS-formatted output", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-8.0.html" + + "#ingest-user-agent-ecs-always", + "Ingest pipelines " + pipelinesWithDeprecatedEcsConfig + " uses the [ecs] option which needs to be removed to work in 8.0"); + } + return null; + + } + + static DeprecationIssue checkTemplatesWithTooManyFields(ClusterState state) { + Integer maxClauseCount = INDICES_MAX_CLAUSE_COUNT_SETTING.get(state.getMetaData().settings()); + List templatesOverLimit = new ArrayList<>(); + state.getMetaData().getTemplates().forEach((templateCursor) -> { + AtomicInteger maxFields = new AtomicInteger(0); + String templateName = templateCursor.key; + boolean defaultFieldSet = templateCursor.value.settings().get(IndexSettings.DEFAULT_FIELD_SETTING.getKey()) != null; + templateCursor.value.getMappings().forEach((mappingCursor) -> { + MappingMetaData mappingMetaData = new MappingMetaData(mappingCursor.value); + if (mappingMetaData != null && defaultFieldSet == false) { + maxFields.set(IndexDeprecationChecks.countFieldsRecursively(mappingMetaData.type(), mappingMetaData.sourceAsMap())); + } + if (maxFields.get() > maxClauseCount) { + templatesOverLimit.add(templateName); + } + }); + }); + + if (templatesOverLimit.isEmpty() == false) { + return new DeprecationIssue(DeprecationIssue.Level.WARNING, + "Fields in index 
template exceed automatic field expansion limit", + "https://www.elastic.co/guide/en/elasticsearch/reference/7.0/breaking-changes-7.0.html" + + "#_limiting_the_number_of_auto_expanded_fields", + "Index templates " + templatesOverLimit + " have a number of fields which exceeds the automatic field expansion " + + "limit of [" + maxClauseCount + "] and does not have [" + IndexSettings.DEFAULT_FIELD_SETTING.getKey() + "] set, " + + "which may cause queries which use automatic field expansion, such as query_string, simple_query_string, and " + + "multi_match to fail if fields are not explicitly specified in the query."); + } + return null; + } +} diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index 97e273b213b79..b70c7c4fa32cd 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -9,6 +9,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -31,7 +32,11 @@ private DeprecationChecks() { } static List> CLUSTER_SETTINGS_CHECKS = - Collections.emptyList(); + Collections.unmodifiableList(Arrays.asList( + ClusterDeprecationChecks::checkUserAgentPipelines, + ClusterDeprecationChecks::checkTemplatesWithTooManyFields + )); + static List> NODE_SETTINGS_CHECKS = Collections.unmodifiableList(Arrays.asList( @@ -40,9 +45,11 @@ private DeprecationChecks() { static List> INDEX_SETTINGS_CHECKS = Collections.unmodifiableList(Arrays.asList( - IndexDeprecationChecks::oldIndicesCheck)); + IndexDeprecationChecks::oldIndicesCheck, + IndexDeprecationChecks::tooManyFieldsCheck + )); - static List> ML_SETTINGS_CHECKS = + static List> ML_SETTINGS_CHECKS = Collections.unmodifiableList(Arrays.asList( MlDeprecationChecks::checkDataFeedAggregations, MlDeprecationChecks::checkDataFeedQuery diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java index 7defb80ccaa6a..4a56cb78dd144 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java @@ -10,11 +10,16 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; import java.util.function.Function; @@ -40,7 +45,7 @@ private static void fieldLevelMappingIssue(IndexMetaData indexMetaData, BiConsum * @return a list of 
issues found in fields */ @SuppressWarnings("unchecked") - private static List findInPropertiesRecursively(String type, Map parentMap, + static List findInPropertiesRecursively(String type, Map parentMap, Function, Boolean> predicate) { List issues = new ArrayList<>(); Map properties = (Map) parentMap.get("properties"); @@ -84,4 +89,77 @@ static DeprecationIssue oldIndicesCheck(IndexMetaData indexMetaData) { } return null; } + + static DeprecationIssue tooManyFieldsCheck(IndexMetaData indexMetaData) { + if (indexMetaData.getSettings().get(IndexSettings.DEFAULT_FIELD_SETTING.getKey()) == null) { + AtomicInteger fieldCount = new AtomicInteger(0); + + fieldLevelMappingIssue(indexMetaData, ((mappingMetaData, sourceAsMap) -> { + fieldCount.addAndGet(countFieldsRecursively(mappingMetaData.type(), sourceAsMap)); + })); + + // We can't get to the setting `indices.query.bool.max_clause_count` from here, so just check the default of that setting. + // It's also much better practice to set `index.query.default_field` than `indices.query.bool.max_clause_count` - there's a + // reason we introduced the limit. + if (fieldCount.get() > 1024) { + return new DeprecationIssue(DeprecationIssue.Level.WARNING, + "Number of fields exceeds automatic field expansion limit", + "https://www.elastic.co/guide/en/elasticsearch/reference/7.0/breaking-changes-7.0.html" + + "#_limiting_the_number_of_auto_expanded_fields", + "This index has [" + fieldCount.get() + "] fields, which exceeds the automatic field expansion limit of 1024 " + + "and does not have [" + IndexSettings.DEFAULT_FIELD_SETTING.getKey() + "] set, which may cause queries which use " + + "automatic field expansion, such as query_string, simple_query_string, and multi_match to fail if fields are not " + + "explicitly specified in the query."); + } + } + return null; + } + + + private static final Set TYPES_THAT_DONT_COUNT; + static { + HashSet typesThatDontCount = new HashSet<>(); + typesThatDontCount.add("binary"); + typesThatDontCount.add("geo_point"); + typesThatDontCount.add("geo_shape"); + TYPES_THAT_DONT_COUNT = Collections.unmodifiableSet(typesThatDontCount); + } + /* Counts the number of fields in a mapping, designed to count the as closely as possible to + * org.elasticsearch.index.search.QueryParserHelper#checkForTooManyFields + */ + @SuppressWarnings("unchecked") + static int countFieldsRecursively(String type, Map parentMap) { + int fields = 0; + Map properties = (Map) parentMap.get("properties"); + if (properties == null) { + return fields; + } + for (Map.Entry entry : properties.entrySet()) { + Map valueMap = (Map) entry.getValue(); + if (valueMap.containsKey("type") + && (valueMap.get("type").equals("object") && valueMap.containsKey("properties") == false) == false + && (TYPES_THAT_DONT_COUNT.contains(valueMap.get("type")) == false)) { + fields++; + } + + Map values = (Map) valueMap.get("fields"); + if (values != null) { + for (Map.Entry multifieldEntry : values.entrySet()) { + Map multifieldValueMap = (Map) multifieldEntry.getValue(); + if (multifieldValueMap.containsKey("type") + && (TYPES_THAT_DONT_COUNT.contains(valueMap.get("type")) == false)) { + fields++; + } + if (multifieldValueMap.containsKey("properties")) { + fields += countFieldsRecursively(type, multifieldValueMap); + } + } + } + if (valueMap.containsKey("properties")) { + fields += countFieldsRecursively(type, valueMap); + } + } + + return fields; + } } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecks.java 
b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecks.java index 187a8669574cd..ebcf160baeedd 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecks.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.deprecation; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -18,8 +19,8 @@ final class MlDeprecationChecks { private MlDeprecationChecks() { } - static DeprecationIssue checkDataFeedQuery(DatafeedConfig datafeedConfig) { - List deprecations = datafeedConfig.getQueryDeprecations(); + static DeprecationIssue checkDataFeedQuery(DatafeedConfig datafeedConfig, NamedXContentRegistry xContentRegistry) { + List deprecations = datafeedConfig.getQueryDeprecations(xContentRegistry); if (deprecations.isEmpty()) { return null; } else { @@ -30,8 +31,8 @@ static DeprecationIssue checkDataFeedQuery(DatafeedConfig datafeedConfig) { } } - static DeprecationIssue checkDataFeedAggregations(DatafeedConfig datafeedConfig) { - List deprecations = datafeedConfig.getAggDeprecations(); + static DeprecationIssue checkDataFeedAggregations(DatafeedConfig datafeedConfig, NamedXContentRegistry xContentRegistry) { + List deprecations = datafeedConfig.getAggDeprecations(xContentRegistry); if (deprecations.isEmpty()) { return null; } else { diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java index bac290d41a5eb..6556a8ad0eff5 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.threadpool.ThreadPool; @@ -48,18 +49,20 @@ public class TransportDeprecationInfoAction extends TransportMasterNodeReadActio private final NodeClient client; private final IndexNameExpressionResolver indexNameExpressionResolver; private final Settings settings; + private final NamedXContentRegistry xContentRegistry; @Inject public TransportDeprecationInfoAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - XPackLicenseState licenseState, NodeClient client) { + XPackLicenseState licenseState, NodeClient client, NamedXContentRegistry xContentRegistry) { super(DeprecationInfoAction.NAME, transportService, clusterService, threadPool, actionFilters, DeprecationInfoAction.Request::new, indexNameExpressionResolver); this.licenseState = licenseState; this.client = client; this.indexNameExpressionResolver = indexNameExpressionResolver; this.settings = settings; + this.xContentRegistry = xContentRegistry; } @Override @@ -99,7 +102,7 @@ 
protected final void masterOperation(final DeprecationInfoAction.Request request getDatafeedConfigs(ActionListener.wrap( datafeeds -> { listener.onResponse( - DeprecationInfoAction.Response.from(state, indexNameExpressionResolver, + DeprecationInfoAction.Response.from(state, xContentRegistry, indexNameExpressionResolver, request.indices(), request.indicesOptions(), datafeeds, response, INDEX_SETTINGS_CHECKS, CLUSTER_SETTINGS_CHECKS, ML_SETTINGS_CHECKS)); diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecksTests.java new file mode 100644 index 0000000000000..990958e766c44 --- /dev/null +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecksTests.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.deprecation; + +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.deprecation.DeprecationChecks.CLUSTER_SETTINGS_CHECKS; +import static org.elasticsearch.xpack.deprecation.IndexDeprecationChecksTests.addRandomFields; + +public class ClusterDeprecationChecksTests extends ESTestCase { + + public void testUserAgentEcsCheck() { + PutPipelineRequest ecsFalseRequest = new PutPipelineRequest("ecs_false", + new BytesArray("{\n" + + " \"description\" : \"This has ecs set to false\",\n" + + " \"processors\" : [\n" + + " {\n" + + " \"user_agent\" : {\n" + + " \"field\" : \"agent\",\n" + + " \"ecs\" : false\n" + + " }\n" + + " }\n" + + " ]\n" + + "}"), XContentType.JSON); + PutPipelineRequest ecsNullRequest = new PutPipelineRequest("ecs_null", + new BytesArray("{\n" + + " \"description\" : \"This has ecs set to false\",\n" + + " \"processors\" : [\n" + + " {\n" + + " \"user_agent\" : {\n" + + " \"field\" : \"agent\"\n" + + " }\n" + + " }\n" + + " ]\n" + + "}"), XContentType.JSON); + PutPipelineRequest ecsTrueRequest = new PutPipelineRequest("ecs_true", + new BytesArray("{\n" + + " \"description\" : \"This has ecs set to false\",\n" + + " \"processors\" : [\n" + + " {\n" + + " \"user_agent\" : {\n" + + " \"field\" : \"agent\",\n" + + " \"ecs\" : true\n" + + " }\n" + + " }\n" + + " ]\n" + + "}"), XContentType.JSON); + + ClusterState state = ClusterState.builder(new ClusterName("test")).build(); + state = 
IngestService.innerPut(ecsTrueRequest, state); + state = IngestService.innerPut(ecsFalseRequest, state); + state = IngestService.innerPut(ecsNullRequest, state); + + final ClusterState finalState = state; + List issues = DeprecationChecks.filterChecks(CLUSTER_SETTINGS_CHECKS, c -> c.apply(finalState)); + + DeprecationIssue expected = new DeprecationIssue(DeprecationIssue.Level.WARNING, + "User-Agent ingest plugin will always use ECS-formatted output", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-8.0.html" + + "#ingest-user-agent-ecs-always", + "Ingest pipelines [ecs_false, ecs_true] uses the [ecs] option which needs to be removed to work in 8.0"); + assertEquals(singletonList(expected), issues); + } + + public void testTemplateWithTooManyFields() throws IOException { + String tooManyFieldsTemplate = randomAlphaOfLength(5); + String tooManyFieldsWithDefaultFieldsTemplate = randomAlphaOfLength(6); + String goodTemplateName = randomAlphaOfLength(7); + + // A template with too many fields + int tooHighFieldCount = randomIntBetween(1025, 10_000); // 10_000 is arbitrary + XContentBuilder badMappingBuilder = jsonBuilder(); + badMappingBuilder.startObject(); + { + badMappingBuilder.startObject("_doc"); + { + badMappingBuilder.startObject("properties"); + { + addRandomFields(tooHighFieldCount, badMappingBuilder); + } + badMappingBuilder.endObject(); + } + badMappingBuilder.endObject(); + } + badMappingBuilder.endObject(); + + // A template with an OK number of fields + int okFieldCount = randomIntBetween(1, 1024); + XContentBuilder goodMappingBuilder = jsonBuilder(); + goodMappingBuilder.startObject(); + { + goodMappingBuilder.startObject("_doc"); + { + goodMappingBuilder.startObject("properties"); + { + addRandomFields(okFieldCount, goodMappingBuilder); + } + goodMappingBuilder.endObject(); + } + goodMappingBuilder.endObject(); + } + goodMappingBuilder.endObject(); + + final ClusterState state = ClusterState.builder(new ClusterName(randomAlphaOfLength(5))) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(tooManyFieldsTemplate) + .patterns(Collections.singletonList(randomAlphaOfLength(5))) + .putMapping("_doc", Strings.toString(badMappingBuilder)) + .build()) + .put(IndexTemplateMetaData.builder(tooManyFieldsWithDefaultFieldsTemplate) + .patterns(Collections.singletonList(randomAlphaOfLength(5))) + .putMapping("_doc", Strings.toString(badMappingBuilder)) + .settings(Settings.builder() + .put(IndexSettings.DEFAULT_FIELD_SETTING.getKey(), + Collections.singletonList(randomAlphaOfLength(5)).toString())) + .build()) + .put(IndexTemplateMetaData.builder(goodTemplateName) + .patterns(Collections.singletonList(randomAlphaOfLength(5))) + .putMapping("_doc", Strings.toString(goodMappingBuilder)) + .build()) + .build()) + .build(); + + List issues = DeprecationChecks.filterChecks(CLUSTER_SETTINGS_CHECKS, c -> c.apply(state)); + + DeprecationIssue expected = new DeprecationIssue(DeprecationIssue.Level.WARNING, + "Fields in index template exceed automatic field expansion limit", + "https://www.elastic.co/guide/en/elasticsearch/reference/7.0/breaking-changes-7.0.html" + + "#_limiting_the_number_of_auto_expanded_fields", + "Index templates " + Collections.singletonList(tooManyFieldsTemplate) + " have a number of fields which exceeds the " + + "automatic field expansion limit of [1024] and does not have [" + IndexSettings.DEFAULT_FIELD_SETTING.getKey() + "] set, " + + "which may cause queries which use automatic field expansion, such as query_string, 
simple_query_string, and multi_match " + + "to fail if fields are not explicitly specified in the query."); + assertEquals(singletonList(expected), issues); + } +} diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index b0f5a556ac627..882151553a1b6 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -8,13 +8,20 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; +import java.io.IOException; +import java.util.ArrayList; import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; import static java.util.Collections.singletonList; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; public class IndexDeprecationChecksTests extends ESTestCase { @@ -34,4 +41,107 @@ public void testOldIndicesCheck() { List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); assertEquals(singletonList(expected), issues); } + + public void testTooManyFieldsCheck() throws IOException { + String simpleMapping = "{\n" + + " \"properties\": {\n" + + " \"some_field\": {\n" + + " \"type\": \"text\"\n" + + " },\n" + + " \"other_field\": {\n" + + " \"type\": \"text\",\n" + + " \"properties\": {\n" + + " \"raw\": {\"type\": \"keyword\"}\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; + + IndexMetaData simpleIndex = IndexMetaData.builder(randomAlphaOfLengthBetween(5, 10)) + .settings(settings(Version.V_7_0_0)) + .numberOfShards(randomIntBetween(1, 100)) + .numberOfReplicas(randomIntBetween(1, 100)) + .putMapping("_doc", simpleMapping) + .build(); + List noIssues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(simpleIndex)); + assertEquals(0, noIssues.size()); + + // Test that it catches having too many fields + int fieldCount = randomIntBetween(1025, 10_000); // 10_000 is arbitrary + + XContentBuilder mappingBuilder = jsonBuilder(); + mappingBuilder.startObject(); + { + mappingBuilder.startObject("properties"); + { + addRandomFields(fieldCount, mappingBuilder); + } + mappingBuilder.endObject(); + } + mappingBuilder.endObject(); + + IndexMetaData tooManyFieldsIndex = IndexMetaData.builder(randomAlphaOfLengthBetween(5, 10)) + .settings(settings(Version.V_7_0_0)) + .numberOfShards(randomIntBetween(1, 100)) + .numberOfReplicas(randomIntBetween(1, 100)) + .putMapping("_doc", Strings.toString(mappingBuilder)) + .build(); + DeprecationIssue expected = new DeprecationIssue(DeprecationIssue.Level.WARNING, + "Number of fields exceeds automatic field expansion limit", + "https://www.elastic.co/guide/en/elasticsearch/reference/7.0/breaking-changes-7.0.html" + + "#_limiting_the_number_of_auto_expanded_fields", + "This index has [" + fieldCount + "] fields, which exceeds the automatic field expansion limit of 1024 " + + "and does not have [" + 
IndexSettings.DEFAULT_FIELD_SETTING.getKey() + "] set, which may cause queries which use " + + "automatic field expansion, such as query_string, simple_query_string, and multi_match to fail if fields are not " + + "explicitly specified in the query."); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(tooManyFieldsIndex)); + assertEquals(singletonList(expected), issues); + + // Check that it's okay to have too many fields as long as `index.query.default_field` is set + IndexMetaData tooManyFieldsOk = IndexMetaData.builder(randomAlphaOfLengthBetween(5, 10)) + .settings(settings(Version.V_7_0_0) + .put(IndexSettings.DEFAULT_FIELD_SETTING.getKey(), randomAlphaOfLength(5))) + .numberOfShards(randomIntBetween(1, 100)) + .numberOfReplicas(randomIntBetween(1, 100)) + .putMapping("_doc", Strings.toString(mappingBuilder)) + .build(); + List withDefaultFieldIssues = + DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(tooManyFieldsOk)); + assertEquals(0, withDefaultFieldIssues.size()); + } + + static void addRandomFields(final int fieldLimit, + XContentBuilder mappingBuilder) throws IOException { + AtomicInteger fieldCount = new AtomicInteger(0); + List existingFieldNames = new ArrayList<>(); + while (fieldCount.get() < fieldLimit) { + addRandomField(existingFieldNames, fieldLimit, mappingBuilder, fieldCount); + } + } + + private static void addRandomField(List existingFieldNames, final int fieldLimit, + XContentBuilder mappingBuilder, AtomicInteger fieldCount) throws IOException { + if (fieldCount.get() > fieldLimit) { + return; + } + String newField = randomValueOtherThanMany(existingFieldNames::contains, () -> randomAlphaOfLengthBetween(2, 20)); + existingFieldNames.add(newField); + mappingBuilder.startObject(newField); + { + if (rarely()) { + mappingBuilder.startObject("properties"); + { + int subfields = randomIntBetween(1, 10); + while (existingFieldNames.size() < subfields && fieldCount.get() <= fieldLimit) { + addRandomField(existingFieldNames, fieldLimit, mappingBuilder, fieldCount); + } + } + mappingBuilder.endObject(); + } else { + mappingBuilder.field("type", randomFrom("array", "range", "boolean", "date", "ip", "keyword", "text")); + fieldCount.incrementAndGet(); + } + } + mappingBuilder.endObject(); + } } diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecksTests.java index 6d93ed1873184..12ed824bf1fbf 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecksTests.java @@ -6,7 +6,10 @@ package org.elasticsearch.xpack.deprecation; -import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -14,6 +17,12 @@ public class MlDeprecationChecksTests extends ESTestCase { + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + 
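checkDataFeedQuery and checkDataFeedAggregations now take a NamedXContentRegistry because the deprecations are derived by parsing the datafeed's stored query and aggregation maps; without the default SearchModule entries, named queries such as query_string cannot be resolved. A minimal sketch of the wiring, reusing the classes already imported in this test and assuming a DatafeedConfig named datafeed:

SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
NamedXContentRegistry registry = new NamedXContentRegistry(searchModule.getNamedXContents());
DeprecationIssue issue = MlDeprecationChecks.checkDataFeedQuery(datafeed, registry); // null when nothing is deprecated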
@Override protected boolean enableWarningsCheck() { return false; @@ -22,8 +31,8 @@ protected boolean enableWarningsCheck() { public void testCheckDataFeedQuery() { DatafeedConfig.Builder goodDatafeed = new DatafeedConfig.Builder("good-df", "job-id"); goodDatafeed.setIndices(Collections.singletonList("some-index")); - goodDatafeed.setParsedQuery(new TermQueryBuilder("foo", "bar")); - assertNull(MlDeprecationChecks.checkDataFeedQuery(goodDatafeed.build())); + goodDatafeed.setParsedQuery(QueryBuilders.termQuery("foo", "bar")); + assertNull(MlDeprecationChecks.checkDataFeedQuery(goodDatafeed.build(), xContentRegistry())); DatafeedConfig.Builder deprecatedDatafeed = new DatafeedConfig.Builder("df-with-deprecated-query", "job-id"); deprecatedDatafeed.setIndices(Collections.singletonList("some-index")); @@ -34,7 +43,9 @@ public void testCheckDataFeedQuery() { qs.put("query", "foo"); qs.put("use_dis_max", true); Map query = Collections.singletonMap("query_string", qs); - deprecatedDatafeed.setQuery(query); + deprecatedDatafeed.setQueryProvider(new QueryProvider(query, + XContentObjectTransformer.queryBuilderTransformer(xContentRegistry()).fromMap(query), + null)); DeprecationIssue issue = MlDeprecationChecks.checkDataFeedQuery(deprecatedDatafeed.build()); assertNotNull(issue); diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java index 470260a7efac0..130d6deed567f 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java @@ -41,6 +41,8 @@ public class RestGraphAction extends XPackRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestGraphAction.class)); + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + + " Specifying types in graph requests is deprecated."; public static final ParseField TIMEOUT_FIELD = new ParseField("timeout"); public static final ParseField SIGNIFICANCE_FIELD = new ParseField("use_significance"); @@ -111,7 +113,10 @@ public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPa parseHop(parser, currentHop, graphRequest); } - graphRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); + if (request.hasParam("type")) { + deprecationLogger.deprecatedAndMaybeLog("graph_with_types", TYPES_DEPRECATION_MESSAGE); + graphRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); + } return channel -> client.es().execute(INSTANCE, graphRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/rest/action/RestGraphActionTests.java b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/rest/action/RestGraphActionTests.java new file mode 100644 index 0000000000000..486ac4e70e346 --- /dev/null +++ b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/rest/action/RestGraphActionTests.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.graph.rest.action; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.test.rest.RestActionTestCase; +import org.junit.Before; + +public class RestGraphActionTests extends RestActionTestCase { + + @Before + public void setUpAction() { + new RestGraphAction(Settings.EMPTY, controller()); + } + + public void testTypeInPath() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.GET) + .withPath("/some_index/some_type/_graph/explore") + .withContent(new BytesArray("{}"), XContentType.JSON) + .build(); + + dispatchRequest(request); + assertWarnings(RestGraphAction.TYPES_DEPRECATION_MESSAGE); + } + +} diff --git a/x-pack/plugin/ilm/qa/build.gradle b/x-pack/plugin/ilm/qa/build.gradle index f2f60527ec4c1..74412a094b489 100644 --- a/x-pack/plugin/ilm/qa/build.gradle +++ b/x-pack/plugin/ilm/qa/build.gradle @@ -1,7 +1,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.build' -unitTest.enabled = false +test.enabled = false dependencies { compile project(':test:framework') diff --git a/x-pack/plugin/ilm/qa/multi-cluster/build.gradle b/x-pack/plugin/ilm/qa/multi-cluster/build.gradle index 34b7cf9e44c58..76dbf676d738f 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ilm/qa/multi-cluster/build.gradle @@ -50,11 +50,12 @@ followClusterTestCluster { followClusterTestRunner { systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" /* To support taking index snapshots, we have to set path.repo setting */ systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") finalizedBy 'leaderClusterTestCluster#stop' } check.dependsOn followClusterTest -unitTest.enabled = false // no unit tests for this module, only the rest integration test +test.enabled = false // no unit tests for this module, only the rest integration test diff --git a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java index b3c93acb97b99..b185a425934eb 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java +++ b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectPath; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; @@ -30,6 +31,7 @@ import java.io.IOException; import java.io.InputStream; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -37,6 +39,7 @@ 
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class CCRIndexLifecycleIT extends ESCCRRestTestCase { @@ -326,9 +329,6 @@ public void testUnfollowInjectedBeforeShrink() throws Exception { } } - // Specifically, this is waiting for this bullet to be complete: - // - integrate shard history retention leases with cross-cluster replication - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37165") public void testCannotShrinkLeaderIndex() throws Exception { String indexName = "shrink-leader-test"; String shrunkenIndexName = "shrink-" + indexName; @@ -361,11 +361,7 @@ public void testCannotShrinkLeaderIndex() throws Exception { changePolicyRequest.setEntity(changePolicyEntity); assertOK(leaderClient.performRequest(changePolicyRequest)); - index(leaderClient, indexName, "1"); - assertDocumentExists(leaderClient, indexName, "1"); - assertBusy(() -> { - assertDocumentExists(client(), indexName, "1"); // Sanity check that following_index setting has been set, so that we can verify later that this setting has been unset: assertThat(getIndexSetting(client(), indexName, "index.xpack.ccr.following_index"), equalTo("true")); @@ -374,6 +370,20 @@ public void testCannotShrinkLeaderIndex() throws Exception { assertILMPolicy(client(), indexName, policyName, "hot", "unfollow", "wait-for-indexing-complete"); }); + // Index a bunch of documents and wait for them to be replicated + for (int i = 0; i < 50; i++) { + index(leaderClient, indexName, Integer.toString(i)); + } + assertBusy(() -> { + for (int i = 0; i < 50; i++) { + assertDocumentExists(client(), indexName, Integer.toString(i)); + } + }); + + // Then make sure both leader and follower are still both waiting + assertILMPolicy(leaderClient, indexName, policyName, "warm", "shrink", "wait-for-shard-history-leases"); + assertILMPolicy(client(), indexName, policyName, "hot", "unfollow", "wait-for-indexing-complete"); + // Manually set this to kick the process updateIndexSettings(leaderClient, indexName, Settings.builder() .put("index.lifecycle.indexing_complete", true) @@ -396,6 +406,97 @@ public void testCannotShrinkLeaderIndex() throws Exception { } + public void testILMUnfollowFailsToRemoveRetentionLeases() throws Exception { + final String leaderIndex = "leader"; + final String followerIndex = "follower"; + final String policyName = "unfollow_only_policy"; + + if ("leader".equals(targetCluster)) { + Settings indexSettings = Settings.builder() + .put("index.soft_deletes.enabled", true) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.lifecycle.name", policyName) // this policy won't exist on the leader, that's fine + .build(); + createIndex(leaderIndex, indexSettings, "", ""); + ensureGreen(leaderIndex); + } else if ("follow".equals(targetCluster)) { + try (RestClient leaderClient = buildLeaderClient()) { + String leaderRemoteClusterSeed = System.getProperty("tests.leader_remote_cluster_seed"); + configureRemoteClusters("other_remote", leaderRemoteClusterSeed); + assertBusy(() -> { + Map localConnection = (Map) toMap(client() + .performRequest(new Request("GET", "/_remote/info"))) + .get("other_remote"); + assertThat(localConnection, notNullValue()); + assertThat(localConnection.get("connected"), is(true)); + }); + putUnfollowOnlyPolicy(client(), policyName); + // Set up the 
follower + followIndex("other_remote", leaderIndex, followerIndex); + ensureGreen(followerIndex); + // Pause ILM so that this policy doesn't proceed until we want it to + client().performRequest(new Request("POST", "/_ilm/stop")); + + // Set indexing complete and wait for it to be replicated + updateIndexSettings(leaderClient, leaderIndex, Settings.builder() + .put("index.lifecycle.indexing_complete", true) + .build() + ); + assertBusy(() -> { + assertThat(getIndexSetting(client(), followerIndex, "index.lifecycle.indexing_complete"), is("true")); + }); + + // Remove remote cluster alias: + configureRemoteClusters("other_remote", null); + assertBusy(() -> { + Map localConnection = (Map) toMap(client() + .performRequest(new Request("GET", "/_remote/info"))) + .get("other_remote"); + assertThat(localConnection, nullValue()); + }); + // Then add it back with an incorrect seed node: + // (unfollow api needs a remote cluster alias) + configureRemoteClusters("other_remote", "localhost:9999"); + assertBusy(() -> { + Map localConnection = (Map) toMap(client() + .performRequest(new Request("GET", "/_remote/info"))) + .get("other_remote"); + assertThat(localConnection, notNullValue()); + assertThat(localConnection.get("connected"), is(false)); + + Request statsRequest = new Request("GET", "/" + followerIndex + "/_ccr/stats"); + Map response = toMap(client().performRequest(statsRequest)); + logger.info("follow shards response={}", response); + String expectedIndex = ObjectPath.eval("indices.0.index", response); + assertThat(expectedIndex, equalTo(followerIndex)); + Object fatalError = ObjectPath.eval("indices.0.shards.0.read_exceptions.0", response); + assertThat(fatalError, notNullValue()); + }); + + // Start ILM back up and let it unfollow + client().performRequest(new Request("POST", "/_ilm/start")); + // Wait for the policy to be complete + assertBusy(() -> { + assertILMPolicy(client(), followerIndex, policyName, "completed", "completed", "completed"); + }); + + // Ensure the "follower" index has successfully unfollowed + assertBusy(() -> { + assertThat(getIndexSetting(client(), followerIndex, "index.xpack.ccr.following_index"), nullValue()); + }); + } + } + } + + private void configureRemoteClusters(String name, String leaderRemoteClusterSeed) throws IOException { + logger.info("Configuring leader remote cluster [{}]", leaderRemoteClusterSeed); + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity("{\"persistent\": {\"cluster.remote." + name + ".seeds\": " + + (leaderRemoteClusterSeed != null ? 
String.format(Locale.ROOT, "\"%s\"", leaderRemoteClusterSeed) : null) + "}}"); + assertThat(client().performRequest(request).getStatusLine().getStatusCode(), equalTo(200)); + } + private static void putILMPolicy(String name, String maxSize, Integer maxDocs, TimeValue maxAge) throws IOException { final Request request = new Request("PUT", "_ilm/policy/" + name); XContentBuilder builder = jsonBuilder(); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java index 8f07b532769c3..b6b317e0c67ef 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java @@ -59,6 +59,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; public class TimeSeriesLifecycleActionsIT extends ESRestTestCase { private String index; @@ -218,6 +219,7 @@ public void testRetryFailedShrinkAction() throws Exception { assertThat(getStepKeyForIndex(shrunkenIndex), equalTo(TerminalPolicyStep.KEY)); assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards))); assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); + assertThat(settings.get(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue()); }); expectThrows(ResponseException.class, this::indexDocument); } @@ -461,6 +463,7 @@ public void testShrinkAction() throws Exception { assertThat(getStepKeyForIndex(shrunkenIndex), equalTo(TerminalPolicyStep.KEY)); assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards))); assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); + assertThat(settings.get(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue()); }); expectThrows(ResponseException.class, this::indexDocument); } @@ -480,6 +483,7 @@ public void testShrinkSameShards() throws Exception { assertThat(getStepKeyForIndex(index), equalTo(TerminalPolicyStep.KEY)); assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(numberOfShards))); assertNull(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey())); + assertThat(settings.get(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue()); }); } @@ -523,6 +527,7 @@ public void testShrinkDuringSnapshot() throws Exception { assertThat(getStepKeyForIndex(shrunkenIndex), equalTo(TerminalPolicyStep.KEY)); assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(1))); assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); + assertThat(settings.get(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue()); }, 2, TimeUnit.MINUTES); expectThrows(ResponseException.class, this::indexDocument); // assert that snapshot succeeded @@ -812,6 +817,28 @@ public void testMoveToInjectedStep() throws Exception { }); } + public void testCanStopILMWithPolicyUsingNonexistentPolicy() throws Exception { + createIndexWithSettings(index, 
Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(5,15))); + + Request stopILMRequest = new Request("POST", "_ilm/stop"); + assertOK(client().performRequest(stopILMRequest)); + + Request statusRequest = new Request("GET", "_ilm/status"); + assertBusy(() -> { + Response statusResponse = client().performRequest(statusRequest); + assertOK(statusResponse); + Map statusResponseMap = entityAsMap(statusResponse); + String status = (String) statusResponseMap.get("operation_mode"); + assertEquals("STOPPED", status); + }); + + // Re-start ILM so that subsequent tests don't fail + Request startILMReqest = new Request("POST", "_ilm/start"); + assertOK(client().performRequest(startILMReqest)); + } + private void createFullPolicy(TimeValue hotTime) throws IOException { Map hotActions = new HashMap<>(); hotActions.put(SetPriorityAction.NAME, new SetPriorityAction(100)); diff --git a/x-pack/plugin/ilm/qa/rest/build.gradle b/x-pack/plugin/ilm/qa/rest/build.gradle index 7a79d1c20d936..c69a3dfce2143 100644 --- a/x-pack/plugin/ilm/qa/rest/build.gradle +++ b/x-pack/plugin/ilm/qa/rest/build.gradle @@ -42,4 +42,4 @@ restTestCluster { } check.dependsOn restTest -unitTest.enabled = false +test.enabled = false diff --git a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java index 78fc2700f860e..8e74426e0b390 100644 --- a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java +++ b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java @@ -7,9 +7,12 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Node; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -19,6 +22,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; @@ -26,6 +30,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; import org.elasticsearch.xpack.core.indexlifecycle.Phase; +import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; import org.junit.Before; import java.io.IOException; @@ -36,8 +41,10 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class PermissionsIT extends ESRestTestCase { + private static final String jsonDoc = "{ \"name\" : \"elasticsearch\", \"body\": \"foo bar\" }"; private 
String deletePolicy = "deletePolicy"; private Settings indexSettingsWithPolicy; @@ -74,13 +81,13 @@ public void init() throws Exception { .put("number_of_shards", 1) .put("number_of_replicas", 0) .build(); - createNewSingletonPolicy(deletePolicy,"delete", new DeleteAction()); + createNewSingletonPolicy(client(), deletePolicy,"delete", new DeleteAction()); } /** * Tests that a policy that simply deletes an index after 0s succeeds when an index * with user `test_admin` is created referencing a policy created by `test_ilm` when both - * users have read/write permissions on the the index. The goal is to verify that one + * users have read/write permissions on the index. The goal is to verify that one * does not need to be the same user who created both the policy and the index to have the * index be properly managed by ILM. */ @@ -126,7 +133,63 @@ public void testCanViewExplainOnUnmanagedIndex() throws Exception { assertOK(client().performRequest(request)); } - private void createNewSingletonPolicy(String policy, String phaseName, LifecycleAction action) throws IOException { + /** + * Tests when the user is limited by alias of an index is able to write to index + * which was rolled over by an ILM policy. + */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41440") + public void testWhenUserLimitedByOnlyAliasOfIndexCanWriteToIndexWhichWasRolledoverByILMPolicy() + throws IOException, InterruptedException { + /* + * Setup: + * - ILM policy to rollover index when max docs condition is met + * - Index template to which the ILM policy applies and create Index + * - Create role with just write and manage privileges on alias + * - Create user and assign newly created role. + */ + createNewSingletonPolicy(adminClient(), "foo-policy", "hot", new RolloverAction(null, null, 2L)); + createIndexTemplate("foo-template", "foo-logs-*", "foo_alias", "foo-policy"); + createIndexAsAdmin("foo-logs-000001", "foo_alias", randomBoolean()); + createRole("foo_alias_role", "foo_alias"); + createUser("test_user", "x-pack-test-password", "foo_alias_role"); + + // test_user: index docs using alias in the newly created index + indexDocs("test_user", "x-pack-test-password", "foo_alias", 2); + refresh("foo_alias"); + + // wait so the ILM policy triggers rollover action, verify that the new index exists + assertThat(awaitBusy(() -> { + Request request = new Request("HEAD", "/" + "foo-logs-000002"); + int status; + try { + status = adminClient().performRequest(request).getStatusLine().getStatusCode(); + } catch (IOException e) { + throw new RuntimeException(e); + } + return status == 200; + }), is(true)); + + // test_user: index docs using alias, now should be able write to new index + indexDocs("test_user", "x-pack-test-password", "foo_alias", 1); + refresh("foo_alias"); + + // verify that the doc has been indexed into new write index + awaitBusy(() -> { + Request request = new Request("GET", "/foo-logs-000002/_search"); + Response response; + try { + response = adminClient().performRequest(request); + try (InputStream content = response.getEntity().getContent()) { + Map map = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); + return ((Integer) XContentMapValues.extractValue("hits.total.value", map)) == 1; + } + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + + private void createNewSingletonPolicy(RestClient client, String policy, String phaseName, LifecycleAction action) throws IOException { Phase phase = new Phase(phaseName, TimeValue.ZERO, 
singletonMap(action.getWriteableName(), action)); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, singletonMap(phase.getName(), phase)); XContentBuilder builder = jsonBuilder(); @@ -135,7 +198,7 @@ private void createNewSingletonPolicy(String policy, String phaseName, Lifecycle "{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); Request request = new Request("PUT", "_ilm/policy/" + policy); request.setEntity(entity); - client().performRequest(request); + assertOK(client.performRequest(request)); } private void createIndexAsAdmin(String name, Settings settings, String mapping) throws IOException { @@ -144,4 +207,59 @@ private void createIndexAsAdmin(String name, Settings settings, String mapping) + ", \"mappings\" : {" + mapping + "} }"); assertOK(adminClient().performRequest(request)); } + + private void createIndexAsAdmin(String name, String alias, boolean isWriteIndex) throws IOException { + Request request = new Request("PUT", "/" + name); + request.setJsonEntity("{ \"aliases\": { \""+alias+"\": {" + ((isWriteIndex) ? "\"is_write_index\" : true" : "") + + "} } }"); + assertOK(adminClient().performRequest(request)); + } + + private void createIndexTemplate(String name, String pattern, String alias, String policy) throws IOException { + Request request = new Request("PUT", "/_template/" + name); + request.setJsonEntity("{\n" + + " \"index_patterns\": [\""+pattern+"\"],\n" + + " \"settings\": {\n" + + " \"number_of_shards\": 1,\n" + + " \"number_of_replicas\": 0,\n" + + " \"index.lifecycle.name\": \""+policy+"\",\n" + + " \"index.lifecycle.rollover_alias\": \""+alias+"\"\n" + + " }\n" + + " }"); + assertOK(adminClient().performRequest(request)); + } + + private void createUser(String name, String password, String role) throws IOException { + Request request = new Request("PUT", "/_security/user/" + name); + request.setJsonEntity("{ \"password\": \""+password+"\", \"roles\": [ \""+ role+"\"] }"); + assertOK(adminClient().performRequest(request)); + } + + private void createRole(String name, String alias) throws IOException { + Request request = new Request("PUT", "/_security/role/" + name); + request.setJsonEntity("{ \"indices\": [ { \"names\" : [ \""+ alias+"\"], \"privileges\": [ \"write\", \"manage\" ] } ] }"); + assertOK(adminClient().performRequest(request)); + } + + private void indexDocs(String user, String passwd, String index, int noOfDocs) throws IOException { + RestClientBuilder builder = RestClient.builder(adminClient().getNodes().toArray(new Node[0])); + String token = basicAuthHeaderValue(user, new SecureString(passwd.toCharArray())); + configureClient(builder, Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build()); + builder.setStrictDeprecationMode(true); + try (RestClient userClient = builder.build();) { + + for (int cnt = 0; cnt < noOfDocs; cnt++) { + Request request = new Request("POST", "/" + index + "/_doc"); + request.setJsonEntity(jsonDoc); + assertOK(userClient.performRequest(request)); + } + } + } + + private void refresh(String index) throws IOException { + Request request = new Request("POST", "/" + index + "/_refresh"); + assertOK(adminClient().performRequest(request)); + } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java index f6c068d945d79..05ad342f3e779 100644 --- 
a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java @@ -307,6 +307,9 @@ static ClusterState moveClusterStateToStep(String indexName, ClusterState curren "] with policy [" + indexPolicySetting + "] does not exist"); } + logger.info("moving index [{}] from [{}] to [{}] in policy [{}]", + indexName, currentStepKey, nextStepKey, indexPolicySetting); + return IndexLifecycleRunner.moveClusterStateToNextStep(idxMeta.getIndex(), currentState, currentStepKey, nextStepKey, nowSupplier, forcePhaseDefinitionRefresh); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java index d143e80340cc0..a159b9f965e2a 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.component.Lifecycle.State; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -28,7 +29,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; -import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkStep; import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; @@ -42,9 +43,9 @@ * A service which runs the {@link LifecyclePolicy}s associated with indexes. 
*/ public class IndexLifecycleService - implements ClusterStateListener, ClusterStateApplier, SchedulerEngine.Listener, Closeable, LocalNodeMasterListener { + implements ClusterStateListener, ClusterStateApplier, SchedulerEngine.Listener, Closeable, LocalNodeMasterListener { private static final Logger logger = LogManager.getLogger(IndexLifecycleService.class); - private static final Set IGNORE_ACTIONS_MAINTENANCE_REQUESTED = Collections.singleton(ShrinkAction.NAME); + private static final Set IGNORE_STEPS_MAINTENANCE_REQUESTED = Collections.singleton(ShrinkStep.NAME); private volatile boolean isMaster = false; private volatile TimeValue pollInterval; @@ -53,8 +54,6 @@ public class IndexLifecycleService private final PolicyStepsRegistry policyRegistry; private final IndexLifecycleRunner lifecycleRunner; private final Settings settings; - private final ThreadPool threadPool; - private Client client; private ClusterService clusterService; private LongSupplier nowSupplier; private SchedulerEngine.Job scheduledJob; @@ -63,13 +62,11 @@ public IndexLifecycleService(Settings settings, Client client, ClusterService cl LongSupplier nowSupplier, NamedXContentRegistry xContentRegistry) { super(); this.settings = settings; - this.client = client; this.clusterService = clusterService; this.clock = clock; this.nowSupplier = nowSupplier; this.scheduledJob = null; this.policyRegistry = new PolicyStepsRegistry(xContentRegistry, client); - this.threadPool = threadPool; this.lifecycleRunner = new IndexLifecycleRunner(policyRegistry, clusterService, threadPool, nowSupplier); this.pollInterval = LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING.get(settings); clusterService.addStateApplier(this); @@ -114,18 +111,26 @@ public void onMaster() { IndexMetaData idxMeta = cursor.value; String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings()); if (Strings.isNullOrEmpty(policyName) == false) { - StepKey stepKey = IndexLifecycleRunner.getCurrentStepKey(LifecycleExecutionState.fromIndexMetadata(idxMeta)); - if (OperationMode.STOPPING == currentMode && - stepKey != null && - IGNORE_ACTIONS_MAINTENANCE_REQUESTED.contains(stepKey.getAction()) == false) { - logger.info("skipping policy [{}] for index [{}]. stopping Index Lifecycle execution", - policyName, idxMeta.getIndex().getName()); - continue; + final LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(idxMeta); + StepKey stepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + + if (OperationMode.STOPPING == currentMode) { + if (stepKey != null && IGNORE_STEPS_MAINTENANCE_REQUESTED.contains(stepKey.getName())) { + logger.info("waiting to stop ILM because index [{}] with policy [{}] is currently in step [{}]", + idxMeta.getIndex().getName(), policyName, stepKey.getName()); + lifecycleRunner.maybeRunAsyncAction(clusterState, idxMeta, policyName, stepKey); + // ILM is trying to stop, but this index is in a Shrink step (or other dangerous step) so we can't stop + safeToStop = false; + } else { + logger.info("skipping policy execution of step [{}] for index [{}] with policy [{}] because ILM is stopping", + stepKey == null ? "n/a" : stepKey.getName(), idxMeta.getIndex().getName(), policyName); + } + } else { + lifecycleRunner.maybeRunAsyncAction(clusterState, idxMeta, policyName, stepKey); } - lifecycleRunner.maybeRunAsyncAction(clusterState, idxMeta, policyName, stepKey); - safeToStop = false; // proven false! 
} } + if (safeToStop && OperationMode.STOPPING == currentMode) { submitOperationModeUpdate(OperationMode.STOPPED); } @@ -158,14 +163,21 @@ SchedulerEngine.Job getScheduledJob() { return scheduledJob; } - private void maybeScheduleJob() { + private synchronized void maybeScheduleJob() { if (this.isMaster) { if (scheduler.get() == null) { - scheduler.set(new SchedulerEngine(settings, clock)); - scheduler.get().register(this); + // don't create scheduler if the node is shutting down + if (isClusterServiceStoppedOrClosed() == false) { + scheduler.set(new SchedulerEngine(settings, clock)); + scheduler.get().register(this); + } + } + + // scheduler could be null if the node might be shutting down + if (scheduler.get() != null) { + scheduledJob = new SchedulerEngine.Job(XPackField.INDEX_LIFECYCLE, new TimeValueSchedule(pollInterval)); + scheduler.get().add(scheduledJob); } - scheduledJob = new SchedulerEngine.Job(XPackField.INDEX_LIFECYCLE, new TimeValueSchedule(pollInterval)); - scheduler.get().add(scheduledJob); } } @@ -180,7 +192,7 @@ public void clusterChanged(ClusterChangedEvent event) { @Override public void applyClusterState(ClusterChangedEvent event) { if (event.localNodeMaster()) { // only act if we are master, otherwise - // keep idle until elected + // keep idle until elected if (event.state().metaData().custom(IndexLifecycleMetadata.TYPE) != null) { policyRegistry.update(event.state()); } @@ -233,28 +245,45 @@ void triggerPolicies(ClusterState clusterState, boolean fromClusterStateChange) IndexMetaData idxMeta = cursor.value; String policyName = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings()); if (Strings.isNullOrEmpty(policyName) == false) { - StepKey stepKey = IndexLifecycleRunner.getCurrentStepKey(LifecycleExecutionState.fromIndexMetadata(idxMeta)); - if (OperationMode.STOPPING == currentMode && stepKey != null - && IGNORE_ACTIONS_MAINTENANCE_REQUESTED.contains(stepKey.getAction()) == false) { - logger.info("skipping policy [" + policyName + "] for index [" + idxMeta.getIndex().getName() - + "]. stopping Index Lifecycle execution"); - continue; - } - if (fromClusterStateChange) { - lifecycleRunner.runPolicyAfterStateChange(policyName, idxMeta); + final LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(idxMeta); + StepKey stepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); + + if (OperationMode.STOPPING == currentMode) { + if (stepKey != null && IGNORE_STEPS_MAINTENANCE_REQUESTED.contains(stepKey.getName())) { + logger.info("waiting to stop ILM because index [{}] with policy [{}] is currently in step [{}]", + idxMeta.getIndex().getName(), policyName, stepKey.getName()); + if (fromClusterStateChange) { + lifecycleRunner.runPolicyAfterStateChange(policyName, idxMeta); + } else { + lifecycleRunner.runPeriodicStep(policyName, idxMeta); + } + // ILM is trying to stop, but this index is in a Shrink step (or other dangerous step) so we can't stop + safeToStop = false; + } else { + logger.info("skipping policy execution of step [{}] for index [{}] with policy [{}] because ILM is stopping", + stepKey == null ? "n/a" : stepKey.getName(), idxMeta.getIndex().getName(), policyName); + } } else { - lifecycleRunner.runPeriodicStep(policyName, idxMeta); + if (fromClusterStateChange) { + lifecycleRunner.runPolicyAfterStateChange(policyName, idxMeta); + } else { + lifecycleRunner.runPeriodicStep(policyName, idxMeta); + } } - safeToStop = false; // proven false! 
} } + if (safeToStop && OperationMode.STOPPING == currentMode) { submitOperationModeUpdate(OperationMode.STOPPED); } } @Override - public void close() { + public synchronized void close() { + // this assertion is here to ensure that the check we use in maybeScheduleJob is accurate for detecting a shutdown in + // progress, which is that the cluster service is stopped and closed at some point prior to closing plugins + assert isClusterServiceStoppedOrClosed() : "close is called by closing the plugin, which is expected to happen after " + + "the cluster service is stopped"; SchedulerEngine engine = scheduler.get(); if (engine != null) { engine.stop(); @@ -265,4 +294,13 @@ public void submitOperationModeUpdate(OperationMode mode) { clusterService.submitStateUpdateTask("ilm_operation_mode_update", new OperationModeUpdateTask(mode)); } + + /** + * Method that checks if the lifecycle state of the cluster service is stopped or closed. This + * enhances the readability of the code. + */ + private boolean isClusterServiceStoppedOrClosed() { + final State state = clusterService.lifecycleState(); + return state == State.STOPPED || state == State.CLOSED; + } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistry.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistry.java index d753a5035f756..b1c1785cb1cf6 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistry.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/PolicyStepsRegistry.java @@ -39,6 +39,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -125,7 +126,7 @@ public LifecyclePolicyMetadata read(StreamInput in, String key) { List policyAsSteps = policyMetadata.getPolicy().toSteps(policyClient); if (policyAsSteps.isEmpty() == false) { firstStepMap.put(policyMetadata.getName(), policyAsSteps.get(0)); - final Map stepMapForPolicy = new HashMap<>(); + final Map stepMapForPolicy = new LinkedHashMap<>(); for (Step step : policyAsSteps) { assert ErrorStep.NAME.equals(step.getKey().getName()) == false : "unexpected error step in policy"; stepMapForPolicy.put(step.getKey(), step); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java index 57f08eba76490..e5cb15f6c3b24 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportMoveToStepAction.java @@ -6,6 +6,8 @@ package org.elasticsearch.xpack.indexlifecycle.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -25,6 +27,8 @@ import org.elasticsearch.xpack.indexlifecycle.IndexLifecycleService; public class TransportMoveToStepAction extends TransportMasterNodeAction { + private static final Logger logger = LogManager.getLogger(TransportMoveToStepAction.class); + IndexLifecycleService indexLifecycleService; @Inject public 
TransportMoveToStepAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java index 790dd5de632e6..2444cbf99fd52 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java @@ -16,18 +16,18 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.indexlifecycle.FreezeAction; -import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.test.AbstractDiffableSerializationTestCase; import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; +import org.elasticsearch.xpack.core.indexlifecycle.FreezeAction; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata.IndexLifecycleMetadataDiff; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.xpack.core.indexlifecycle.Phase; import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; @@ -137,7 +137,7 @@ protected Reader> diffReader() { } public void testMinimumSupportedVersion() { - assertEquals(Version.V_7_0_0, createTestInstance().getMinimalSupportedVersion()); + assertEquals(Version.V_6_6_0, createTestInstance().getMinimalSupportedVersion()); } public void testcontext() { diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java index 565a9723c8c6c..511d0e5be1ab9 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunnerTests.java @@ -385,14 +385,12 @@ public void testRunStateChangePolicyWithAsyncActionNextStep() throws Exception { ClusterState before = clusterService.state(); CountDownLatch latch = new CountDownLatch(1); step.setLatch(latch); + CountDownLatch asyncLatch = new CountDownLatch(1); + nextStep.setLatch(asyncLatch); runner.runPolicyAfterStateChange(policyName, indexMetaData); // Wait for the cluster state action step awaitLatch(latch, 5, TimeUnit.SECONDS); - - CountDownLatch asyncLatch = new CountDownLatch(1); - nextStep.setLatch(asyncLatch); - // Wait for the async action step awaitLatch(asyncLatch, 5, TimeUnit.SECONDS); ClusterState after = clusterService.state(); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java 
b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java index 810f913775e3b..3757c1cd5fb4f 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleServiceTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.component.Lifecycle.State; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -36,6 +37,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.xpack.core.indexlifecycle.Phase; import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkStep; import org.elasticsearch.xpack.core.indexlifecycle.Step; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.junit.After; @@ -57,6 +59,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -91,6 +94,7 @@ public void prepareServices() { Settings settings = Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s").build(); when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, Collections.singleton(LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING))); + when(clusterService.lifecycleState()).thenReturn(State.STARTED); Client client = mock(Client.class); AdminClient adminClient = mock(AdminClient.class); @@ -108,6 +112,7 @@ public void prepareServices() { @After public void cleanup() { + when(clusterService.lifecycleState()).thenReturn(randomFrom(State.STOPPED, State.CLOSED)); indexLifecycleService.close(); threadPool.shutdownNow(); } @@ -145,7 +150,7 @@ public void testStoppedModeSkip() { } public void testRequestedStopOnShrink() { - Step.StepKey mockShrinkStep = new Step.StepKey(randomAlphaOfLength(4), ShrinkAction.NAME, randomAlphaOfLength(5)); + Step.StepKey mockShrinkStep = new Step.StepKey(randomAlphaOfLength(4), ShrinkAction.NAME, ShrinkStep.NAME); String policyName = randomAlphaOfLengthBetween(1, 20); IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep = new IndexLifecycleRunnerTests.MockClusterStateActionStep(mockShrinkStep, randomStepKey()); @@ -177,14 +182,67 @@ public void testRequestedStopOnShrink() { .build(); ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE); - SetOnce executedShrink = new SetOnce<>(); + SetOnce changedOperationMode = new SetOnce<>(); doAnswer(invocationOnMock -> { - executedShrink.set(true); + changedOperationMode.set(true); return null; - }).when(clusterService).submitStateUpdateTask(anyString(), any(ExecuteStepsUpdateTask.class)); + }).when(clusterService).submitStateUpdateTask(eq("ilm_operation_mode_update"), any(OperationModeUpdateTask.class)); + indexLifecycleService.applyClusterState(event); + indexLifecycleService.triggerPolicies(currentState, true); + assertNull(changedOperationMode.get()); + } + + public void 
testRequestedStopInShrinkActionButNotShrinkStep() { + // test all the shrink action steps that ILM can be stopped during (basically all of them minus the actual shrink) + ShrinkAction action = new ShrinkAction(1); + action.toSteps(mock(Client.class), "warm", randomStepKey()).stream() + .map(sk -> sk.getKey().getName()) + .filter(name -> name.equals(ShrinkStep.NAME) == false) + .forEach(this::verifyCanStopWithStep); + } + + // Check that ILM can stop when in the shrink action on the provided step + private void verifyCanStopWithStep(String stoppableStep) { + Step.StepKey mockShrinkStep = new Step.StepKey(randomAlphaOfLength(4), ShrinkAction.NAME, stoppableStep); + String policyName = randomAlphaOfLengthBetween(1, 20); + IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep = + new IndexLifecycleRunnerTests.MockClusterStateActionStep(mockShrinkStep, randomStepKey()); + MockAction mockAction = new MockAction(Collections.singletonList(mockStep)); + Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction)); + LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase)); + SortedMap policyMap = new TreeMap<>(); + policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong())); + Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(mockShrinkStep.getPhase()); + lifecycleState.setAction(mockShrinkStep.getAction()); + lifecycleState.setStep(mockShrinkStep.getName()); + IndexMetaData indexMetadata = IndexMetaData.builder(index.getName()) + .settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policyName)) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + ImmutableOpenMap.Builder indices = ImmutableOpenMap. 
builder() + .fPut(index.getName(), indexMetadata); + MetaData metaData = MetaData.builder() + .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING)) + .indices(indices.build()) + .persistentSettings(settings(Version.CURRENT).build()) + .build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .metaData(metaData) + .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + .build(); + + ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE); + SetOnce changedOperationMode = new SetOnce<>(); + doAnswer(invocationOnMock -> { + changedOperationMode.set(true); + return null; + }).when(clusterService).submitStateUpdateTask(eq("ilm_operation_mode_update"), any(OperationModeUpdateTask.class)); indexLifecycleService.applyClusterState(event); indexLifecycleService.triggerPolicies(currentState, true); - assertTrue(executedShrink.get()); + assertTrue(changedOperationMode.get()); } public void testRequestedStopOnSafeAction() { @@ -233,7 +291,7 @@ public void testRequestedStopOnSafeAction() { assertThat(task.getOperationMode(), equalTo(OperationMode.STOPPED)); moveToMaintenance.set(true); return null; - }).when(clusterService).submitStateUpdateTask(anyString(), any(OperationModeUpdateTask.class)); + }).when(clusterService).submitStateUpdateTask(eq("ilm_operation_mode_update"), any(OperationModeUpdateTask.class)); indexLifecycleService.applyClusterState(event); indexLifecycleService.triggerPolicies(currentState, randomBoolean()); diff --git a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java index cf70c16cdb0b9..d3ca4dd0098b7 100644 --- a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java +++ b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java @@ -10,6 +10,7 @@ import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.xpack.core.XPackPlugin; @@ -63,6 +64,8 @@ public UnaryOperator> getIndexTemplateMetaDat templates.keySet().removeIf(OLD_LOGSTASH_INDEX_NAME::equals); TemplateUtils.loadTemplateIntoMap("/" + LOGSTASH_TEMPLATE_FILE_NAME + ".json", templates, LOGSTASH_INDEX_TEMPLATE_NAME, Version.CURRENT.toString(), TEMPLATE_VERSION_PATTERN, LogManager.getLogger(Logstash.class)); + //internal representation of typeless templates requires the default "_doc" type, which is also required for internal templates + assert templates.get(LOGSTASH_INDEX_TEMPLATE_NAME).mappings().get(MapperService.SINGLE_MAPPING_NAME) != null; return templates; }; } diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index 0fe0af236f9ad..ba0caaa8e0041 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -1,5 +1,3 @@ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask - evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' @@ -11,13 +9,26 @@ esplugin { extendedPlugins = ['x-pack-core', 'lang-painless'] } + +repositories { + ivy { + name "ml-cpp" + url "https://prelert-artifacts.s3.amazonaws.com" + metadataSources { + // no repository 
metadata, look directly for the artifact + artifact() + } + patternLayout { + artifact "maven/org/elasticsearch/ml/ml-cpp/[revision]/ml-cpp-[revision].[ext]" + } + } +} + configurations { nativeBundle { resolutionStrategy.dependencySubstitution { if (findProject(':ml-cpp') != null) { substitute module("org.elasticsearch.ml:ml-cpp") with project(":ml-cpp") - } else { - substitute module("org.elasticsearch.ml:ml-cpp") with project("${project.path}:cpp-snapshot") } } } @@ -94,12 +105,11 @@ integTest.enabled = false // Instead we create a separate task to run the // tests based on ESIntegTestCase -task internalClusterTest(type: RandomizedTestingTask, - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Multi-node tests', - dependsOn: unitTest.dependsOn) { - include '**/*IT.class' - systemProperty 'es.set.netty.runtime.available.processors', 'false' +task internalClusterTest(type: Test) { + description = 'Multi-node tests' + + include '**/*IT.class' + systemProperty 'es.set.netty.runtime.available.processors', 'false' } check.dependsOn internalClusterTest internalClusterTest.mustRunAfter test diff --git a/x-pack/plugin/ml/cpp-snapshot/.gitignore b/x-pack/plugin/ml/cpp-snapshot/.gitignore deleted file mode 100644 index 16d3c4dbbfec5..0000000000000 --- a/x-pack/plugin/ml/cpp-snapshot/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.cache diff --git a/x-pack/plugin/ml/cpp-snapshot/build.gradle b/x-pack/plugin/ml/cpp-snapshot/build.gradle deleted file mode 100644 index e5b55293159aa..0000000000000 --- a/x-pack/plugin/ml/cpp-snapshot/build.gradle +++ /dev/null @@ -1,55 +0,0 @@ -import org.elasticsearch.gradle.VersionProperties - -apply plugin: 'distribution' - -ext.version = VersionProperties.elasticsearch - -// This project pulls a snapshot version of the ML cpp artifacts and sets that as the artifact -// for this project so it can be used with dependency substitution. 
- -void getZip(File snapshotZip) { - String zipUrl = "http://prelert-artifacts.s3.amazonaws.com/maven/org/elasticsearch/ml/ml-cpp/${version}/ml-cpp-${version}.zip" - File snapshotMd5 = new File(snapshotZip.toString() + '.md5') - HttpURLConnection conn = (HttpURLConnection) new URL(zipUrl).openConnection(); - - // do a HEAD first to check the zip hash against the local file - conn.setRequestMethod('HEAD'); - if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) { - throw new GradleException('ML cpp snapshot does not exist') - } - - String remoteMd5 = conn.getHeaderField('ETag') - if (snapshotZip.exists()) { - // do a HEAD first to check the zip hash against the local file - String localMd5 = snapshotMd5.getText('UTF-8') - if (remoteMd5.equals(localMd5)) { - logger.info('Using cached ML snapshot') - return - } - } - - snapshotZip.bytes = new URL(zipUrl).bytes - snapshotMd5.setText(remoteMd5, 'UTF-8') -} - -File snapshotZip = new File(projectDir, ".cache/ml-cpp-${version}.zip") -task downloadMachineLearningSnapshot { - onlyIf { - // skip if ml-cpp is being built locally - findProject(':ml-cpp') == null && - // skip for offline builds - just rely on the artifact already having been downloaded before here - project.gradle.startParameter.isOffline() == false - } - doFirst { - snapshotZip.parentFile.mkdirs() - getZip(snapshotZip) - } -} - -task cleanCache(type: Delete) { - delete "${projectDir}/.cache" -} - -artifacts { - 'default' file: snapshotZip, name: 'ml-cpp', type: 'zip', builtBy: downloadMachineLearningSnapshot -} diff --git a/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java index 9e31ddb131c6f..67b72a648db60 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java +++ b/x-pack/plugin/ml/qa/ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java @@ -35,9 +35,9 @@ public void test() throws IOException { // role for (ExecutableSection section : testCandidate.getTestSection().getExecutableSections()) { if (section instanceof DoSection) { - if (((DoSection) section).getApiCallSection().getApi().startsWith("xpack.ml.") && - ((DoSection) section).getApiCallSection().getApi().startsWith("xpack.ml.get_") == false && - ((DoSection) section).getApiCallSection().getApi().equals("xpack.ml.find_file_structure") == false) { + if (((DoSection) section).getApiCallSection().getApi().startsWith("ml.") && + ((DoSection) section).getApiCallSection().getApi().startsWith("ml.get_") == false && + ((DoSection) section).getApiCallSection().getApi().equals("ml.find_file_structure") == false) { fail("should have failed because of missing role"); } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle index 95a2f8db2c49c..22fd7837628b9 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle @@ -44,7 +44,7 @@ integTestCluster { setting 'xpack.security.transport.ssl.key', nodeKey.name setting 'xpack.security.transport.ssl.certificate', nodeCert.name setting 'xpack.security.transport.ssl.verification_mode', 'certificate' - setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.security.audit.enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' setting 
'xpack.ml.min_disk_space_off_heap', '200mb' diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java index 3d49e03321892..c873aadfd23dc 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Locale; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -35,38 +36,37 @@ public class CategorizationIT extends MlNativeAutodetectIntegTestCase { private static final String DATA_INDEX = "log-data"; - private static final String DATA_TYPE = "log"; private long nowMillis; @Before public void setUpData() { client().admin().indices().prepareCreate(DATA_INDEX) - .addMapping(DATA_TYPE, "time", "type=date,format=epoch_millis", + .addMapping(SINGLE_MAPPING_NAME, "time", "type=date,format=epoch_millis", "msg", "type=text") .get(); nowMillis = System.currentTimeMillis(); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - IndexRequest indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + IndexRequest indexRequest = new IndexRequest(DATA_INDEX); indexRequest.source("time", nowMillis - TimeValue.timeValueHours(2).millis(), "msg", "Node 1 started"); bulkRequestBuilder.add(indexRequest); - indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + indexRequest = new IndexRequest(DATA_INDEX); indexRequest.source("time", nowMillis - TimeValue.timeValueHours(2).millis() + 1, "msg", "Failed to shutdown [error org.aaaa.bbbb.Cccc line 54 caused " + "by foo exception]"); bulkRequestBuilder.add(indexRequest); - indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + indexRequest = new IndexRequest(DATA_INDEX); indexRequest.source("time", nowMillis - TimeValue.timeValueHours(1).millis(), "msg", "Node 2 started"); bulkRequestBuilder.add(indexRequest); - indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + indexRequest = new IndexRequest(DATA_INDEX); indexRequest.source("time", nowMillis - TimeValue.timeValueHours(1).millis() + 1, "msg", "Failed to shutdown [error but this time completely different]"); bulkRequestBuilder.add(indexRequest); - indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + indexRequest = new IndexRequest(DATA_INDEX); indexRequest.source("time", nowMillis, "msg", "Node 3 started"); bulkRequestBuilder.add(indexRequest); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java index e721b5b88fdf8..8c5f5cf1e39f9 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java @@ -308,7 +308,7 @@ public void testLookbackOnlyWithNestedFields() throws Exception { client().performRequest(createJobRequest); String datafeedId = jobId + "-datafeed"; - new DatafeedBuilder(datafeedId, jobId, "nested-data", "response").build(); + new 
DatafeedBuilder(datafeedId, jobId, "nested-data").build(); openJob(client(), jobId); startDatafeedAndWaitUntilStopped(datafeedId); @@ -351,7 +351,7 @@ public void testInsufficientSearchPrivilegesOnPut() throws Exception { // create a datafeed they DON'T have permission to search the index the datafeed is // configured to read ResponseException e = expectThrows(ResponseException.class, () -> - new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs", "response") + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs") .setAuthHeader(BASIC_AUTH_VALUE_ML_ADMIN) .build()); @@ -419,7 +419,7 @@ public void testInsufficientSearchPrivilegesOnPutWithRollup() throws Exception { ResponseException e = expectThrows(ResponseException.class, () -> - new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs-rollup", "doc") + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs-rollup") .setAggregations(aggregations) .setAuthHeader(BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS) //want to search, but no admin access .build()); @@ -449,7 +449,7 @@ public void testInsufficientSearchPrivilegesOnPreview() throws Exception { client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; - new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs", "response").build(); + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs").build(); // This should be disallowed, because ml_admin is trying to preview a datafeed created by // by another user (x_pack_rest_user in this case) that will reveal the content of an index they @@ -490,7 +490,7 @@ public void testLookbackOnlyGivenAggregationsWithHistogram() throws Exception { + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + "\"airline\":{\"terms\":{\"field\":\"airline\",\"size\":10}," + " \"aggregations\":{\"responsetime\":{\"avg\":{\"field\":\"responsetime\"}}}}}}}"; - new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs", "response").setAggregations(aggregations).build(); + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs").setAggregations(aggregations).build(); openJob(client(), jobId); startDatafeedAndWaitUntilStopped(datafeedId); @@ -529,7 +529,7 @@ public void testLookbackOnlyGivenAggregationsWithDateHistogram() throws Exceptio + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + "\"airline\":{\"terms\":{\"field\":\"airline\",\"size\":10}," + " \"aggregations\":{\"responsetime\":{\"avg\":{\"field\":\"responsetime\"}}}}}}}"; - new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs", "response").setAggregations(aggregations).build(); + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs").setAggregations(aggregations).build(); openJob(client(), jobId); startDatafeedAndWaitUntilStopped(datafeedId); @@ -568,7 +568,7 @@ public void testLookbackUsingDerivativeAggWithLargerHistogramBucketThanDataRate( + "\"aggs\": {\"timestamp\":{\"max\":{\"field\":\"timestamp\"}}," + "\"bytes-delta\":{\"derivative\":{\"buckets_path\":\"avg_bytes_out\"}}," + "\"avg_bytes_out\":{\"avg\":{\"field\":\"network_bytes_out\"}} }}}}}"; - new DatafeedBuilder(datafeedId, jobId, "network-data", "doc") + new DatafeedBuilder(datafeedId, jobId, "network-data") .setAggregations(aggregations) .setChunkingTimespan("300s") .build(); @@ -614,7 +614,7 @@ public void testLookbackUsingDerivativeAggWithSmallerHistogramBucketThanDataRate + "\"aggs\": {\"timestamp\":{\"max\":{\"field\":\"timestamp\"}}," + "\"bytes-delta\":{\"derivative\":{\"buckets_path\":\"avg_bytes_out\"}}," + 
"\"avg_bytes_out\":{\"avg\":{\"field\":\"network_bytes_out\"}} }}}}}"; - new DatafeedBuilder(datafeedId, jobId, "network-data", "doc") + new DatafeedBuilder(datafeedId, jobId, "network-data") .setAggregations(aggregations) .setChunkingTimespan("300s") .build(); @@ -658,7 +658,7 @@ public void testLookbackWithoutPermissions() throws Exception { + "\"avg_bytes_out\":{\"avg\":{\"field\":\"network_bytes_out\"}} }}}}}"; // At the time we create the datafeed the user can access the network-data index that we have access to - new DatafeedBuilder(datafeedId, jobId, "network-data", "doc") + new DatafeedBuilder(datafeedId, jobId, "network-data") .setAggregations(aggregations) .setChunkingTimespan("300s") .setAuthHeader(BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS) @@ -712,7 +712,7 @@ public void testLookbackWithPipelineBucketAgg() throws Exception { + "\"airlines\":{\"terms\":{\"field\":\"airline.keyword\",\"size\":10}}," + "\"percentile95_airlines_count\":{\"percentiles_bucket\":" + "{\"buckets_path\":\"airlines._count\", \"percents\": [95]}}}}}"; - new DatafeedBuilder(datafeedId, jobId, "airline-data", "response").setAggregations(aggregations).build(); + new DatafeedBuilder(datafeedId, jobId, "airline-data").setAggregations(aggregations).build(); openJob(client(), jobId); @@ -801,7 +801,7 @@ public void testLookbackOnlyGivenAggregationsWithHistogramAndRollupIndex() throw + "\"aggregations\":{" + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + "\"responsetime\":{\"avg\":{\"field\":\"responsetime\"}}}}}"; - new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs-rollup", "response").setAggregations(aggregations).build(); + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs-rollup").setAggregations(aggregations).build(); openJob(client(), jobId); startDatafeedAndWaitUntilStopped(datafeedId); @@ -872,7 +872,7 @@ public void testLookbackWithoutPermissionsAndRollup() throws Exception { // At the time we create the datafeed the user can access the network-data index that we have access to - new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs-rollup", "doc") + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs-rollup") .setAggregations(aggregations) .setChunkingTimespan("300s") .setAuthHeader(BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS) @@ -919,7 +919,7 @@ public void testLookbackWithSingleBucketAgg() throws Exception { + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + "\"airlineFilter\":{\"filter\":{\"term\": {\"airline\":\"AAA\"}}," + " \"aggregations\":{\"responsetime\":{\"avg\":{\"field\":\"responsetime\"}}}}}}}"; - new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs", "response").setAggregations(aggregations).build(); + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs").setAggregations(aggregations).build(); openJob(client(), jobId); startDatafeedAndWaitUntilStopped(datafeedId); @@ -936,7 +936,7 @@ public void testRealtime() throws Exception { String jobId = "job-realtime-1"; createJob(jobId, "airline"); String datafeedId = jobId + "-datafeed"; - new DatafeedBuilder(datafeedId, jobId, "airline-data", "response").build(); + new DatafeedBuilder(datafeedId, jobId, "airline-data").build(); openJob(client(), jobId); Request startRequest = new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start"); @@ -994,7 +994,7 @@ public void testForceDeleteWhileDatafeedIsRunning() throws Exception { String jobId = "job-realtime-2"; createJob(jobId, "airline"); String datafeedId = jobId + "-datafeed"; - new 
DatafeedBuilder(datafeedId, jobId, "airline-data", "response").build(); + new DatafeedBuilder(datafeedId, jobId, "airline-data").build(); openJob(client(), jobId); Request startRequest = new Request("POST", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start"); @@ -1059,7 +1059,7 @@ public LookbackOnlyTestHelper setShouldSucceedProcessing(boolean value) { public void execute() throws Exception { createJob(jobId, airlineVariant); String datafeedId = "datafeed-" + jobId; - new DatafeedBuilder(datafeedId, jobId, dataIndex, "response") + new DatafeedBuilder(datafeedId, jobId, dataIndex) .setScriptedFields(addScriptedFields ? "{\"airline\":{\"script\":{\"lang\":\"painless\",\"inline\":\"doc['airline'].value\"}}}" : null) .build(); @@ -1106,7 +1106,7 @@ private void startDatafeedAndWaitUntilStopped(String datafeedId, String authHead } catch (Exception e) { throw new RuntimeException(e); } - }); + }, 60, TimeUnit.SECONDS); } private void waitUntilJobIsClosed(String jobId) throws Exception { @@ -1159,18 +1159,16 @@ private static class DatafeedBuilder { String datafeedId; String jobId; String index; - String type; boolean source; String scriptedFields; String aggregations; String authHeader = BASIC_AUTH_VALUE_SUPER_USER; String chunkingTimespan; - DatafeedBuilder(String datafeedId, String jobId, String index, String type) { + DatafeedBuilder(String datafeedId, String jobId, String index) { this.datafeedId = datafeedId; this.jobId = jobId; this.index = index; - this.type = type; } DatafeedBuilder setSource(boolean enableSource) { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedWithAggsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedWithAggsIT.java new file mode 100644 index 0000000000000..3e89da92b6fd9 --- /dev/null +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedWithAggsIT.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; +import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.junit.After; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +public class DatafeedWithAggsIT extends MlNativeAutodetectIntegTestCase { + + @After + public void cleanup(){ + cleanUp(); + } + + public void testRealtime() throws Exception { + String dataIndex = "datafeed-with-aggs-rt-data"; + + // A job with a bucket_span of 2s + String jobId = "datafeed-with-aggs-rt-job"; + DataDescription.Builder dataDescription = new DataDescription.Builder(); + + Detector.Builder d = new Detector.Builder("count", null); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(d.build())); + analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(2)); + analysisConfig.setSummaryCountFieldName("doc_count"); + + Job.Builder jobBuilder = new Job.Builder(); + jobBuilder.setId(jobId); + + jobBuilder.setAnalysisConfig(analysisConfig); + jobBuilder.setDataDescription(dataDescription); + + // Datafeed with aggs + String datafeedId = jobId + "-feed"; + DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder(datafeedId, jobId); + datafeedBuilder.setQueryDelay(TimeValue.timeValueMillis(100)); + datafeedBuilder.setFrequency(TimeValue.timeValueSeconds(1)); + datafeedBuilder.setIndices(Collections.singletonList(dataIndex)); + + AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); + aggs.addAggregator(AggregationBuilders.dateHistogram("time").field("time").interval(1000) + .subAggregation(AggregationBuilders.max("time").field("time"))); + datafeedBuilder.setParsedAggregations(aggs); + + DatafeedConfig datafeed = datafeedBuilder.build(); + + // Create stuff and open job + registerJob(jobBuilder); + putJob(jobBuilder); + registerDatafeed(datafeed); + putDatafeed(datafeed); + openJob(jobId); + + // Now let's index the data + client().admin().indices().prepareCreate(dataIndex) + .addMapping("type", "time", "type=date") + .get(); + + // Index a doc per second from a minute ago to a minute later + long now = System.currentTimeMillis(); + long aMinuteAgo = now - TimeValue.timeValueMinutes(1).millis(); + long aMinuteLater = now + TimeValue.timeValueMinutes(1).millis(); + long curTime = aMinuteAgo; + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + 
while (curTime < aMinuteLater) { + IndexRequest indexRequest = new IndexRequest(dataIndex); + indexRequest.source("time", curTime); + bulkRequestBuilder.add(indexRequest); + curTime += TimeValue.timeValueSeconds(1).millis(); + } + BulkResponse bulkResponse = bulkRequestBuilder + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + if (bulkResponse.hasFailures()) { + fail("Failed to index docs: " + bulkResponse.buildFailureMessage()); + } + + // And start datafeed in real-time mode + startDatafeed(datafeedId, 0L, null); + + // Wait until we finalize a bucket after now + assertBusy(() -> { + GetBucketsAction.Request getBucketsRequest = new GetBucketsAction.Request(jobId); + getBucketsRequest.setExcludeInterim(true); + getBucketsRequest.setSort("timestamp"); + getBucketsRequest.setDescending(true); + List buckets = getBuckets(getBucketsRequest); + assertThat(buckets.size(), greaterThanOrEqualTo(1)); + assertThat(buckets.get(0).getTimestamp().getTime(), greaterThan(now)); + }, 30, TimeUnit.SECONDS); + + // Wrap up + StopDatafeedAction.Response stopJobResponse = stopDatafeed(datafeedId); + assertTrue(stopJobResponse.isStopped()); + assertBusy(() -> { + GetDatafeedsStatsAction.Request request = new GetDatafeedsStatsAction.Request(datafeedId); + GetDatafeedsStatsAction.Response response = client().execute(GetDatafeedsStatsAction.INSTANCE, request).actionGet(); + assertThat(response.getResponse().results().get(0).getDatafeedState(), equalTo(DatafeedState.STOPPED)); + }); + closeJob(jobId); + + // Assert we have not dropped any data - final buckets should contain 2 events each + GetBucketsAction.Request getBucketsRequest = new GetBucketsAction.Request(jobId); + getBucketsRequest.setExcludeInterim(true); + List buckets = getBuckets(getBucketsRequest); + for (Bucket bucket : buckets) { + if (bucket.getEventCount() != 2) { + fail("Bucket [" + bucket.getTimestamp().getTime() + "] has [" + bucket.getEventCount() + "] when 2 were expected"); + } + } + } +} diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DelayedDataDetectorIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DelayedDataDetectorIT.java index ddebbe6038f19..8243c2e61e6f2 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DelayedDataDetectorIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DelayedDataDetectorIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; @@ -159,7 +159,7 @@ public void testMissingDataDetectionWithAggregationsAndQuery() throws Exception .subAggregation(avgAggregationBuilder) .field("time") .interval(TimeValue.timeValueMinutes(5).millis()))); - datafeedConfigBuilder.setParsedQuery(new RangeQueryBuilder("value").gte(numDocs/2)); + datafeedConfigBuilder.setParsedQuery(QueryBuilders.rangeQuery("value").gte(numDocs/2)); datafeedConfigBuilder.setFrequency(TimeValue.timeValueMinutes(5)); 
datafeedConfigBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(12))); @@ -251,6 +251,6 @@ private Bucket getLatestFinalizedBucket(String jobId) { } private DelayedDataDetector newDetector(Job job, DatafeedConfig datafeedConfig) { - return DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, client()); + return DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, client(), xContentRegistry()); } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java index 2a63ccaf41245..acead061369bd 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.integration; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; @@ -39,6 +40,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -48,12 +50,11 @@ public class DeleteExpiredDataIT extends MlNativeAutodetectIntegTestCase { private static final String DATA_INDEX = "delete-expired-data-test-data"; - private static final String DATA_TYPE = "doc"; @Before public void setUpData() throws IOException { client().admin().indices().prepareCreate(DATA_INDEX) - .addMapping(DATA_TYPE, "time", "type=date,format=epoch_millis") + .addMapping(SINGLE_MAPPING_NAME, "time", "type=date,format=epoch_millis") .get(); // We are going to create data for last 2 days @@ -67,7 +68,7 @@ public void setUpData() throws IOException { long timestamp = nowMillis - TimeValue.timeValueHours(totalBuckets - bucket).getMillis(); int bucketRate = bucket == anomalousBucket ? 
anomalousRate : normalRate; for (int point = 0; point < bucketRate; point++) { - IndexRequest indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + IndexRequest indexRequest = new IndexRequest(DATA_INDEX); indexRequest.source("time", timestamp); bulkRequestBuilder.add(indexRequest); } @@ -77,9 +78,6 @@ public void setUpData() throws IOException { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); assertThat(bulkResponse.hasFailures(), is(false)); - - // Ensure all data is searchable - client().admin().indices().prepareRefresh(DATA_INDEX).get(); } @After @@ -94,6 +92,17 @@ public void testDeleteExpiredDataGivenNothingToDelete() throws Exception { } public void testDeleteExpiredData() throws Exception { + // Index some unused state documents (more than 10K to test scrolling works) + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < 10010; i++) { + String docId = "non_existing_job_" + randomFrom("model_state_1234567#" + i, "quantiles", "categorizer_state#" + i); + IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.jobStateIndexWriteAlias()).id(docId); + indexRequest.source(Collections.emptyMap()); + bulkRequestBuilder.add(indexRequest); + } + ActionFuture indexUnusedStateDocsResponse = bulkRequestBuilder.execute(); + registerJob(newJobBuilder("no-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(1000L)); registerJob(newJobBuilder("results-retention").setResultsRetentionDays(1L).setModelSnapshotRetentionDays(1000L)); registerJob(newJobBuilder("snapshots-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(2L)); @@ -104,6 +113,8 @@ public void testDeleteExpiredData() throws Exception { long now = System.currentTimeMillis(); long oneDayAgo = now - TimeValue.timeValueHours(48).getMillis() - 1; + + // Start all jobs for (Job.Builder job : getJobs()) { putJob(job); @@ -117,7 +128,14 @@ public void testDeleteExpiredData() throws Exception { // Run up to a day ago openJob(job.getId()); startDatafeed(datafeedId, 0, now - TimeValue.timeValueHours(24).getMillis()); + } + + // Now let's wait for all jobs to be closed + for (Job.Builder job : getJobs()) { waitUntilJobIsClosed(job.getId()); + } + + for (Job.Builder job : getJobs()) { assertThat(getBuckets(job.getId()).size(), is(greaterThanOrEqualTo(47))); assertThat(getRecords(job.getId()).size(), equalTo(1)); List modelSnapshots = getModelSnapshots(job.getId()); @@ -126,7 +144,7 @@ public void testDeleteExpiredData() throws Exception { // Update snapshot timestamp to force it out of snapshot retention window String snapshotUpdate = "{ \"timestamp\": " + oneDayAgo + "}"; - UpdateRequest updateSnapshotRequest = new UpdateRequest(".ml-anomalies-" + job.getId(), "doc", snapshotDocId); + UpdateRequest updateSnapshotRequest = new UpdateRequest(".ml-anomalies-" + job.getId(), snapshotDocId); updateSnapshotRequest.doc(snapshotUpdate.getBytes(StandardCharsets.UTF_8), XContentType.JSON); client().execute(UpdateAction.INSTANCE, updateSnapshotRequest).get(); @@ -143,6 +161,7 @@ public void testDeleteExpiredData() throws Exception { waitForecastToFinish(job.getId(), forecastDefaultExpiryId); waitForecastToFinish(job.getId(), forecastNoExpiryId); } + // Refresh to ensure the snapshot timestamp updates are visible client().admin().indices().prepareRefresh("*").get(); @@ -175,16 +194,8 @@ public void testDeleteExpiredData() throws Exception { 
assertThat(countForecastDocs(forecastStat.getJobId(), forecastStat.getForecastId()), equalTo(forecastStat.getRecordCount())); } - // Index some unused state documents (more than 10K to test scrolling works) - BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - for (int i = 0; i < 10010; i++) { - String docId = "non_existing_job_" + randomFrom("model_state_1234567#" + i, "quantiles", "categorizer_state#" + i); - IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.jobStateIndexWriteAlias(), "doc", docId); - indexRequest.source(Collections.emptyMap()); - bulkRequestBuilder.add(indexRequest); - } - assertThat(bulkRequestBuilder.get().status(), equalTo(RestStatus.OK)); + // Before we call the delete-expired-data action we need to make sure the unused state docs were indexed + assertThat(indexUnusedStateDocsResponse.get().status(), equalTo(RestStatus.OK)); // Now call the action under test client().execute(DeleteExpiredDataAction.INSTANCE, new DeleteExpiredDataAction.Request()).get(); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsIT.java similarity index 67% rename from x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java rename to x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsIT.java index 4cbeaf1dc482c..5689e7bfe5421 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsIT.java @@ -5,13 +5,16 @@ */ package org.elasticsearch.xpack.ml.integration; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.action.FlushJobAction; import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; import org.elasticsearch.xpack.core.ml.action.util.PageParams; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.junit.After; @@ -24,28 +27,25 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; -/** - * Tests that interim results get updated correctly - */ -public class UpdateInterimResultsIT extends MlNativeAutodetectIntegTestCase { +public class InterimResultsIT extends MlNativeAutodetectIntegTestCase { - private static final String JOB_ID = "update-interim-test"; private static final long BUCKET_SPAN_SECONDS = 1000; private long time; @After - public void cleanUpTest() throws Exception { + public void cleanUpTest() { cleanUp(); } - public void test() throws Exception { + public void testInterimResultsUpdates() throws Exception { + String jobId = "test-interim-results-updates"; AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder( Collections.singletonList(new Detector.Builder("max", "value").build())); 
analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(BUCKET_SPAN_SECONDS)); DataDescription.Builder dataDescription = new DataDescription.Builder(); dataDescription.setTimeFormat("epoch"); - Job.Builder job = new Job.Builder(JOB_ID); + Job.Builder job = new Job.Builder(jobId); job.setAnalysisConfig(analysisConfig); job.setDataDescription(dataDescription); @@ -106,6 +106,47 @@ public void test() throws Exception { assertThat(bucket.get(0).getRecords().get(0).getActual().get(0), equalTo(16.0)); } + public void testNoInterimResultsAfterAdvancingBucket() throws Exception { + String jobId = "test-no-interim-results-after-advancing-bucket"; + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder( + Collections.singletonList(new Detector.Builder("count", null).build())); + analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(BUCKET_SPAN_SECONDS)); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder(jobId); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + time = 1400000000; + + // push some data, flush job, verify no interim results + assertThat(postData(job.getId(), createData(50)).getProcessedRecordCount(), equalTo(50L)); + FlushJobAction.Response flushResponse = flushJob(job.getId(), false); + assertThat(getInterimResults(job.getId()).isEmpty(), is(true)); + + // advance time and request interim results + long lastFinalizedBucketEnd = flushResponse.getLastFinalizedBucketEnd().getTime(); + FlushJobAction.Request advanceTimeRequest = new FlushJobAction.Request(jobId); + advanceTimeRequest.setAdvanceTime(String.valueOf(lastFinalizedBucketEnd + BUCKET_SPAN_SECONDS * 1000)); + advanceTimeRequest.setCalcInterim(true); + assertThat(client().execute(FlushJobAction.INSTANCE, advanceTimeRequest).actionGet().isFlushed(), is(true)); + + List interimResults = getInterimResults(job.getId()); + assertThat(interimResults.size(), equalTo(1)); + + // We expect there are no records. The bucket count is low but at the same time + // it is too early into the bucket to consider it an anomaly. Let's verify that.
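The getInterimResults(jobId) helper used throughout this test is not part of this hunk. Inside the test class, a minimal sketch of such a helper could look like the following, assuming GetBucketsAction with expand enabled; the page size and the interim filtering are illustrative, not necessarily the PR's actual implementation:

    private List<Bucket> getInterimResults(String jobId) {
        GetBucketsAction.Request request = new GetBucketsAction.Request(jobId);
        request.setExpand(true);                        // nest the records inside each bucket
        request.setPageParams(new PageParams(0, 1500)); // plenty of buckets for these short tests
        GetBucketsAction.Response response = client().execute(GetBucketsAction.INSTANCE, request).actionGet();
        return response.getBuckets().results().stream()
                .filter(Bucket::isInterim)              // keep only buckets still marked interim
                .collect(Collectors.toList());
    }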
+ List records = interimResults.get(0).getRecords(); + List recordsJson = records.stream().map(Strings::toString).collect(Collectors.toList()); + assertThat("Found interim records: " + recordsJson, records.isEmpty(), is(true)); + + closeJob(jobId); + } + private String createData(int halfBuckets) { StringBuilder data = new StringBuilder(); for (int i = 0; i < halfBuckets; i++) { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java index cd33e1d80769e..9e076ca710821 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java @@ -124,6 +124,12 @@ protected Collection> transportClientPlugins() { return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class, ReindexPlugin.class); } + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + @Override protected Settings externalClusterClientSettings() { Path key; diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java index b9074f86bf356..4041aa78e122e 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java @@ -28,6 +28,7 @@ import java.util.Set; import java.util.stream.Collectors; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; @@ -36,12 +37,11 @@ public class ModelPlotsIT extends MlNativeAutodetectIntegTestCase { private static final String DATA_INDEX = "model-plots-test-data"; - private static final String DATA_TYPE = "doc"; @Before public void setUpData() { client().admin().indices().prepareCreate(DATA_INDEX) - .addMapping(DATA_TYPE, "time", "type=date,format=epoch_millis", "user", "type=keyword") + .addMapping(SINGLE_MAPPING_NAME, "time", "type=date,format=epoch_millis", "user", "type=keyword") .get(); List users = Arrays.asList("user_1", "user_2", "user_3"); @@ -53,7 +53,7 @@ public void setUpData() { for (int bucket = 0; bucket < totalBuckets; bucket++) { long timestamp = nowMillis - TimeValue.timeValueHours(totalBuckets - bucket).getMillis(); for (String user : users) { - IndexRequest indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + IndexRequest indexRequest = new IndexRequest(DATA_INDEX); indexRequest.source("time", timestamp, "user", user); bulkRequestBuilder.add(indexRequest); } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java index 
6f885744b21a4..a68fa2fe02a8d 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java @@ -5,12 +5,17 @@ */ package org.elasticsearch.xpack.ml.integration; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.core.ml.action.FlushJobAction; import org.elasticsearch.xpack.core.ml.action.PersistJobAction; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.junit.After; @@ -18,8 +23,14 @@ import java.util.List; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + public class PersistJobIT extends MlNativeAutodetectIntegTestCase { + private static final long BUCKET_SPAN_SECONDS = 300; + private static final TimeValue BUCKET_SPAN = TimeValue.timeValueSeconds(BUCKET_SPAN_SECONDS); + @After public void cleanUpJobs() { cleanUp(); @@ -39,11 +50,149 @@ public void testPersistJob() throws Exception { }); } - private void runJob(String jobId) throws Exception { - TimeValue bucketSpan = TimeValue.timeValueMinutes(5); + // check that state is persisted after time has been advanced even if no new data is seen in the interim + public void testPersistJobOnGracefulShutdown_givenTimeAdvancedAfterNoNewData() throws Exception { + String jobId = "time-advanced-after-no-new-data-test"; + + // open and run a job with a small data set + runJob(jobId); + FlushJobAction.Response flushResponse = flushJob(jobId, true); + + closeJob(jobId); + long job1CloseTime = System.currentTimeMillis() / 1000; + + // Check that state has been persisted + SearchResponse stateDocsResponse1 = client().prepareSearch(AnomalyDetectorsIndex.jobStateIndexPattern()) + .setFetchSource(false) + .setTrackTotalHits(true) + .setSize(10000) + .get(); + + int numQuantileRecords = 0; + int numStateRecords = 0; + for (SearchHit hit : stateDocsResponse1.getHits().getHits()) { + logger.info("1: " + hit.getId()); + if (hit.getId().contains("quantiles")) { + ++numQuantileRecords; + } else if (hit.getId().contains("model_state")) { + ++numStateRecords; + } + } + assertThat(stateDocsResponse1.getHits().getTotalHits().value, equalTo(2L)); + assertThat(numQuantileRecords, equalTo(1)); + assertThat(numStateRecords, equalTo(1)); + + // To generate unique snapshot IDs ensure that there is at least a 1s delay between the + // time each job was closed + assertBusy(() -> { + long timeNow = System.currentTimeMillis() / 1000; + assertFalse(job1CloseTime >= timeNow); + }); + + // re-open the job + openJob(jobId); + + // advance time + long lastFinalizedBucketEnd = flushResponse.getLastFinalizedBucketEnd().getTime(); + FlushJobAction.Request advanceTimeRequest = new FlushJobAction.Request(jobId); + advanceTimeRequest.setAdvanceTime(String.valueOf(lastFinalizedBucketEnd + BUCKET_SPAN_SECONDS * 1000)); + advanceTimeRequest.setCalcInterim(false); + 
assertThat(client().execute(FlushJobAction.INSTANCE, advanceTimeRequest).actionGet().isFlushed(), is(true)); + + closeJob(jobId); + + // Check that a new state record exists. + SearchResponse stateDocsResponse2 = client().prepareSearch(AnomalyDetectorsIndex.jobStateIndexPattern()) + .setFetchSource(false) + .setTrackTotalHits(true) + .setSize(10000) + .get(); + + numQuantileRecords = 0; + numStateRecords = 0; + for (SearchHit hit : stateDocsResponse2.getHits().getHits()) { + logger.info("2: " + hit.getId()); + if (hit.getId().contains("quantiles")) { + ++numQuantileRecords; + } else if (hit.getId().contains("model_state")) { + ++numStateRecords; + } + } + + assertThat(stateDocsResponse2.getHits().getTotalHits().value, equalTo(3L)); + assertThat(numQuantileRecords, equalTo(1)); + assertThat(numStateRecords, equalTo(2)); + + deleteJob(jobId); + } + + // Check an edge case where time is manually advanced before any valid data is seen + public void testPersistJobOnGracefulShutdown_givenNoDataAndTimeAdvanced() throws Exception { + String jobId = "no-data-and-time-advanced-test"; + + createAndOpenJob(jobId); + + // Manually advance time. + FlushJobAction.Request advanceTimeRequest = new FlushJobAction.Request(jobId); + advanceTimeRequest.setAdvanceTime(String.valueOf(BUCKET_SPAN_SECONDS * 1000)); + advanceTimeRequest.setCalcInterim(false); + assertThat(client().execute(FlushJobAction.INSTANCE, advanceTimeRequest).actionGet().isFlushed(), is(true)); + + closeJob(jobId); + + // Check that state has been persisted + SearchResponse stateDocsResponse = client().prepareSearch(AnomalyDetectorsIndex.jobStateIndexPattern()) + .setFetchSource(false) + .setTrackTotalHits(true) + .setSize(10000) + .get(); + + int numQuantileRecords = 0; + int numStateRecords = 0; + for (SearchHit hit : stateDocsResponse.getHits().getHits()) { + logger.info(hit.getId()); + if (hit.getId().contains("quantiles")) { + ++numQuantileRecords; + } else if (hit.getId().contains("model_state")) { + ++numStateRecords; + } + } + assertThat(stateDocsResponse.getHits().getTotalHits().value, equalTo(2L)); + assertThat(numQuantileRecords, equalTo(1)); + assertThat(numStateRecords, equalTo(1)); + + // now check that the job can be happily restored - even though no data has been seen + AcknowledgedResponse ack = openJob(jobId); + assertTrue(ack.isAcknowledged()); + + closeJob(jobId); + deleteJob(jobId); + } + + // Check an edge case where a job is opened and then immediately closed + public void testPersistJobOnGracefulShutdown_givenNoDataAndNoTimeAdvance() throws Exception { + String jobId = "no-data-and-no-time-advance-test"; + + createAndOpenJob(jobId); + + closeJob(jobId); + + // Check that state has not been persisted + SearchResponse stateDocsResponse = client().prepareSearch(AnomalyDetectorsIndex.jobStateIndexPattern()) + .setFetchSource(false) + .setTrackTotalHits(true) + .setSize(10000) + .get(); + + assertThat(stateDocsResponse.getHits().getTotalHits().value, equalTo(0L)); + + deleteJob(jobId); + } + + private void createAndOpenJob(String jobId) throws Exception { Detector.Builder detector = new Detector.Builder("count", null); AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Arrays.asList(detector.build())); - analysisConfig.setBucketSpan(bucketSpan); + analysisConfig.setBucketSpan(BUCKET_SPAN); Job.Builder job = new Job.Builder(jobId); job.setAnalysisConfig(analysisConfig); job.setDataDescription(new DataDescription.Builder()); @@ -51,7 +200,11 @@ private void runJob(String jobId) throws Exception { putJob(job); 
openJob(job.getId()); - List data = generateData(System.currentTimeMillis(), bucketSpan, 10, bucketIndex -> randomIntBetween(10, 20)); - postData(job.getId(), data.stream().collect(Collectors.joining())); + } + + private void runJob(String jobId) throws Exception { + createAndOpenJob(jobId); + List data = generateData(System.currentTimeMillis(), BUCKET_SPAN, 10, bucketIndex -> randomIntBetween(10, 20)); + postData(jobId, data.stream().collect(Collectors.joining())); } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java index 57c9245e2c5b3..f97c27e4ccca4 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java @@ -25,6 +25,7 @@ import java.util.Collections; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.core.ml.MlTasks.AWAITING_UPGRADE; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createDatafeed; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createScheduledJob; import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.getDataCounts; @@ -33,7 +34,6 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.isEmptyString; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -88,12 +88,12 @@ public void testEnableUpgradeMode() throws Exception { GetJobsStatsAction.Response.JobStats jobStats = getJobStats(jobId).get(0); assertThat(jobStats.getState(), equalTo(JobState.OPENED)); - assertThat(jobStats.getAssignmentExplanation(), equalTo(MlTasks.AWAITING_UPGRADE.getExplanation())); + assertThat(jobStats.getAssignmentExplanation(), equalTo(AWAITING_UPGRADE.getExplanation())); assertThat(jobStats.getNode(), is(nullValue())); GetDatafeedsStatsAction.Response.DatafeedStats datafeedStats = getDatafeedStats(datafeedId); assertThat(datafeedStats.getDatafeedState(), equalTo(DatafeedState.STARTED)); - assertThat(datafeedStats.getAssignmentExplanation(), equalTo(MlTasks.AWAITING_UPGRADE.getExplanation())); + assertThat(datafeedStats.getAssignmentExplanation(), equalTo(AWAITING_UPGRADE.getExplanation())); assertThat(datafeedStats.getNode(), is(nullValue())); Job.Builder job = createScheduledJob("job-should-not-open"); @@ -126,13 +126,11 @@ public void testEnableUpgradeMode() throws Exception { jobStats = getJobStats(jobId).get(0); assertThat(jobStats.getState(), equalTo(JobState.OPENED)); - assertThat(jobStats.getAssignmentExplanation(), isEmptyString()); - assertThat(jobStats.getNode(), is(not(nullValue()))); + assertThat(jobStats.getAssignmentExplanation(), not(equalTo(AWAITING_UPGRADE.getExplanation()))); datafeedStats = getDatafeedStats(datafeedId); assertThat(datafeedStats.getDatafeedState(), equalTo(DatafeedState.STARTED)); - assertThat(datafeedStats.getAssignmentExplanation(), isEmptyString()); - assertThat(datafeedStats.getNode(), is(not(nullValue()))); + assertThat(datafeedStats.getAssignmentExplanation(), not(equalTo(AWAITING_UPGRADE.getExplanation()))); } private void startRealtime(String jobId) throws Exception { diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java index 35ec721a94710..bff85d691b4b8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/InvalidLicenseEnforcer.java @@ -3,17 +3,19 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ + package org.elasticsearch.xpack.ml; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.license.LicenseStateListener; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; -public class InvalidLicenseEnforcer { +public class InvalidLicenseEnforcer implements LicenseStateListener { private static final Logger logger = LogManager.getLogger(InvalidLicenseEnforcer.class); @@ -22,17 +24,32 @@ public class InvalidLicenseEnforcer { private final DatafeedManager datafeedManager; private final AutodetectProcessManager autodetectProcessManager; + private volatile boolean licenseStateListenerRegistered; + InvalidLicenseEnforcer(XPackLicenseState licenseState, ThreadPool threadPool, DatafeedManager datafeedManager, AutodetectProcessManager autodetectProcessManager) { this.threadPool = threadPool; this.licenseState = licenseState; this.datafeedManager = datafeedManager; this.autodetectProcessManager = autodetectProcessManager; - licenseState.addListener(this::closeJobsAndDatafeedsIfLicenseExpired); } - private void closeJobsAndDatafeedsIfLicenseExpired() { + void listenForLicenseStateChanges() { + /* + * Registering this as a listener can not be done in the constructor because otherwise it would be unsafe publication of this. That + * is, it would expose this to another thread before the constructor had finished. Therefore, we have a dedicated method to register + * the listener that is invoked after the constructor has returned. 
+ */ + assert licenseStateListenerRegistered == false; + licenseState.addListener(this); + licenseStateListenerRegistered = true; + } + + @Override + public void licenseStateChanged() { + assert licenseStateListenerRegistered; if (licenseState.isMachineLearningAllowed() == false) { + // if the license has expired, close jobs and datafeeds threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { @@ -47,4 +64,5 @@ protected void doRun() throws Exception { }); } } + } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index e5376bccb1745..5d6a1857a6c0b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -111,7 +111,6 @@ import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; -import org.elasticsearch.xpack.core.ml.notifications.AuditMessage; import org.elasticsearch.xpack.core.ml.notifications.AuditorField; import org.elasticsearch.xpack.core.template.TemplateUtils; import org.elasticsearch.xpack.ml.action.TransportCloseJobAction; @@ -250,6 +249,7 @@ import java.util.function.UnaryOperator; import static java.util.Collections.emptyList; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlugin, PersistentTaskPlugin { public static final String NAME = "ml"; @@ -271,8 +271,14 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu public static final String MACHINE_MEMORY_NODE_ATTR = "ml.machine_memory"; public static final Setting CONCURRENT_JOB_ALLOCATIONS = Setting.intSetting("xpack.ml.node_concurrent_job_allocations", 2, 0, Property.Dynamic, Property.NodeScope); + // Values higher than 100% are allowed to accommodate use cases where swapping has been determined to be acceptable. + // Anomaly detector jobs only use their full model memory during background persistence, and this is deliberately + // staggered, so with large numbers of jobs few will generally be persisting state at the same time. + // Settings higher than available memory are only recommended for OEM type situations where a wrapper tightly + // controls the types of jobs that can be created, and each job alone is considerably smaller than what each node + // can handle. 
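With the upper bound raised from 90 to 200 in the declaration that follows, an operator can budget more memory for ML jobs than the node physically has. A hedged sketch with the Settings API (the value 150 is purely illustrative and only sensible with ample swap or the tightly controlled OEM-style setups described above); because the setting is dynamic it can also be changed at runtime through the cluster settings API:

    import org.elasticsearch.common.settings.Settings;

    // Illustrative only: let ML job memory budgets add up to 150% of this node's physical RAM.
    Settings mlNodeSettings = Settings.builder()
            .put("xpack.ml.max_machine_memory_percent", 150)
            .build();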
public static final Setting MAX_MACHINE_MEMORY_PERCENT = - Setting.intSetting("xpack.ml.max_machine_memory_percent", 30, 5, 90, Property.Dynamic, Property.NodeScope); + Setting.intSetting("xpack.ml.max_machine_memory_percent", 30, 5, 200, Property.Dynamic, Property.NodeScope); public static final Setting MAX_LAZY_ML_NODES = Setting.intSetting("xpack.ml.max_lazy_ml_nodes", 0, 0, 3, Property.Dynamic, Property.NodeScope); @@ -386,10 +392,18 @@ public Collection createComponents(Client client, ClusterService cluster Auditor auditor = new Auditor(client, clusterService.getNodeName()); JobResultsProvider jobResultsProvider = new JobResultsProvider(client, settings); - JobConfigProvider jobConfigProvider = new JobConfigProvider(client); + JobConfigProvider jobConfigProvider = new JobConfigProvider(client, xContentRegistry); DatafeedConfigProvider datafeedConfigProvider = new DatafeedConfigProvider(client, xContentRegistry); UpdateJobProcessNotifier notifier = new UpdateJobProcessNotifier(client, clusterService, threadPool); - JobManager jobManager = new JobManager(env, settings, jobResultsProvider, clusterService, auditor, threadPool, client, notifier); + JobManager jobManager = new JobManager(env, + settings, + jobResultsProvider, + clusterService, + auditor, + threadPool, + client, + notifier, + xContentRegistry); // special holder for @link(MachineLearningFeatureSetUsage) which needs access to job manager if ML is enabled JobManagerHolder jobManagerHolder = new JobManagerHolder(jobManager); @@ -435,13 +449,15 @@ public Collection createComponents(Client client, ClusterService cluster DatafeedManager datafeedManager = new DatafeedManager(threadPool, client, clusterService, datafeedJobBuilder, System::currentTimeMillis, auditor, autodetectProcessManager); this.datafeedManager.set(datafeedManager); - MlLifeCycleService mlLifeCycleService = new MlLifeCycleService(environment, clusterService, datafeedManager, - autodetectProcessManager); MlMemoryTracker memoryTracker = new MlMemoryTracker(settings, clusterService, threadPool, jobManager, jobResultsProvider); this.memoryTracker.set(memoryTracker); + MlLifeCycleService mlLifeCycleService = new MlLifeCycleService(environment, clusterService, datafeedManager, + autodetectProcessManager, memoryTracker); - // This object's constructor attaches to the license state, so there's no need to retain another reference to it - new InvalidLicenseEnforcer(getLicenseState(), threadPool, datafeedManager, autodetectProcessManager); + // this object registers as a license state listener, and is never removed, so there's no need to retain another reference to it + final InvalidLicenseEnforcer enforcer = + new InvalidLicenseEnforcer(getLicenseState(), threadPool, datafeedManager, autodetectProcessManager); + enforcer.listenForLicenseStateChanges(); // run node startup tasks autodetectProcessManager.onNodeStartup(); @@ -650,7 +666,7 @@ public UnaryOperator> getIndexTemplateMetaDat try (XContentBuilder auditMapping = ElasticsearchMappings.auditMessageMapping()) { IndexTemplateMetaData notificationMessageTemplate = IndexTemplateMetaData.builder(AuditorField.NOTIFICATIONS_INDEX) - .putMapping(AuditMessage.TYPE.getPreferredName(), Strings.toString(auditMapping)) + .putMapping(SINGLE_MAPPING_NAME, Strings.toString(auditMapping)) .patterns(Collections.singletonList(AuditorField.NOTIFICATIONS_INDEX)) .version(Version.CURRENT.id) .settings(Settings.builder() @@ -675,7 +691,7 @@ public UnaryOperator> getIndexTemplateMetaDat .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1") 
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting)) .version(Version.CURRENT.id) - .putMapping(MlMetaIndex.TYPE, Strings.toString(docMapping)) + .putMapping(SINGLE_MAPPING_NAME, Strings.toString(docMapping)) .build(); templates.put(MlMetaIndex.INDEX_NAME, metaTemplate); } catch (IOException e) { @@ -694,7 +710,7 @@ public UnaryOperator> getIndexTemplateMetaDat .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW)) .version(Version.CURRENT.id) - .putMapping(ElasticsearchMappings.DOC_TYPE, Strings.toString(configMapping)) + .putMapping(SINGLE_MAPPING_NAME, Strings.toString(configMapping)) .build(); templates.put(AnomalyDetectorsIndex.configIndexName(), configTemplate); } catch (IOException e) { @@ -708,15 +724,16 @@ public UnaryOperator> getIndexTemplateMetaDat .settings(Settings.builder() .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1") .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting)) - .putMapping(ElasticsearchMappings.DOC_TYPE, Strings.toString(stateMapping)) + .putMapping(SINGLE_MAPPING_NAME, Strings.toString(stateMapping)) .version(Version.CURRENT.id) .build(); + templates.put(AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX, stateTemplate); } catch (IOException e) { logger.error("Error loading the template for the " + AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX + " index", e); } - try (XContentBuilder docMapping = ElasticsearchMappings.resultsMapping()) { + try (XContentBuilder docMapping = ElasticsearchMappings.resultsMapping(SINGLE_MAPPING_NAME)) { IndexTemplateMetaData jobResultsTemplate = IndexTemplateMetaData.builder(AnomalyDetectorsIndex.jobResultsIndexPrefix()) .patterns(Collections.singletonList(AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*")) .settings(Settings.builder() @@ -728,7 +745,7 @@ public UnaryOperator> getIndexTemplateMetaDat .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), "async") // set the default all search field .put(IndexSettings.DEFAULT_FIELD_SETTING.getKey(), ElasticsearchMappings.ALL_FIELD_VALUES)) - .putMapping(ElasticsearchMappings.DOC_TYPE, Strings.toString(docMapping)) + .putMapping(SINGLE_MAPPING_NAME, Strings.toString(docMapping)) .version(Version.CURRENT.id) .build(); templates.put(AnomalyDetectorsIndex.jobResultsIndexPrefix(), jobResultsTemplate); @@ -763,8 +780,8 @@ static long machineMemoryFromStats(OsStats stats) { if (containerLimitStr != null) { BigInteger containerLimit = new BigInteger(containerLimitStr); if ((containerLimit.compareTo(BigInteger.valueOf(mem)) < 0 && containerLimit.compareTo(BigInteger.ZERO) > 0) - // mem < 0 means the value couldn't be obtained for some reason - || (mem < 0 && containerLimit.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) < 0)) { + // mem <= 0 means the value couldn't be obtained for some reason + || (mem <= 0 && containerLimit.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) < 0)) { mem = containerLimit.longValue(); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java index 74948986d7013..d1673dd3c914c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import 
org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; @@ -66,6 +65,7 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -246,10 +246,14 @@ public ClusterState execute(ClusterState currentState) { currentState.metaData().custom(PersistentTasksCustomMetaData.TYPE), currentState.nodes()); ClusterState.Builder newState = ClusterState.builder(currentState); - newState.metaData(MetaData.builder(currentState.getMetaData()) - .putCustom(MlMetadata.TYPE, removed.mlMetadata) - .putCustom(PersistentTasksCustomMetaData.TYPE, updatedTasks) - .build()); + MetaData.Builder metaDataBuilder = MetaData.builder(currentState.getMetaData()) + .putCustom(MlMetadata.TYPE, removed.mlMetadata); + + // If there are no tasks in the cluster state metadata to begin with, this could be null. + if (updatedTasks != null) { + metaDataBuilder = metaDataBuilder.putCustom(PersistentTasksCustomMetaData.TYPE, updatedTasks); + } + newState.metaData(metaDataBuilder.build()); return newState.build(); } @@ -414,7 +418,7 @@ private void addDatafeedIndexRequests(Collection datafeedConfigs } private IndexRequest indexRequest(ToXContentObject source, String documentId, ToXContent.Params params) { - IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.configIndexName(), ElasticsearchMappings.DOC_TYPE, documentId); + IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.configIndexName()).id(documentId); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { indexRequest.source(source.toXContent(builder, params)); @@ -439,9 +443,9 @@ public void snapshotMlMeta(MlMetadata mlMetadata, ActionListener listen logger.debug("taking a snapshot of ml_metadata"); String documentId = "ml-config"; - IndexRequestBuilder indexRequest = client.prepareIndex(AnomalyDetectorsIndex.jobStateIndexWriteAlias(), - ElasticsearchMappings.DOC_TYPE, documentId) - .setOpType(DocWriteRequest.OpType.CREATE); + IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.jobStateIndexWriteAlias()) + .id(documentId) + .opType(DocWriteRequest.OpType.CREATE); ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true")); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { @@ -449,7 +453,7 @@ public void snapshotMlMeta(MlMetadata mlMetadata, ActionListener listen mlMetadata.toXContent(builder, params); builder.endObject(); - indexRequest.setSource(builder); + indexRequest.source(builder); } catch (IOException e) { logger.error("failed to serialise ml_metadata", e); listener.onFailure(e); @@ -458,7 +462,7 @@ public void snapshotMlMeta(MlMetadata mlMetadata, ActionListener listen AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary(client, clusterService.state(), ActionListener.wrap( r -> { - executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, indexRequest.request(), + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, indexRequest, ActionListener.wrap( indexResponse -> { listener.onResponse(indexResponse.getResult() == DocWriteResponse.Result.CREATED); @@ 
-489,7 +493,7 @@ private void createConfigIndex(ActionListener listener) { .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1") .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW) ); - createIndexRequest.mapping(ElasticsearchMappings.DOC_TYPE, ElasticsearchMappings.configMapping()); + createIndexRequest.mapping(SINGLE_MAPPING_NAME, ElasticsearchMappings.configMapping()); } catch (Exception e) { logger.error("error writing the .ml-config mappings", e); listener.onFailure(e); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java index 302d9a7611d96..06d9b749e1a89 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; +import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.elasticsearch.xpack.ml.process.NativeController; import org.elasticsearch.xpack.ml.process.NativeControllerHolder; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; @@ -20,16 +21,14 @@ public class MlLifeCycleService { private final Environment environment; private final DatafeedManager datafeedManager; private final AutodetectProcessManager autodetectProcessManager; - - public MlLifeCycleService(Environment environment, ClusterService clusterService) { - this(environment, clusterService, null, null); - } + private final MlMemoryTracker memoryTracker; public MlLifeCycleService(Environment environment, ClusterService clusterService, DatafeedManager datafeedManager, - AutodetectProcessManager autodetectProcessManager) { + AutodetectProcessManager autodetectProcessManager, MlMemoryTracker memoryTracker) { this.environment = environment; this.datafeedManager = datafeedManager; this.autodetectProcessManager = autodetectProcessManager; + this.memoryTracker = memoryTracker; clusterService.addLifecycleListener(new LifecycleListener() { @Override public void beforeStop() { @@ -45,7 +44,7 @@ public synchronized void stop() { // datafeeds, so they get reallocated. We have to do this first, otherwise the datafeeds // could fail if they send data to a dead autodetect process. 
if (datafeedManager != null) { - datafeedManager.isolateAllDatafeedsOnThisNode(); + datafeedManager.isolateAllDatafeedsOnThisNodeBeforeShutdown(); } NativeController nativeController = NativeControllerHolder.getNativeController(environment); if (nativeController != null) { @@ -59,5 +58,8 @@ public synchronized void stop() { } catch (IOException e) { // We're stopping anyway, so don't let this complicate the shutdown sequence } + if (memoryTracker != null) { + memoryTracker.stop(); + } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarEventAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarEventAction.java index 6fd316f31312a..29896b438bedc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarEventAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarEventAction.java @@ -55,7 +55,7 @@ protected void doExecute(Task task, DeleteCalendarEventAction.Request request, ActionListener calendarListener = ActionListener.wrap( calendar -> { - GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, eventId); + GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, eventId); executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, ActionListener.wrap( getResponse -> { if (getResponse.isExists() == false) { @@ -89,7 +89,7 @@ protected void doExecute(Task task, DeleteCalendarEventAction.Request request, } private void deleteEvent(String eventId, Calendar calendar, ActionListener listener) { - DeleteRequest deleteRequest = new DeleteRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, eventId); + DeleteRequest deleteRequest = new DeleteRequest(MlMetaIndex.INDEX_NAME, eventId); deleteRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); executeAsyncWithOrigin(client, ML_ORIGIN, DeleteAction.INSTANCE, deleteRequest, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java index bc4b25af1feb9..a94436d5c6920 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java @@ -84,8 +84,7 @@ private static List findJobsUsingFilter(List jobs, String filterId) } private void deleteFilter(String filterId, ActionListener listener) { - DeleteRequest deleteRequest = new DeleteRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, - MlFilter.documentId(filterId)); + DeleteRequest deleteRequest = new DeleteRequest(MlMetaIndex.INDEX_NAME, MlFilter.documentId(filterId)); BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); bulkRequestBuilder.add(deleteRequest); bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java index 9d76844121cbb..14491d7ca9c61 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java @@ -24,7 +24,6 @@ import 
org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.utils.VoidChainTaskExecutor; @@ -71,8 +70,7 @@ protected void masterOperation(FinalizeJobExecutionAction.Request request, Clust Map update = Collections.singletonMap(Job.FINISHED_TIME.getPreferredName(), new Date()); for (String jobId: request.getJobIds()) { - UpdateRequest updateRequest = new UpdateRequest(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + UpdateRequest updateRequest = new UpdateRequest(AnomalyDetectorsIndex.configIndexName(), Job.documentId(jobId)); updateRequest.retryOnConflict(3); updateRequest.doc(update); updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java index 2ff18b689a7da..a45fbee40b014 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetFiltersAction.java @@ -68,7 +68,7 @@ protected void doExecute(Task task, GetFiltersAction.Request request, ActionList } private void getFilter(String filterId, ActionListener listener) { - GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, MlFilter.documentId(filterId)); + GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, MlFilter.documentId(filterId)); executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { @Override public void onResponse(GetResponse getDocResponse) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java index 49e07c30b18f9..fdb5bb6ec093f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPostCalendarEventsAction.java @@ -62,7 +62,7 @@ protected void doExecute(Task task, PostCalendarEventsAction.Request request, BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); for (ScheduledEvent event: events) { - IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE); + IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { indexRequest.source(event.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.INCLUDE_TYPE, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java index c87fabe0b771d..89ad54e9c1802 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java @@ -11,6 +11,7 @@ import 
org.elasticsearch.client.Client; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -38,17 +39,19 @@ public class TransportPreviewDatafeedAction extends HandledTransportAction) PreviewDatafeedAction.Request::new); this.threadPool = threadPool; this.client = client; this.jobConfigProvider = jobConfigProvider; this.datafeedConfigProvider = datafeedConfigProvider; + this.xContentRegistry = xContentRegistry; } @Override @@ -67,7 +70,7 @@ protected void doExecute(Task task, PreviewDatafeedAction.Request request, Actio // NB: this is using the client from the transport layer, NOT the internal client. // This is important because it means the datafeed search will fail if the user // requesting the preview doesn't have permission to search the relevant indices. - DataExtractorFactory.create(client, previewDatafeed.build(), jobBuilder.build(), + DataExtractorFactory.create(client, previewDatafeed.build(), jobBuilder.build(), xContentRegistry, new ActionListener() { @Override public void onResponse(DataExtractorFactory dataExtractorFactory) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java index 8355b9cff95e6..7ff04f5ed018a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java @@ -49,7 +49,7 @@ public TransportPutCalendarAction(TransportService transportService, ActionFilte protected void doExecute(Task task, PutCalendarAction.Request request, ActionListener listener) { Calendar calendar = request.getCalendar(); - IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, calendar.documentId()); + IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME).id(calendar.documentId()); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { indexRequest.source(calendar.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.INCLUDE_TYPE, "true")))); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java index c5a8fee50a581..993fe548cc952 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java @@ -62,6 +62,7 @@ public class TransportPutDatafeedAction extends TransportMasterNodeAction listener.onFailure(validationError); return; } - DatafeedConfig.validateAggregations(request.getDatafeed().getParsedAggregations()); + DatafeedConfig.validateAggregations(request.getDatafeed().getParsedAggregations(xContentRegistry)); CheckedConsumer validationOk = ok -> { datafeedConfigProvider.putDatafeedConfig(request.getDatafeed(), headers, ActionListener.wrap( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java index 
225aa8c5b933d..0c1f37f4256cb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java @@ -48,7 +48,7 @@ public TransportPutFilterAction(TransportService transportService, ActionFilters @Override protected void doExecute(Task task, PutFilterAction.Request request, ActionListener listener) { MlFilter filter = request.getFilter(); - IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, filter.documentId()); + IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME).id(filter.documentId()); indexRequest.opType(DocWriteRequest.OpType.CREATE); indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java index edc31f1e896b7..58ff31a6bc847 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java @@ -197,13 +197,9 @@ protected void masterOperation(SetUpgradeModeAction.Request request, ClusterStat (t) -> t.getAssignment().equals(AWAITING_UPGRADE)) .isEmpty() && - // Datafeeds to wait for a non-"Awaiting upgrade" assignment and for the job task allocations to converge - // If we do not wait, deleting datafeeds, or attempting to unallocate them again causes issues as the - // job's task allocationId could have changed during either process. 
+ // Wait for datafeeds to not be "Awaiting upgrade" persistentTasksCustomMetaData.findTasks(DATAFEED_TASK_NAME, - (t) -> - t.getAssignment().equals(AWAITING_UPGRADE) || - t.getAssignment().getExplanation().contains("state is stale")) + (t) -> t.getAssignment().equals(AWAITING_UPGRADE)) .isEmpty(), request.timeout(), ActionListener.wrap(r -> wrappedListener.onResponse(new AcknowledgedResponse(true)), wrappedListener::onFailure) @@ -263,6 +259,9 @@ private void unassignPersistentTasks(PersistentTasksCustomMetaData tasksCustomMe .sorted(Comparator.comparing(PersistentTask::getTaskName)) .collect(Collectors.toList()); + logger.info("Un-assigning persistent tasks : " + + datafeedAndJobTasks.stream().map(PersistentTask::getId).collect(Collectors.joining(", ", "[ ", " ]"))); + TypedChainTaskExecutor> chainTaskExecutor = new TypedChainTaskExecutor<>(client.threadPool().executor(executor()), r -> true, @@ -287,6 +286,7 @@ private void isolateDatafeeds(PersistentTasksCustomMetaData tasksCustomMetaData, ActionListener> listener) { Set datafeedsToIsolate = MlTasks.startedDatafeedIds(tasksCustomMetaData); + logger.info("Isolating datafeeds: " + datafeedsToIsolate.toString()); TypedChainTaskExecutor isolateDatafeedsExecutor = new TypedChainTaskExecutor<>(client.threadPool().executor(executor()), r -> true, ex -> true); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index f81fcfbfb1d2e..1a0d1b84c87d5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.license.XPackLicenseState; @@ -78,6 +79,7 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction deprecationWarnings = new ArrayList<>(); - deprecationWarnings.addAll(datafeed.getAggDeprecations()); - deprecationWarnings.addAll(datafeed.getQueryDeprecations()); + deprecationWarnings.addAll(datafeed.getAggDeprecations(xContentRegistry)); + deprecationWarnings.addAll(datafeed.getQueryDeprecations(xContentRegistry)); if (deprecationWarnings.isEmpty() == false) { String msg = "datafeed [" + datafeed.getId() +"] configuration has deprecations. 
[" + Strings.collectionToDelimitedString(deprecationWarnings, ", ") + "]"; @@ -200,8 +206,8 @@ public void onFailure(Exception e) { jobBuilder -> { try { Job job = jobBuilder.build(); - validate(job, datafeedConfigHolder.get(), tasks); - auditDeprecations(datafeedConfigHolder.get(), job, auditor); + validate(job, datafeedConfigHolder.get(), tasks, xContentRegistry); + auditDeprecations(datafeedConfigHolder.get(), job, auditor, xContentRegistry); createDataExtrator.accept(job); } catch (Exception e) { listener.onFailure(e); @@ -231,7 +237,7 @@ public void onFailure(Exception e) { private void createDataExtractor(Job job, DatafeedConfig datafeed, StartDatafeedAction.DatafeedParams params, ActionListener> listener) { - DataExtractorFactory.create(client, datafeed, job, ActionListener.wrap( + DataExtractorFactory.create(client, datafeed, job, xContentRegistry, ActionListener.wrap( dataExtractorFactory -> persistentTasksService.sendStartRequest(MlTasks.datafeedTaskId(params.getDatafeedId()), MlTasks.DATAFEED_TASK_NAME, params, listener) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index 636138a855bce..cbd55bb60d896 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; @@ -104,7 +103,7 @@ protected void doExecute(Task task, StopDatafeedAction.Request request, ActionLi final DiscoveryNodes nodes = state.nodes(); if (nodes.isLocalNodeElectedMaster() == false) { // Delegates stop datafeed to elected master node, so it becomes the coordinating node. - // See comment in StartDatafeedAction.Transport class for more information. + // See comment in TransportStartDatafeedAction for more information. if (nodes.getMasterNode() == null) { listener.onFailure(new MasterNotDiscoveredException("no known master node")); } else { @@ -142,13 +141,21 @@ private void normalStopDatafeed(Task task, StopDatafeedAction.Request request, A Set executorNodes = new HashSet<>(); for (String datafeedId : startedDatafeeds) { PersistentTasksCustomMetaData.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); - if (datafeedTask == null || datafeedTask.isAssigned() == false) { - String message = "Cannot stop datafeed [" + datafeedId + "] because the datafeed does not have an assigned node." 
+ - " Use force stop to stop the datafeed"; - listener.onFailure(ExceptionsHelper.conflictStatusException(message)); - return; - } else { + if (datafeedTask == null) { + // This should not happen, because startedDatafeeds was derived from the same tasks that is passed to this method + String msg = "Requested datafeed [" + datafeedId + "] be stopped, but datafeed's task could not be found."; + assert datafeedTask != null : msg; + logger.error(msg); + } else if (datafeedTask.isAssigned()) { executorNodes.add(datafeedTask.getExecutorNode()); + } else { + // This is the easy case - the datafeed is not currently assigned to a node, + // so can be gracefully stopped simply by removing its persistent task. (Usually + // a graceful stop cannot be achieved by simply removing the persistent task, but + // if the datafeed has no running code then graceful/forceful are the same.) + // The listener here can be a no-op, as waitForDatafeedStopped() already waits for + // these persistent tasks to disappear. + persistentTasksService.sendRemoveRequest(datafeedTask.getId(), ActionListener.wrap(r -> {}, e -> {})); } } @@ -198,9 +205,10 @@ public void onFailure(Exception e) { } }); } else { - String msg = "Requested datafeed [" + request.getDatafeedId() + "] be force-stopped, but " + - "datafeed's task could not be found."; - logger.warn(msg); + // This should not happen, because startedDatafeeds was derived from the same tasks that is passed to this method + String msg = "Requested datafeed [" + datafeedId + "] be force-stopped, but datafeed's task could not be found."; + assert datafeedTask != null : msg; + logger.error(msg); final int slot = counter.incrementAndGet(); failures.set(slot - 1, new RuntimeException(msg)); if (slot == startedDatafeeds.size()) { @@ -248,19 +256,18 @@ protected void doRun() throws Exception { private void sendResponseOrFailure(String datafeedId, ActionListener listener, AtomicArray failures) { - List catchedExceptions = failures.asList(); - if (catchedExceptions.size() == 0) { + List caughtExceptions = failures.asList(); + if (caughtExceptions.size() == 0) { listener.onResponse(new StopDatafeedAction.Response(true)); return; } - String msg = "Failed to stop datafeed [" + datafeedId + "] with [" + catchedExceptions.size() + String msg = "Failed to stop datafeed [" + datafeedId + "] with [" + caughtExceptions.size() + "] failures, rethrowing last, all Exceptions: [" - + catchedExceptions.stream().map(Exception::getMessage).collect(Collectors.joining(", ")) + + caughtExceptions.stream().map(Exception::getMessage).collect(Collectors.joining(", ")) + "]"; - ElasticsearchException e = new ElasticsearchException(msg, - catchedExceptions.get(0)); + ElasticsearchException e = new ElasticsearchException(msg, caughtExceptions.get(0)); listener.onFailure(e); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java index 09a8f219afcf4..b7029eff79cd8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java @@ -49,7 +49,7 @@ public TransportUpdateDatafeedAction(Settings settings, TransportService transpo indexNameExpressionResolver, UpdateDatafeedAction.Request::new); datafeedConfigProvider = new DatafeedConfigProvider(client, xContentRegistry); - jobConfigProvider = new 
JobConfigProvider(client); + jobConfigProvider = new JobConfigProvider(client, xContentRegistry); migrationEligibilityCheck = new MlConfigMigrationEligibilityCheck(settings, clusterService); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java index fe5ae7eb6e8bf..1fe298fe8cc51 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java @@ -103,7 +103,7 @@ private void updateFilter(FilterWithSeqNo filterWithVersion, UpdateFilterAction. private void indexUpdatedFilter(MlFilter filter, final long seqNo, final long primaryTerm, UpdateFilterAction.Request request, ActionListener listener) { - IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, filter.documentId()); + IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME).id(filter.documentId()); indexRequest.setIfSeqNo(seqNo); indexRequest.setIfPrimaryTerm(primaryTerm); indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); @@ -139,7 +139,7 @@ public void onFailure(Exception e) { } private void getFilterWithVersion(String filterId, ActionListener listener) { - GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, MlFilter.documentId(filterId)); + GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, MlFilter.documentId(filterId)); executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { @Override public void onResponse(GetResponse getDocResponse) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateModelSnapshotAction.java index bf252d9b83e09..419089697451f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateModelSnapshotAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.results.Result; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; @@ -80,8 +79,7 @@ private static Result applyUpdate(UpdateModelSnapshotAction.Reque } private void indexModelSnapshot(Result modelSnapshot, Consumer handler, Consumer errorHandler) { - IndexRequest indexRequest = new IndexRequest(modelSnapshot.index, ElasticsearchMappings.DOC_TYPE, - ModelSnapshot.documentId(modelSnapshot.result)); + IndexRequest indexRequest = new IndexRequest(modelSnapshot.index).id(ModelSnapshot.documentId(modelSnapshot.result)); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { modelSnapshot.result.toXContent(builder, ToXContent.EMPTY_PARAMS); indexRequest.source(builder); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java 
index 85f2489e6b0e5..db6e094714701 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java @@ -49,7 +49,6 @@ import java.util.function.Supplier; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; class DatafeedJob { @@ -237,7 +236,7 @@ private String addAndSetDelayedDataAnnotation(Annotation annotation) { try (XContentBuilder xContentBuilder = annotation.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) { IndexRequest request = new IndexRequest(AnnotationIndex.WRITE_ALIAS_NAME); request.source(xContentBuilder); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { IndexResponse response = client.index(request).actionGet(); lastDataCheckAnnotation = annotation; return response.getId(); @@ -261,7 +260,7 @@ private void updateAnnotation(Annotation annotation) { IndexRequest indexRequest = new IndexRequest(AnnotationIndex.WRITE_ALIAS_NAME); indexRequest.id(lastDataCheckAnnotationId); indexRequest.source(xContentBuilder); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { client.index(indexRequest).actionGet(); lastDataCheckAnnotation = updatedAnnotation; } @@ -380,7 +379,7 @@ private void run(long start, long end, FlushJobAction.Request flushRequest) thro } } - lastEndTimeMs = Math.max(lastEndTimeMs == null ? 0 : lastEndTimeMs, end - 1); + lastEndTimeMs = Math.max(lastEndTimeMs == null ? 
0 : lastEndTimeMs, dataExtractor.getEndTime() - 1); LOGGER.debug("[{}] Complete iterating data extractor [{}], [{}], [{}], [{}], [{}]", jobId, error, recordCount, lastEndTimeMs, isRunning(), dataExtractor.isCancelled()); @@ -411,7 +410,7 @@ private DataCounts postData(InputStream inputStream, XContentType xContentType) ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); Streams.copy(inputStream, outputStream); request.setContent(new BytesArray(outputStream.toByteArray()), xContentType); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { PostDataAction.Response response = client.execute(PostDataAction.INSTANCE, request).actionGet(); return response.getDataCounts(); } @@ -440,7 +439,7 @@ private long toIntervalStartEpochMs(long epochMs) { private FlushJobAction.Response flushJob(FlushJobAction.Request flushRequest) { try { LOGGER.trace("[" + jobId + "] Sending flush request"); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { return client.execute(FlushJobAction.INSTANCE, flushRequest).actionGet(); } } catch (Exception e) { @@ -465,7 +464,7 @@ private boolean shouldPersistAfterLookback(boolean isLookbackOnly) { private void sendPersistRequest() { try { LOGGER.trace("[" + jobId + "] Sending persist request"); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { client.execute(PersistJobAction.INSTANCE, new PersistJobAction.Request(jobId)); } } catch (Exception e) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java index d4bbc04cdf51d..969bb53571ea4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java @@ -54,7 +54,7 @@ public DatafeedJobBuilder(Client client, Settings settings, NamedXContentRegistr void build(String datafeedId, ActionListener listener) { JobResultsProvider jobResultsProvider = new JobResultsProvider(client, settings); - JobConfigProvider jobConfigProvider = new JobConfigProvider(client); + JobConfigProvider jobConfigProvider = new JobConfigProvider(client, xContentRegistry); DatafeedConfigProvider datafeedConfigProvider = new DatafeedConfigProvider(client, xContentRegistry); build(datafeedId, jobResultsProvider, jobConfigProvider, datafeedConfigProvider, listener); @@ -72,10 +72,10 @@ void build(String datafeedId, JobResultsProvider jobResultsProvider, JobConfigPr // Step 5. 
Build datafeed job object Consumer contextHanlder = context -> { - TimeValue frequency = getFrequencyOrDefault(datafeedConfigHolder.get(), jobHolder.get()); + TimeValue frequency = getFrequencyOrDefault(datafeedConfigHolder.get(), jobHolder.get(), xContentRegistry); TimeValue queryDelay = datafeedConfigHolder.get().getQueryDelay(); DelayedDataDetector delayedDataDetector = - DelayedDataDetectorFactory.buildDetector(jobHolder.get(), datafeedConfigHolder.get(), client); + DelayedDataDetectorFactory.buildDetector(jobHolder.get(), datafeedConfigHolder.get(), client, xContentRegistry); DatafeedJob datafeedJob = new DatafeedJob(jobHolder.get().getId(), buildDataDescription(jobHolder.get()), frequency.millis(), queryDelay.millis(), context.dataExtractorFactory, client, auditor, currentTimeSupplier, delayedDataDetector, @@ -102,7 +102,7 @@ void build(String datafeedId, JobResultsProvider jobResultsProvider, JobConfigPr if (dataCounts.getLatestRecordTimeStamp() != null) { context.latestRecordTimeMs = dataCounts.getLatestRecordTimeStamp().getTime(); } - DataExtractorFactory.create(client, datafeedConfigHolder.get(), jobHolder.get(), dataExtractorFactoryHandler); + DataExtractorFactory.create(client, datafeedConfigHolder.get(), jobHolder.get(), xContentRegistry, dataExtractorFactoryHandler); }; // Collect data counts @@ -137,7 +137,7 @@ void build(String datafeedId, JobResultsProvider jobResultsProvider, JobConfigPr jobBuilder -> { try { jobHolder.set(jobBuilder.build()); - DatafeedJobValidator.validate(datafeedConfigHolder.get(), jobHolder.get()); + DatafeedJobValidator.validate(datafeedConfigHolder.get(), jobHolder.get(), xContentRegistry); jobIdConsumer.accept(jobHolder.get().getId()); } catch (Exception e) { listener.onFailure(e); @@ -162,11 +162,11 @@ void build(String datafeedId, JobResultsProvider jobResultsProvider, JobConfigPr datafeedConfigProvider.getDatafeedConfig(datafeedId, datafeedConfigListener); } - private static TimeValue getFrequencyOrDefault(DatafeedConfig datafeed, Job job) { + private static TimeValue getFrequencyOrDefault(DatafeedConfig datafeed, Job job, NamedXContentRegistry xContentRegistry) { TimeValue frequency = datafeed.getFrequency(); if (frequency == null) { TimeValue bucketSpan = job.getAnalysisConfig().getBucketSpan(); - return datafeed.defaultFrequency(bucketSpan); + return datafeed.defaultFrequency(bucketSpan, xContentRegistry); } return frequency; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index 409d15182d96a..53568c3705a8d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -64,7 +64,6 @@ public class DatafeedManager { private final DatafeedJobBuilder datafeedJobBuilder; private final TaskRunner taskRunner = new TaskRunner(); private final AutodetectProcessManager autodetectProcessManager; - private volatile boolean isolated; public DatafeedManager(ThreadPool threadPool, Client client, ClusterService clusterService, DatafeedJobBuilder datafeedJobBuilder, Supplier currentTimeSupplier, Auditor auditor, AutodetectProcessManager autodetectProcessManager) { @@ -130,18 +129,20 @@ public void stopAllDatafeedsOnThisNode(String reason) { * This is used before the JVM is killed. 
It differs from stopAllDatafeedsOnThisNode in that it leaves * the datafeed tasks in the "started" state, so that they get restarted on a different node. */ - public void isolateAllDatafeedsOnThisNode() { - isolated = true; + public void isolateAllDatafeedsOnThisNodeBeforeShutdown() { Iterator iter = runningDatafeedsOnThisNode.values().iterator(); while (iter.hasNext()) { Holder next = iter.next(); next.isolateDatafeed(); - next.setRelocating(); + // TODO: it's not ideal that this "isolate" method does something a bit different to the one below + next.setNodeIsShuttingDown(); iter.remove(); } } public void isolateDatafeed(long allocationId) { + // This calls get() rather than remove() because we expect that the persistent task will + // be removed shortly afterwards and that operation needs to be able to find the holder Holder holder = runningDatafeedsOnThisNode.get(allocationId); if (holder != null) { holder.isolateDatafeed(); @@ -195,7 +196,7 @@ protected void doRun() { holder.stop("general_lookback_failure", TimeValue.timeValueSeconds(20), e); return; } - if (isolated == false) { + if (holder.isIsolated() == false) { if (next != null) { doDatafeedRealtime(next, holder.datafeedJob.getJobId(), holder); } else { @@ -298,7 +299,7 @@ public class Holder { private final ProblemTracker problemTracker; private final Consumer finishHandler; volatile Scheduler.Cancellable cancellable; - private volatile boolean isRelocating; + private volatile boolean isNodeShuttingDown; Holder(TransportStartDatafeedAction.DatafeedTask task, String datafeedId, DatafeedJob datafeedJob, ProblemTracker problemTracker, Consumer finishHandler) { @@ -324,7 +325,7 @@ boolean isIsolated() { } public void stop(String source, TimeValue timeout, Exception e) { - if (isRelocating) { + if (isNodeShuttingDown) { return; } @@ -344,11 +345,12 @@ public void stop(String source, TimeValue timeout, Exception e) { if (cancellable != null) { cancellable.cancel(); } - auditor.info(datafeedJob.getJobId(), Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_STOPPED)); + auditor.info(datafeedJob.getJobId(), + Messages.getMessage(isIsolated() ? Messages.JOB_AUDIT_DATAFEED_ISOLATED : Messages.JOB_AUDIT_DATAFEED_STOPPED)); finishHandler.accept(e); logger.info("[{}] datafeed [{}] for job [{}] has been stopped{}", source, datafeedId, datafeedJob.getJobId(), acquired ? "" : ", but there may be pending tasks as the timeout [" + timeout.getStringRep() + "] expired"); - if (autoCloseJob) { + if (autoCloseJob && isIsolated() == false) { closeJob(); } if (acquired) { @@ -361,16 +363,18 @@ public void stop(String source, TimeValue timeout, Exception e) { } /** - * This stops a datafeed WITHOUT updating the corresponding persistent task. It must ONLY be called - * immediately prior to shutting down a node. Then the datafeed task can remain "started", and be - * relocated to a different node. Calling this method at any other time will ruin the datafeed. + * This stops a datafeed WITHOUT updating the corresponding persistent task. When called it + * will stop the datafeed from sending data to its job as quickly as possible. The caller + * must do something sensible with the corresponding persistent task. If the node is shutting + * down the task will automatically get reassigned. Otherwise the caller must take action to + * remove or reassign the persistent task, or the datafeed will be left in limbo. 
*/ public void isolateDatafeed() { datafeedJob.isolate(); } - public void setRelocating() { - isRelocating = true; + public void setNodeIsShuttingDown() { + isNodeShuttingDown = true; } private Long executeLookBack(long startTime, Long endTime) throws Exception { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java index f8fa3b1874808..4faefdc7cbf68 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java @@ -29,7 +29,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; /** @@ -102,7 +101,7 @@ private List checkBucketEvents(long start, long end) { request.setExcludeInterim(true); request.setPageParams(new PageParams(0, (int)((end - start)/bucketSpan))); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { GetBucketsAction.Response response = client.execute(GetBucketsAction.INSTANCE, request).actionGet(); return response.getBuckets().results(); } @@ -115,7 +114,7 @@ private Map checkCurrentBucketEventCount(long start, long end) { .query(ExtractorUtils.wrapInTimeRangeQuery(datafeedQuery, timeField, start, end)); SearchRequest searchRequest = new SearchRequest(datafeedIndices).source(searchSourceBuilder); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { SearchResponse response = client.execute(SearchAction.INSTANCE, searchRequest).actionGet(); List buckets = ((Histogram)response.getAggregations().get(DATE_BUCKETS)).getBuckets(); Map hashMap = new HashMap<>(buckets.size()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactory.java index 37f439df7c2d4..88f8e6caadf09 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactory.java @@ -7,6 +7,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DelayedDataCheckConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; @@ -33,9 +34,13 @@ public class DelayedDataDetectorFactory { * @param job The {@link Job} object for the given `datafeedConfig` * @param datafeedConfig The {@link DatafeedConfig} for which to create the {@link DelayedDataDetector} * @param client The {@link Client} capable of taking action against the ES Cluster. 
+ * @param xContentRegistry The current NamedXContentRegistry with which to parse the query * @return A new {@link DelayedDataDetector} */ - public static DelayedDataDetector buildDetector(Job job, DatafeedConfig datafeedConfig, Client client) { + public static DelayedDataDetector buildDetector(Job job, + DatafeedConfig datafeedConfig, + Client client, + NamedXContentRegistry xContentRegistry) { if (datafeedConfig.getDelayedDataCheckConfig().isEnabled()) { long window = validateAndCalculateWindowLength(job.getAnalysisConfig().getBucketSpan(), datafeedConfig.getDelayedDataCheckConfig().getCheckWindow()); @@ -44,7 +49,7 @@ public static DelayedDataDetector buildDetector(Job job, DatafeedConfig datafeed window, job.getId(), job.getDataDescription().getTimeField(), - datafeedConfig.getParsedQuery(), + datafeedConfig.getParsedQuery(xContentRegistry), datafeedConfig.getIndices().toArray(new String[0]), client); } else { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java index 77e2c695db7d5..bca57f7155ae3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java @@ -8,6 +8,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -25,10 +26,14 @@ public interface DataExtractorFactory { /** * Creates a {@code DataExtractorFactory} for the given datafeed-job combination. */ - static void create(Client client, DatafeedConfig datafeed, Job job, ActionListener listener) { + static void create(Client client, + DatafeedConfig datafeed, + Job job, + NamedXContentRegistry xContentRegistry, + ActionListener listener) { ActionListener factoryHandler = ActionListener.wrap( factory -> listener.onResponse(datafeed.getChunkingConfig().isEnabled() - ? new ChunkedDataExtractorFactory(client, datafeed, job, factory) : factory) + ? 
new ChunkedDataExtractorFactory(client, datafeed, job, xContentRegistry, factory) : factory) , listener::onFailure ); @@ -36,13 +41,13 @@ static void create(Client client, DatafeedConfig datafeed, Job job, ActionListen response -> { if (response.getJobs().isEmpty()) { // This means no rollup indexes are in the config if (datafeed.hasAggregations()) { - factoryHandler.onResponse(new AggregationDataExtractorFactory(client, datafeed, job)); + factoryHandler.onResponse(new AggregationDataExtractorFactory(client, datafeed, job, xContentRegistry)); } else { - ScrollDataExtractorFactory.create(client, datafeed, job, factoryHandler); + ScrollDataExtractorFactory.create(client, datafeed, job, xContentRegistry, factoryHandler); } } else { if (datafeed.hasAggregations()) { // Rollup indexes require aggregations - RollupDataExtractorFactory.create(client, datafeed, job, response.getJobs(), factoryHandler); + RollupDataExtractorFactory.create(client, datafeed, job, response.getJobs(), xContentRegistry, factoryHandler); } else { listener.onFailure(new IllegalArgumentException("Aggregations are required when using Rollup indices")); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java index df858f45c825e..aa5c7ed6314b4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java @@ -80,6 +80,11 @@ public void cancel() { hasNext = false; } + @Override + public long getEndTime() { + return context.end; + } + @Override public Optional next() throws IOException { if (!hasNext()) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactory.java index 12c4a47228f10..de205b276a049 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactory.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.datafeed.extractor.aggregation; import org.elasticsearch.client.Client; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; @@ -19,23 +20,25 @@ public class AggregationDataExtractorFactory implements DataExtractorFactory { private final Client client; private final DatafeedConfig datafeedConfig; private final Job job; + private final NamedXContentRegistry xContentRegistry; - public AggregationDataExtractorFactory(Client client, DatafeedConfig datafeedConfig, Job job) { + public AggregationDataExtractorFactory(Client client, DatafeedConfig datafeedConfig, Job job, NamedXContentRegistry xContentRegistry) { this.client = Objects.requireNonNull(client); this.datafeedConfig = Objects.requireNonNull(datafeedConfig); this.job = Objects.requireNonNull(job); + this.xContentRegistry = 
xContentRegistry; } @Override public DataExtractor newExtractor(long start, long end) { - long histogramInterval = datafeedConfig.getHistogramIntervalMillis(); + long histogramInterval = datafeedConfig.getHistogramIntervalMillis(xContentRegistry); AggregationDataExtractorContext dataExtractorContext = new AggregationDataExtractorContext( job.getId(), job.getDataDescription().getTimeField(), job.getAnalysisConfig().analysisFields(), datafeedConfig.getIndices(), - datafeedConfig.getParsedQuery(), - datafeedConfig.getParsedAggregations(), + datafeedConfig.getParsedQuery(xContentRegistry), + datafeedConfig.getParsedAggregations(xContentRegistry), Intervals.alignToCeil(start, histogramInterval), Intervals.alignToFloor(end, histogramInterval), job.getAnalysisConfig().getSummaryCountFieldName().equals(DatafeedConfig.DOC_COUNT), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java index d5290611ab062..4971ad838799d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java @@ -8,6 +8,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; @@ -41,23 +42,25 @@ public class RollupDataExtractorFactory implements DataExtractorFactory { private final Client client; private final DatafeedConfig datafeedConfig; private final Job job; + private final NamedXContentRegistry xContentRegistry; - private RollupDataExtractorFactory(Client client, DatafeedConfig datafeedConfig, Job job) { + private RollupDataExtractorFactory(Client client, DatafeedConfig datafeedConfig, Job job, NamedXContentRegistry xContentRegistry) { this.client = Objects.requireNonNull(client); this.datafeedConfig = Objects.requireNonNull(datafeedConfig); this.job = Objects.requireNonNull(job); + this.xContentRegistry = xContentRegistry; } @Override public DataExtractor newExtractor(long start, long end) { - long histogramInterval = datafeedConfig.getHistogramIntervalMillis(); + long histogramInterval = datafeedConfig.getHistogramIntervalMillis(xContentRegistry); AggregationDataExtractorContext dataExtractorContext = new AggregationDataExtractorContext( job.getId(), job.getDataDescription().getTimeField(), job.getAnalysisConfig().analysisFields(), datafeedConfig.getIndices(), - datafeedConfig.getParsedQuery(), - datafeedConfig.getParsedAggregations(), + datafeedConfig.getParsedQuery(xContentRegistry), + datafeedConfig.getParsedAggregations(xContentRegistry), Intervals.alignToCeil(start, histogramInterval), Intervals.alignToFloor(end, histogramInterval), job.getAnalysisConfig().getSummaryCountFieldName().equals(DatafeedConfig.DOC_COUNT), @@ -69,10 +72,11 @@ public static void create(Client client, DatafeedConfig datafeed, Job job, Map rollupJobsWithCaps, + NamedXContentRegistry xContentRegistry, ActionListener listener) { final 
AggregationBuilder datafeedHistogramAggregation = getHistogramAggregation( - datafeed.getParsedAggregations().getAggregatorFactories()); + datafeed.getParsedAggregations(xContentRegistry).getAggregatorFactories()); if ((datafeedHistogramAggregation instanceof DateHistogramAggregationBuilder) == false) { listener.onFailure( new IllegalArgumentException("Rollup requires that the datafeed configuration use a [date_histogram] aggregation," + @@ -103,7 +107,8 @@ public static void create(Client client, return; } final List flattenedAggs = new ArrayList<>(); - flattenAggregations(datafeed.getParsedAggregations().getAggregatorFactories(), datafeedHistogramAggregation, flattenedAggs); + flattenAggregations(datafeed.getParsedAggregations(xContentRegistry) + .getAggregatorFactories(), datafeedHistogramAggregation, flattenedAggs); if (validIntervalCaps.stream().noneMatch(rollupJobConfig -> hasAggregations(rollupJobConfig, flattenedAggs))) { listener.onFailure( @@ -112,7 +117,7 @@ public static void create(Client client, return; } - listener.onResponse(new RollupDataExtractorFactory(client, datafeed, job)); + listener.onResponse(new RollupDataExtractorFactory(client, datafeed, job, xContentRegistry)); } private static boolean validInterval(long datafeedInterval, ParsedRollupCaps rollupJobGroupConfig) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java index 618ae6ee9a30d..f1e1fe2a10a32 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java @@ -114,7 +114,7 @@ private void setUpChunkedSearch() throws IOException { currentEnd = currentStart; chunkSpan = context.chunkSpan == null ? 
dataSummary.estimateChunk() : context.chunkSpan.getMillis(); chunkSpan = context.timeAligner.alignToCeil(chunkSpan); - LOGGER.debug("[{}]Chunked search configured: kind = {}, dataTimeSpread = {} ms, chunk span = {} ms", + LOGGER.debug("[{}] Chunked search configured: kind = {}, dataTimeSpread = {} ms, chunk span = {} ms", context.jobId, dataSummary.getClass().getSimpleName(), dataSummary.getDataTimeSpread(), chunkSpan); } else { // search is over @@ -170,6 +170,11 @@ public void cancel() { isCancelled = true; } + @Override + public long getEndTime() { + return context.end; + } + ChunkedDataExtractorContext getContext() { return context; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java index 76a05e6b4d16a..fb8da71faa3fe 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.datafeed.extractor.chunked; import org.elasticsearch.client.Client; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; @@ -20,12 +21,18 @@ public class ChunkedDataExtractorFactory implements DataExtractorFactory { private final DatafeedConfig datafeedConfig; private final Job job; private final DataExtractorFactory dataExtractorFactory; + private final NamedXContentRegistry xContentRegistry; - public ChunkedDataExtractorFactory(Client client, DatafeedConfig datafeedConfig, Job job, DataExtractorFactory dataExtractorFactory) { + public ChunkedDataExtractorFactory(Client client, + DatafeedConfig datafeedConfig, + Job job, + NamedXContentRegistry xContentRegistry, + DataExtractorFactory dataExtractorFactory) { this.client = Objects.requireNonNull(client); this.datafeedConfig = Objects.requireNonNull(datafeedConfig); this.job = Objects.requireNonNull(job); this.dataExtractorFactory = Objects.requireNonNull(dataExtractorFactory); + this.xContentRegistry = xContentRegistry; } @Override @@ -35,7 +42,7 @@ public DataExtractor newExtractor(long start, long end) { job.getId(), job.getDataDescription().getTimeField(), datafeedConfig.getIndices(), - datafeedConfig.getParsedQuery(), + datafeedConfig.getParsedQuery(xContentRegistry), datafeedConfig.getScrollSize(), timeAligner.alignToCeil(start), timeAligner.alignToFloor(end), @@ -43,7 +50,7 @@ public DataExtractor newExtractor(long start, long end) { timeAligner, datafeedConfig.getHeaders(), datafeedConfig.hasAggregations(), - datafeedConfig.hasAggregations() ? datafeedConfig.getHistogramIntervalMillis() : null + datafeedConfig.hasAggregations() ? datafeedConfig.getHistogramIntervalMillis(xContentRegistry) : null ); return new ChunkedDataExtractor(client, dataExtractorFactory, dataExtractorContext); } @@ -55,7 +62,7 @@ private ChunkedDataExtractorContext.TimeAligner newTimeAligner() { // the same bucket twice, we need to search buckets aligned to the histogram interval. // This allows us to steer away from partial buckets, and thus avoid the problem of // dropping or duplicating data. 
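The comment above explains why, when the datafeed uses aggregations, chunk boundaries are aligned to the histogram interval. As a rough, hedged sketch of that alignment arithmetic (simplified; not the actual Intervals utility used by the patch):

    // Align a timestamp down/up to a multiple of the histogram interval so a chunk
    // never starts or ends inside a bucket. Assumes non-negative epoch milliseconds.
    static long alignToFloor(long value, long interval) {
        return (value / interval) * interval;
    }

    static long alignToCeil(long value, long interval) {
        long floor = alignToFloor(value, interval);
        return floor == value ? floor : floor + interval;
    }

For example, with a one-hour interval a chunk starting at 10:20 would begin searching at 11:00 (ceil) and a chunk ending at 14:40 would stop at 14:00 (floor), which is how partial buckets are avoided.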
- return newIntervalTimeAligner(datafeedConfig.getHistogramIntervalMillis()); + return newIntervalTimeAligner(datafeedConfig.getHistogramIntervalMillis(xContentRegistry)); } return newIdentityTimeAligner(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java index 5e6eb96637deb..dea775c24ca36 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java @@ -77,19 +77,34 @@ public void cancel() { isCancelled = true; } + @Override + public long getEndTime() { + return context.end; + } + @Override public Optional next() throws IOException { if (!hasNext()) { throw new NoSuchElementException(); } - Optional stream = scrollId == null ? - Optional.ofNullable(initScroll(context.start)) : Optional.ofNullable(continueScroll()); + Optional stream = tryNextStream(); if (!stream.isPresent()) { hasNext = false; } return stream; } + private Optional tryNextStream() throws IOException { + try { + return scrollId == null ? + Optional.ofNullable(initScroll(context.start)) : Optional.ofNullable(continueScroll()); + } catch (Exception e) { + // In case of error make sure we clear the scroll context + clearScroll(); + throw e; + } + } + protected InputStream initScroll(long startTimestamp) throws IOException { LOGGER.debug("[{}] Initializing scroll", context.jobId); SearchResponse searchResponse = executeSearchRequest(buildSearchRequest(startTimestamp)); @@ -126,6 +141,8 @@ private SearchRequestBuilder buildSearchRequest(long start) { private InputStream processSearchResponse(SearchResponse searchResponse) throws IOException { + scrollId = searchResponse.getScrollId(); + if (searchResponse.getFailedShards() > 0 && searchHasShardFailure == false) { LOGGER.debug("[{}] Resetting scroll search after shard failure", context.jobId); markScrollAsErrored(); @@ -133,10 +150,9 @@ private InputStream processSearchResponse(SearchResponse searchResponse) throws } ExtractorUtils.checkSearchWasSuccessful(context.jobId, searchResponse); - scrollId = searchResponse.getScrollId(); if (searchResponse.getHits().getHits().length == 0) { hasNext = false; - clearScroll(scrollId); + clearScroll(); return null; } @@ -150,7 +166,7 @@ private InputStream processSearchResponse(SearchResponse searchResponse) throws timestampOnCancel = timestamp; } else if (timestamp.equals(timestampOnCancel) == false) { hasNext = false; - clearScroll(scrollId); + clearScroll(); break; } } @@ -184,7 +200,7 @@ private InputStream continueScroll() throws IOException { private void markScrollAsErrored() { // This could be a transient error with the scroll Id. // Reinitialise the scroll and try again but only once. 
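For context on the change below: resetScroll() is folded into a single clearScroll() that both releases the server-side scroll context and nulls the stored scroll id, and tryNextStream() clears the scroll on any failure so transient errors cannot leak contexts. A hedged, simplified sketch of the retry-once flow (assuming, as elsewhere in this class, that searchHasShardFailure gates the retry):

    // Sketch only; names follow the surrounding diff but this is not the exact code.
    private void markScrollAsErrored() {
        clearScroll();                 // drop the possibly broken scroll context and null scrollId
        if (lastTimestamp != null) {
            lastTimestamp++;           // resume strictly after the last record already streamed
        }
        searchHasShardFailure = true;  // a second shard failure propagates instead of retrying
    }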
- resetScroll(); + clearScroll(); if (lastTimestamp != null) { lastTimestamp++; } @@ -199,17 +215,13 @@ protected SearchResponse executeSearchScrollRequest(String scrollId) { .get()); } - private void resetScroll() { - clearScroll(scrollId); - scrollId = null; - } - - private void clearScroll(String scrollId) { + private void clearScroll() { if (scrollId != null) { ClearScrollRequest request = new ClearScrollRequest(); request.addScrollId(scrollId); ClientHelper.executeWithHeaders(context.headers, ClientHelper.ML_ORIGIN, client, () -> client.execute(ClearScrollAction.INSTANCE, request).actionGet()); + scrollId = null; } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java index 763d718bcc79e..ab912f54fe2a1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -29,12 +30,15 @@ public class ScrollDataExtractorFactory implements DataExtractorFactory { private final DatafeedConfig datafeedConfig; private final Job job; private final TimeBasedExtractedFields extractedFields; + private final NamedXContentRegistry xContentRegistry; - private ScrollDataExtractorFactory(Client client, DatafeedConfig datafeedConfig, Job job, TimeBasedExtractedFields extractedFields) { + private ScrollDataExtractorFactory(Client client, DatafeedConfig datafeedConfig, Job job, TimeBasedExtractedFields extractedFields, + NamedXContentRegistry xContentRegistry) { this.client = Objects.requireNonNull(client); this.datafeedConfig = Objects.requireNonNull(datafeedConfig); this.job = Objects.requireNonNull(job); this.extractedFields = Objects.requireNonNull(extractedFields); + this.xContentRegistry = xContentRegistry; } @Override @@ -43,7 +47,7 @@ public DataExtractor newExtractor(long start, long end) { job.getId(), extractedFields, datafeedConfig.getIndices(), - datafeedConfig.getParsedQuery(), + datafeedConfig.getParsedQuery(xContentRegistry), datafeedConfig.getScriptFields(), datafeedConfig.getScrollSize(), start, @@ -52,13 +56,17 @@ public DataExtractor newExtractor(long start, long end) { return new ScrollDataExtractor(client, dataExtractorContext); } - public static void create(Client client, DatafeedConfig datafeed, Job job, ActionListener listener) { + public static void create(Client client, + DatafeedConfig datafeed, + Job job, + NamedXContentRegistry xContentRegistry, + ActionListener listener ) { // Step 2. 
Contruct the factory and notify listener ActionListener fieldCapabilitiesHandler = ActionListener.wrap( fieldCapabilitiesResponse -> { TimeBasedExtractedFields extractedFields = TimeBasedExtractedFields.build(job, datafeed, fieldCapabilitiesResponse); - listener.onResponse(new ScrollDataExtractorFactory(client, datafeed, job, extractedFields)); + listener.onResponse(new ScrollDataExtractorFactory(client, datafeed, job, extractedFields, xContentRegistry)); }, e -> { if (e instanceof IndexNotFoundException) { listener.onFailure(new ResourceNotFoundException("datafeed [" + datafeed.getId() diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java index 7237ab0eb9818..a464105d6c117 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -50,7 +49,6 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.ml.job.persistence.ExpandedIdsMatcher; @@ -126,12 +124,11 @@ public void putDatafeedConfig(DatafeedConfig config, Map headers try (XContentBuilder builder = XContentFactory.jsonBuilder()) { XContentBuilder source = config.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); - IndexRequest indexRequest = client.prepareIndex(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, DatafeedConfig.documentId(datafeedId)) - .setSource(source) - .setOpType(DocWriteRequest.OpType.CREATE) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .request(); + IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.configIndexName()) + .id(DatafeedConfig.documentId(datafeedId)) + .source(source) + .opType(DocWriteRequest.OpType.CREATE) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap( listener::onResponse, @@ -162,8 +159,7 @@ public void putDatafeedConfig(DatafeedConfig config, Map headers * @param datafeedConfigListener The config listener */ public void getDatafeedConfig(String datafeedId, ActionListener datafeedConfigListener) { - GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, DatafeedConfig.documentId(datafeedId)); + GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), DatafeedConfig.documentId(datafeedId)); executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { @Override public void onResponse(GetResponse 
getResponse) { @@ -230,8 +226,7 @@ public void findDatafeedsForJobIds(Collection jobIds, ActionListener actionListener) { - DeleteRequest request = new DeleteRequest(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, DatafeedConfig.documentId(datafeedId)); + DeleteRequest request = new DeleteRequest(AnomalyDetectorsIndex.configIndexName(), DatafeedConfig.documentId(datafeedId)); request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); executeAsyncWithOrigin(client, ML_ORIGIN, DeleteAction.INSTANCE, request, new ActionListener() { @Override @@ -268,8 +263,7 @@ public void onFailure(Exception e) { public void updateDatefeedConfig(String datafeedId, DatafeedUpdate update, Map headers, BiConsumer> validator, ActionListener updatedConfigListener) { - GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, DatafeedConfig.documentId(datafeedId)); + GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), DatafeedConfig.documentId(datafeedId)); executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { @Override @@ -325,15 +319,15 @@ private void indexUpdatedConfig(DatafeedConfig updatedConfig, long seqNo, long p ActionListener listener) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { XContentBuilder updatedSource = updatedConfig.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); - IndexRequestBuilder indexRequest = client.prepareIndex(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, DatafeedConfig.documentId(updatedConfig.getId())) - .setSource(updatedSource) + IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.configIndexName()) + .id(DatafeedConfig.documentId(updatedConfig.getId())) + .source(updatedSource) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); indexRequest.setIfSeqNo(seqNo); indexRequest.setIfPrimaryTerm(primaryTerm); - executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest.request(), listener); + executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, listener); } catch (IOException e) { listener.onFailure( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index ccd0d594eb382..929058739cdf2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -101,7 +102,7 @@ public class JobManager { */ public JobManager(Environment environment, Settings settings, JobResultsProvider jobResultsProvider, ClusterService clusterService, Auditor auditor, ThreadPool threadPool, - Client client, UpdateJobProcessNotifier updateJobProcessNotifier) { + Client client, UpdateJobProcessNotifier updateJobProcessNotifier, NamedXContentRegistry xContentRegistry) { this.environment = environment; this.jobResultsProvider = Objects.requireNonNull(jobResultsProvider); this.clusterService = 
Objects.requireNonNull(clusterService); @@ -109,7 +110,7 @@ public JobManager(Environment environment, Settings settings, JobResultsProvider this.client = Objects.requireNonNull(client); this.threadPool = Objects.requireNonNull(threadPool); this.updateJobProcessNotifier = updateJobProcessNotifier; - this.jobConfigProvider = new JobConfigProvider(client); + this.jobConfigProvider = new JobConfigProvider(client, xContentRegistry); this.migrationEligibilityCheck = new MlConfigMigrationEligibilityCheck(settings, clusterService); maxModelMemoryLimit = MachineLearningField.MAX_MODEL_MEMORY_LIMIT.get(settings); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java index 9423768b8ed4f..5ce424a158187 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -62,7 +61,6 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; @@ -104,9 +102,11 @@ public class JobConfigProvider { } private final Client client; + private final NamedXContentRegistry xContentRegistry; - public JobConfigProvider(Client client) { + public JobConfigProvider(Client client, NamedXContentRegistry xContentRegistry) { this.client = client; + this.xContentRegistry = xContentRegistry; } /** @@ -120,12 +120,11 @@ public JobConfigProvider(Client client) { public void putJob(Job job, ActionListener listener) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { XContentBuilder source = job.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); - IndexRequest indexRequest = client.prepareIndex(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, Job.documentId(job.getId())) - .setSource(source) - .setOpType(DocWriteRequest.OpType.CREATE) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .request(); + IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.configIndexName()) + .id(Job.documentId(job.getId())) + .source(source) + .opType(DocWriteRequest.OpType.CREATE) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap( listener::onResponse, @@ -155,8 +154,7 @@ public void putJob(Job job, ActionListener listener) { * @param jobListener Job listener */ public void getJob(String jobId, ActionListener jobListener) { - GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), 
Job.documentId(jobId)); executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, getRequest, new ActionListener() { @Override @@ -193,8 +191,7 @@ public void onFailure(Exception e) { * @param actionListener Deleted job listener */ public void deleteJob(String jobId, boolean errorIfMissing, ActionListener actionListener) { - DeleteRequest request = new DeleteRequest(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + DeleteRequest request = new DeleteRequest(AnomalyDetectorsIndex.configIndexName(), Job.documentId(jobId)); request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); executeAsyncWithOrigin(client, ML_ORIGIN, DeleteAction.INSTANCE, request, new ActionListener() { @@ -230,8 +227,7 @@ public void onFailure(Exception e) { */ public void updateJob(String jobId, JobUpdate update, ByteSizeValue maxModelMemoryLimit, ActionListener updatedJobListener) { - GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), Job.documentId(jobId)); executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { @Override @@ -295,8 +291,7 @@ public interface UpdateValidator { */ public void updateJobWithValidation(String jobId, JobUpdate update, ByteSizeValue maxModelMemoryLimit, UpdateValidator validator, ActionListener updatedJobListener) { - GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), Job.documentId(jobId)); executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { @Override @@ -347,14 +342,14 @@ private void indexUpdatedJob(Job updatedJob, long seqNo, long primaryTerm, ActionListener updatedJobListener) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { XContentBuilder updatedSource = updatedJob.toXContent(builder, ToXContent.EMPTY_PARAMS); - IndexRequestBuilder indexRequest = client.prepareIndex(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, Job.documentId(updatedJob.getId())) - .setSource(updatedSource) + IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.configIndexName()) + .id(Job.documentId(updatedJob.getId())) + .source(updatedSource) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); indexRequest.setIfSeqNo(seqNo); indexRequest.setIfPrimaryTerm(primaryTerm); - executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest.request(), ActionListener.wrap( + executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap( indexResponse -> { assert indexResponse.getResult() == DocWriteResponse.Result.UPDATED; updatedJobListener.onResponse(updatedJob); @@ -383,8 +378,7 @@ private void indexUpdatedJob(Job updatedJob, long seqNo, long primaryTerm, * @param listener Exists listener */ public void jobExists(String jobId, boolean errorIfMissing, ActionListener listener) { - GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + GetRequest getRequest = new GetRequest(AnomalyDetectorsIndex.configIndexName(), Job.documentId(jobId)); getRequest.fetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE); executeAsyncWithOrigin(client, 
ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener() { @@ -458,8 +452,7 @@ public void jobIdMatches(List ids, ActionListener> listener * @param listener Responds with true if successful else an error */ public void markJobAsDeleting(String jobId, ActionListener listener) { - UpdateRequest updateRequest = new UpdateRequest(AnomalyDetectorsIndex.configIndexName(), - ElasticsearchMappings.DOC_TYPE, Job.documentId(jobId)); + UpdateRequest updateRequest = new UpdateRequest(AnomalyDetectorsIndex.configIndexName(), Job.documentId(jobId)); updateRequest.retryOnConflict(3); updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); updateRequest.doc(Collections.singletonMap(Job.DELETING.getPreferredName(), Boolean.TRUE)); @@ -746,7 +739,7 @@ public void validateDatafeedJob(DatafeedConfig config, ActionListener l getJob(config.getJobId(), ActionListener.wrap( jobBuilder -> { try { - DatafeedJobValidator.validate(config, jobBuilder.build()); + DatafeedJobValidator.validate(config, jobBuilder.build(), xContentRegistry); listener.onResponse(Boolean.TRUE); } catch (Exception e) { listener.onFailure(e); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataCountsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataCountsPersister.java index d175df5e9e25a..a0017af4b8c47 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataCountsPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataCountsPersister.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import java.io.IOException; @@ -54,10 +53,9 @@ private XContentBuilder serialiseCounts(DataCounts counts) throws IOException { */ public void persistDataCounts(String jobId, DataCounts counts, ActionListener listener) { try (XContentBuilder content = serialiseCounts(counts)) { - final IndexRequest request = client.prepareIndex(AnomalyDetectorsIndex.resultsWriteAlias(jobId), ElasticsearchMappings.DOC_TYPE, - DataCounts.documentId(jobId)) - .setSource(content) - .request(); + final IndexRequest request = new IndexRequest(AnomalyDetectorsIndex.resultsWriteAlias(jobId)) + .id(DataCounts.documentId(jobId)) + .source(content); executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, request, new ActionListener() { @Override public void onResponse(IndexResponse indexResponse) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java index c96388213c8c0..9380be0c15b78 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java @@ -33,7 +33,6 @@ import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; public class JobDataDeleter { @@ -115,7 +114,7 @@ public void deleteInterimResults() { QueryBuilder qb = 
QueryBuilders.termQuery(Result.IS_INTERIM.getPreferredName(), true); deleteByQueryHolder.dbqRequest.setQuery(new ConstantScoreQueryBuilder(qb)); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { client.execute(DeleteByQueryAction.INSTANCE, deleteByQueryHolder.dbqRequest).get(); } catch (Exception e) { LOGGER.error("[" + jobId + "] An error occurred while deleting interim results", e); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java index e5f9e8e85959e..118a3913ee435 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java @@ -15,18 +15,16 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.ml.job.process.normalizer.BucketNormalizable; -import org.elasticsearch.xpack.ml.job.process.normalizer.Normalizable; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.BucketInfluencer; +import org.elasticsearch.xpack.ml.job.process.normalizer.BucketNormalizable; +import org.elasticsearch.xpack.ml.job.process.normalizer.Normalizable; import java.io.IOException; import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; -import static org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings.DOC_TYPE; /** @@ -78,7 +76,7 @@ public void updateResults(List normalizables) { public void updateResult(String id, String index, ToXContent resultDoc) { try (XContentBuilder content = toXContentBuilder(resultDoc)) { - bulkRequest.add(new IndexRequest(index, DOC_TYPE, id).source(content)); + bulkRequest.add(new IndexRequest(index).id(id).source(content)); } catch (IOException e) { logger.error(new ParameterizedMessage("[{}] Error serialising result", jobId), e); } @@ -102,7 +100,7 @@ public void executeRequest() { } logger.trace("[{}] ES API CALL: bulk request with {} actions", jobId, bulkRequest.numberOfActions()); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { BulkResponse addRecordsResponse = client.bulk(bulkRequest).actionGet(); if (addRecordsResponse.hasFailures()) { logger.error("[{}] Bulk index of results has errors: {}", jobId, addRecordsResponse.buildFailureMessage()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java index 2a16b1c8ddd8a..fc5b87d4afebe 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java @@ -45,8 +45,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; -import static org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings.DOC_TYPE; /** * Persists result types, Quantiles etc to Elasticsearch
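For reference, the hunks that follow for this persister repeat the same two mechanical substitutions already applied to JobRenormalizedResultsPersister above: index/get/update requests lose their mapping-type argument, and the static ClientHelper.stashWithOrigin helper gives way to the instance method on ThreadContext. A minimal illustrative sketch of the resulting pattern, not part of the patch; the index name and document id are placeholders, `content` is an XContentBuilder, and the usual IndexRequest/ML_ORIGIN imports are assumed:

    try (ThreadContext.StoredContext ignore =
             client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) {
        // Typeless request: index name plus document id only, no DOC_TYPE argument.
        IndexRequest request = new IndexRequest(".ml-anomalies-example")   // placeholder index name
            .id("example-result-doc")                                      // placeholder document id
            .source(content);
        client.index(request).actionGet();
    }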
@@ -175,7 +173,7 @@ public Builder persistForecastRequestStats(ForecastRequestStats forecastRequestS private void indexResult(String id, ToXContent resultDoc, String resultType) { try (XContentBuilder content = toXContentBuilder(resultDoc)) { - bulkRequest.add(new IndexRequest(indexName, DOC_TYPE, id).source(content)); + bulkRequest.add(new IndexRequest(indexName).id(id).source(content)); } catch (IOException e) { logger.error(new ParameterizedMessage("[{}] Error serialising {}", jobId, resultType), e); } @@ -194,7 +192,7 @@ public void executeRequest() { } logger.trace("[{}] ES API CALL: bulk request with {} actions", jobId, bulkRequest.numberOfActions()); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { BulkResponse addRecordsResponse = client.bulk(bulkRequest).actionGet(); if (addRecordsResponse.hasFailures()) { logger.error("[{}] Bulk index of results has errors: {}", jobId, addRecordsResponse.buildFailureMessage()); @@ -293,7 +291,7 @@ public void commitResultWrites(String jobId) { logger.trace("[{}] ES API CALL: refresh index {}", jobId, indexName); RefreshRequest refreshRequest = new RefreshRequest(indexName); refreshRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { client.admin().indices().refresh(refreshRequest).actionGet(); } } @@ -310,7 +308,7 @@ public void commitStateWrites(String jobId) { logger.trace("[{}] ES API CALL: refresh index {}", jobId, indexName); RefreshRequest refreshRequest = new RefreshRequest(indexName); refreshRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { client.admin().indices().refresh(refreshRequest).actionGet(); } } @@ -349,7 +347,7 @@ void persist(String indexName, ActionListener listener) { logCall(indexName); try (XContentBuilder content = toXContentBuilder(object)) { - IndexRequest indexRequest = new IndexRequest(indexName, DOC_TYPE, id).source(content).setRefreshPolicy(refreshPolicy); + IndexRequest indexRequest = new IndexRequest(indexName).id(id).source(content).setRefreshPolicy(refreshPolicy); executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, indexRequest, listener, client::index); } catch (IOException e) { logger.error(new ParameterizedMessage("[{}] Error writing [{}]", jobId, (id == null) ? 
"auto-generated ID" : id), e); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java index 5ae1cafc9c461..84f8a3c1d4e89 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java @@ -16,6 +16,8 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequestBuilder; @@ -37,11 +39,12 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -123,10 +126,10 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.clientWithOrigin; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; public class JobResultsProvider { private static final Logger LOGGER = LogManager.getLogger(JobResultsProvider.class); @@ -252,7 +255,25 @@ public void createJobResultIndex(Job job, ClusterState state, final ActionListen String readAliasName = AnomalyDetectorsIndex.jobResultsAliasedName(job.getId()); String writeAliasName = AnomalyDetectorsIndex.resultsWriteAlias(job.getId()); - String indexName = job.getInitialResultsIndexName(); + String tempIndexName = job.getInitialResultsIndexName(); + + // Our read/write aliases should point to the concrete index + // If the initial index is NOT an alias, either it is already a concrete index, or it does not exist yet + if (state.getMetaData().hasAlias(tempIndexName)) { + IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(); + String[] concreteIndices = resolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), tempIndexName); + + // SHOULD NOT be closed as in typical call flow checkForLeftOverDocuments already verified this + // if it is closed, we bailout and return an error + if (concreteIndices.length == 0) { + finalListener.onFailure( + ExceptionsHelper.badRequestException("Cannot create job [{}] as it requires closed index {}", job.getId(), + tempIndexName)); + 
return; + } + tempIndexName = concreteIndices[0]; + } + final String indexName = tempIndexName; final ActionListener createAliasListener = ActionListener.wrap(success -> { final IndicesAliasesRequest request = client.admin().indices().prepareAliases() @@ -270,8 +291,8 @@ public void createJobResultIndex(Job job, ClusterState state, final ActionListen CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName); // This assumes the requested mapping will be merged with mappings from the template, // and may need to be revisited if template merging is ever refactored - try (XContentBuilder termFieldsMapping = ElasticsearchMappings.termFieldsMapping(ElasticsearchMappings.DOC_TYPE, termFields)) { - createIndexRequest.mapping(ElasticsearchMappings.DOC_TYPE, termFieldsMapping); + try (XContentBuilder termFieldsMapping = ElasticsearchMappings.termFieldsMapping(termFields)) { + createIndexRequest.mapping(SINGLE_MAPPING_NAME, termFieldsMapping); } executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, createIndexRequest, ActionListener.wrap( @@ -281,35 +302,54 @@ public void createJobResultIndex(Job job, ClusterState state, final ActionListen // so we need to handle that possibility if (e instanceof ResourceAlreadyExistsException) { LOGGER.info("Index already exists"); - // Create the alias - createAliasListener.onResponse(true); + // Add the term field mappings and alias. The complication is that the state at the + // beginning of the operation doesn't have any knowledge of the index, as it's only + // just been created. So we need yet another operation to get the mappings for it. + getLatestIndexMappings(indexName, ActionListener.wrap( + response -> { + // Expect one index and one type. If this is not the case then it means the + // index has been deleted almost immediately after being created, and this is + // so unlikely that it's reasonable to fail the whole operation. 
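To make the surrounding flow easier to follow, here is a condensed, hypothetical sketch of how the new getLatestIndexMappings helper and the reworked violatedFieldCountLimit(long, long, MappingMetaData) overload compose once the index is known to exist. It is not part of the patch: the synchronous actionGet() call and the placeholder index name are simplifications (the production code stays asynchronous via executeAsyncWithOrigin), and `settings` and `termFields` are assumed to be in scope:

    GetMappingsRequest getMappingsRequest =
        client.admin().indices().prepareGetMappings("shared-results-index").request(); // placeholder index
    GetMappingsResponse response = client.admin().indices().getMappings(getMappingsRequest).actionGet();
    // One index and one type are expected; the nested iterator().next().value calls unwrap them.
    MappingMetaData mapping = response.getMappings().iterator().next().value
        .iterator().next().value;
    long fieldCountLimit = MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.get(settings);
    if (JobResultsProvider.violatedFieldCountLimit(termFields.size(), fieldCountLimit, mapping)) {
        // Adding the job's term fields would push the index past index.mapping.total_fields.limit.
        throw new IllegalArgumentException("index.mapping.total_fields.limit would be violated");
    }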
+ ImmutableOpenMap indexMappings = + response.getMappings().iterator().next().value; + MappingMetaData typeMappings = indexMappings.iterator().next().value; + addTermsAndAliases(typeMappings, indexName, termFields, createAliasListener); + }, + finalListener::onFailure + )); } else { finalListener.onFailure(e); } } ), client.admin().indices()::create); } else { - long fieldCountLimit = MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.get(settings); - if (violatedFieldCountLimit(indexName, termFields.size(), fieldCountLimit, state)) { - String message = "Cannot create job in index '" + indexName + "' as the " + - MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey() + " setting will be violated"; - finalListener.onFailure(new IllegalArgumentException(message)); - } else { - updateIndexMappingWithTermFields(indexName, termFields, - ActionListener.wrap(createAliasListener::onResponse, finalListener::onFailure)); - } + MappingMetaData mapping = state.metaData().index(indexName).mapping(); + addTermsAndAliases(mapping, indexName, termFields, createAliasListener); } } - public static boolean violatedFieldCountLimit( - String indexName, long additionalFieldCount, long fieldCountLimit, ClusterState clusterState) { - long numFields = 0; - IndexMetaData indexMetaData = clusterState.metaData().index(indexName); - Iterator mappings = indexMetaData.getMappings().valuesIt(); - while (mappings.hasNext()) { - MappingMetaData mapping = mappings.next(); - numFields += countFields(mapping.sourceAsMap()); + private void getLatestIndexMappings(final String indexName, final ActionListener listener) { + + GetMappingsRequest getMappingsRequest = client.admin().indices().prepareGetMappings(indexName).request(); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, getMappingsRequest, listener, + client.admin().indices()::getMappings); + } + + private void addTermsAndAliases(final MappingMetaData mapping, final String indexName, final Collection termFields, + final ActionListener listener) { + long fieldCountLimit = MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.get(settings); + + if (violatedFieldCountLimit(termFields.size(), fieldCountLimit, mapping)) { + String message = "Cannot create job in index '" + indexName + "' as the " + + MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey() + " setting will be violated"; + listener.onFailure(new IllegalArgumentException(message)); + } else { + updateIndexMappingWithTermFields(indexName, mapping.type(), termFields, listener); } + } + + public static boolean violatedFieldCountLimit(long additionalFieldCount, long fieldCountLimit, MappingMetaData mapping) { + long numFields = countFields(mapping.sourceAsMap()); return numFields + additionalFieldCount > fieldCountLimit; } @@ -334,10 +374,12 @@ public static int countFields(Map mapping) { return count; } - private void updateIndexMappingWithTermFields(String indexName, Collection termFields, ActionListener listener) { - // Put the whole "doc" mapping, not just the term fields, otherwise we'll wipe the _meta section of the mapping - try (XContentBuilder termFieldsMapping = ElasticsearchMappings.resultsMapping(termFields)) { - final PutMappingRequest request = client.admin().indices().preparePutMapping(indexName).setType(ElasticsearchMappings.DOC_TYPE) + private void updateIndexMappingWithTermFields(String indexName, String mappingType, Collection termFields, + ActionListener listener) { + // Put the whole mapping, not just the term fields, otherwise we'll wipe the _meta 
section of the mapping + try (XContentBuilder termFieldsMapping = ElasticsearchMappings.resultsMapping(mappingType, termFields)) { + final PutMappingRequest request = client.admin().indices().preparePutMapping(indexName) + .setType(mappingType) .setSource(termFieldsMapping).request(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, request, new ActionListener() { @Override @@ -485,7 +527,7 @@ private T parseSearchHit(SearchHit hit, BiFunction .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { return objectParser.apply(parser, null); } catch (IOException e) { - errorHandler.accept(new ElasticsearchParseException("failed to parse " + hit.getType(), e)); + errorHandler.accept(new ElasticsearchParseException("failed to parse " + hit.getId(), e)); return null; } } @@ -884,7 +926,7 @@ public QueryPage modelPlot(String jobId, int from, int size) { String indexName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); LOGGER.trace("ES API CALL: search model plots from index {} from {} size {}", indexName, from, size); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { searchResponse = client.prepareSearch(indexName) .setIndicesOptions(MlIndicesUtils.addIgnoreUnavailable(SearchRequest.DEFAULT_INDICES_OPTIONS)) .setQuery(new TermsQueryBuilder(Result.RESULT_TYPE.getPreferredName(), ModelPlot.RESULT_TYPE_VALUE)) @@ -1165,7 +1207,7 @@ public void updateCalendar(String calendarId, Set jobIdsToAdd, Set(currentJobs), calendar.getDescription()); - UpdateRequest updateRequest = new UpdateRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, updatedCalendar.documentId()); + UpdateRequest updateRequest = new UpdateRequest(MlMetaIndex.INDEX_NAME, updatedCalendar.documentId()); updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { @@ -1233,8 +1275,7 @@ public void removeJobFromCalendars(String jobId, ActionListener listene ids.remove(jobId); return new Calendar(c.getId(), new ArrayList<>(ids), c.getDescription()); }).forEach(c -> { - UpdateRequest updateRequest = new UpdateRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, - c.documentId()); + UpdateRequest updateRequest = new UpdateRequest(MlMetaIndex.INDEX_NAME, c.documentId()); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { updateRequest.doc(c.toXContent(builder, ToXContent.EMPTY_PARAMS)); } catch (IOException e) { @@ -1257,7 +1298,7 @@ public void removeJobFromCalendars(String jobId, ActionListener listene } public void calendar(String calendarId, ActionListener listener) { - GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, Calendar.documentId(calendarId)); + GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, Calendar.documentId(calendarId)); executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, getRequest, new ActionListener() { @Override public void onResponse(GetResponse getDocResponse) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamer.java index 3ed91412042c7..3d5f3d4ea91e3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamer.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamer.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.CategorizerState; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; @@ -24,7 +23,6 @@ import java.util.Objects; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; /** * A {@code StateStreamer} fetches the various state documents and @@ -73,9 +71,8 @@ public void restoreStateToStream(String jobId, ModelSnapshot modelSnapshot, Outp LOGGER.trace("ES API CALL: get ID {} from index {}", stateDocId, indexName); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { SearchResponse stateResponse = client.prepareSearch(indexName) - .setTypes(ElasticsearchMappings.DOC_TYPE) .setSize(1) .setQuery(QueryBuilders.idsQuery().addIds(stateDocId)).get(); if (stateResponse.getHits().getHits().length == 0) { @@ -100,9 +97,8 @@ public void restoreStateToStream(String jobId, ModelSnapshot modelSnapshot, Outp LOGGER.trace("ES API CALL: get ID {} from index {}", docId, indexName); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { SearchResponse stateResponse = client.prepareSearch(indexName) - .setTypes(ElasticsearchMappings.DOC_TYPE) .setSize(1) .setQuery(QueryBuilders.idsQuery().addIds(docId)).get(); if (stateResponse.getHits().getHits().length == 0) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java index b3f765d89ce1a..7e778e48524ba 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java @@ -90,9 +90,8 @@ public class AutodetectCommunicator implements Closeable { && job.getAnalysisConfig().getCategorizationFieldName() != null; } - public void init(ModelSnapshot modelSnapshot) throws IOException { + public void restoreState(ModelSnapshot modelSnapshot) { autodetectProcess.restoreState(stateStreamer, modelSnapshot); - createProcessWriter(Optional.empty()).writeHeader(); } private DataToProcessWriter createProcessWriter(Optional dataDescription) { @@ -101,6 +100,17 @@ private DataToProcessWriter createProcessWriter(Optional dataDe dataCountsReporter, xContentRegistry); } + /** + * This must be called once before {@link #writeToJob(InputStream, AnalysisRegistry, XContentType, DataLoadParams, BiConsumer)} + * can be used + */ + public void writeHeader() throws IOException { + createProcessWriter(Optional.empty()).writeHeader(); + } + + /** + * Call {@link #writeHeader()} exactly once before using this method + */ public void writeToJob(InputStream 
inputStream, AnalysisRegistry analysisRegistry, XContentType xContentType, DataLoadParams params, BiConsumer handler) { submitOperation(() -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index 6b8eada7406f6..46c8aa6930ada 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -455,16 +455,12 @@ protected void doRun() { logger.debug("Aborted opening job [{}] as it has been closed", jobId); return; } - if (processContext.getState() != ProcessContext.ProcessStateName.NOT_RUNNING) { - logger.debug("Cannot open job [{}] when its state is [{}]", - jobId, processContext.getState().getClass().getName()); - return; - } try { - createProcessAndSetRunning(processContext, job, params, closeHandler); - processContext.getAutodetectCommunicator().init(params.modelSnapshot()); - setJobState(jobTask, JobState.OPENED); + if (createProcessAndSetRunning(processContext, job, params, closeHandler)) { + processContext.getAutodetectCommunicator().restoreState(params.modelSnapshot()); + setJobState(jobTask, JobState.OPENED); + } } catch (Exception e1) { // No need to log here as the persistent task framework will log it try { @@ -501,18 +497,25 @@ protected void doRun() { ElasticsearchMappings::resultsMapping, client, clusterState, resultsMappingUpdateHandler); } - private void createProcessAndSetRunning(ProcessContext processContext, - Job job, - AutodetectParams params, - BiConsumer handler) { + private boolean createProcessAndSetRunning(ProcessContext processContext, + Job job, + AutodetectParams params, + BiConsumer handler) throws IOException { // At this point we lock the process context until the process has been started. // The reason behind this is to ensure closing the job does not happen before // the process is started as that can result to the job getting seemingly closed // but the actual process is hanging alive. processContext.tryLock(); try { + if (processContext.getState() != ProcessContext.ProcessStateName.NOT_RUNNING) { + logger.debug("Cannot open job [{}] when its state is [{}]", + job.getId(), processContext.getState().getClass().getName()); + return false; + } AutodetectCommunicator communicator = create(processContext.getJobTask(), job, params, handler); + communicator.writeHeader(); processContext.setRunning(communicator); + return true; } finally { // Now that the process is running and we have updated its state we can unlock. // It is important to unlock before we initialize the communicator (ie. load the model state) @@ -641,7 +644,9 @@ public void closeJob(JobTask jobTask, boolean restart, String reason) { processContext.tryLock(); try { if (processContext.setDying() == false) { - logger.debug("Cannot close job [{}] as it has already been closed", jobId); + logger.debug("Cannot close job [{}] as it has been marked as dying", jobId); + // The only way we can get here is if 2 close requests are made very close together. + // The other close has done the work so it's safe to return here without doing anything. 
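The net effect of the AutodetectCommunicator and AutodetectProcessManager changes above is a stricter ordering on the job-open path. A condensed outline, not the production code path (identifiers are taken from the patch, locking and error handling are omitted):

    // Inside createProcessAndSetRunning(...), under processContext.tryLock():
    AutodetectCommunicator communicator = create(processContext.getJobTask(), job, params, handler);
    communicator.writeHeader();              // write the process input header exactly once, before any writeToJob(...)
    processContext.setRunning(communicator);
    // Back in the opening task, only if the process was actually created:
    processContext.getAutodetectCommunicator().restoreState(params.modelSnapshot());
    setJobState(jobTask, JobState.OPENED);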
return; } @@ -655,10 +660,10 @@ public void closeJob(JobTask jobTask, boolean restart, String reason) { if (communicator == null) { logger.debug("Job [{}] is being closed before its process is started", jobId); jobTask.markAsCompleted(); - return; + } else { + communicator.close(restart, reason); } - communicator.close(restart, reason); processByAllocation.remove(allocationId); } catch (Exception e) { // If the close failed because the process has explicitly been killed by us then just pass on that exception @@ -678,7 +683,7 @@ public void closeJob(JobTask jobTask, boolean restart, String reason) { try { removeTmpStorage(jobId); } catch (IOException e) { - logger.error(new ParameterizedMessage("[{}]Failed to delete temporary files", jobId), e); + logger.error(new ParameterizedMessage("[{}] Failed to delete temporary files", jobId), e); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java index 9d3afd0ad0dcb..1a418bfb2a1c2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.ml.process.StateProcessor; import java.io.IOException; @@ -24,7 +23,6 @@ import java.util.List; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; /** * Reads the autodetect state and persists via a bulk request @@ -98,10 +96,10 @@ private BytesReference splitAndPersist(BytesReference bytesRef, int searchFrom) void persist(BytesReference bytes) throws IOException { BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(bytes, AnomalyDetectorsIndex.jobStateIndexWriteAlias(), ElasticsearchMappings.DOC_TYPE, XContentType.JSON); + bulkRequest.add(bytes, AnomalyDetectorsIndex.jobStateIndexWriteAlias(), XContentType.JSON); if (bulkRequest.numberOfActions() > 0) { LOGGER.trace("[{}] Persisting job state document", jobId); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { client.bulk(bulkRequest).actionGet(); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java index 249d3761b5842..4d2c9b76438ed 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import 
org.elasticsearch.xpack.core.ml.job.process.autodetect.state.CategorizerState; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelState; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; @@ -105,7 +104,6 @@ private void executeDeleteUnusedStateDocs(List unusedDocIds, ActionListe LOGGER.info("Found [{}] unused state documents; attempting to delete", unusedDocIds.size()); DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(AnomalyDetectorsIndex.jobStateIndexPattern()) - .types(ElasticsearchMappings.DOC_TYPE) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) .setQuery(QueryBuilders.idsQuery().addIds(unusedDocIds.toArray(new String[0]))); client.execute(DeleteByQueryAction.INSTANCE, deleteByQueryRequest, ActionListener.wrap( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/Auditor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/Auditor.java index addf478708a9e..4b5da8fae2cda 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/Auditor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/Auditor.java @@ -7,7 +7,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; @@ -38,30 +37,30 @@ public Auditor(Client client, String nodeName) { } public void info(String jobId, String message) { - indexDoc(AuditMessage.TYPE.getPreferredName(), AuditMessage.newInfo(jobId, message, nodeName)); + indexDoc(AuditMessage.newInfo(jobId, message, nodeName)); } public void warning(String jobId, String message) { - indexDoc(AuditMessage.TYPE.getPreferredName(), AuditMessage.newWarning(jobId, message, nodeName)); + indexDoc(AuditMessage.newWarning(jobId, message, nodeName)); } public void error(String jobId, String message) { - indexDoc(AuditMessage.TYPE.getPreferredName(), AuditMessage.newError(jobId, message, nodeName)); + indexDoc(AuditMessage.newError(jobId, message, nodeName)); } - private void indexDoc(String type, ToXContent toXContent) { - IndexRequest indexRequest = new IndexRequest(AuditorField.NOTIFICATIONS_INDEX, type); + private void indexDoc(ToXContent toXContent) { + IndexRequest indexRequest = new IndexRequest(AuditorField.NOTIFICATIONS_INDEX); indexRequest.source(toXContentBuilder(toXContent)); indexRequest.timeout(TimeValue.timeValueSeconds(5)); executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, indexRequest, new ActionListener() { @Override public void onResponse(IndexResponse indexResponse) { - LOGGER.trace("Successfully persisted {}", type); + LOGGER.trace("Successfully persisted audit message"); } @Override public void onFailure(Exception e) { - LOGGER.debug(new ParameterizedMessage("Error writing {}", new Object[]{type}, e)); + LOGGER.debug("Error writing audit message", e); } }, client::index); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java index 441317bcbe207..50d2515046a22 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java @@ -32,6 +32,7 @@ 
import java.util.Iterator; import java.util.List; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Phaser; import java.util.stream.Collectors; /** @@ -55,6 +56,7 @@ public class MlMemoryTracker implements LocalNodeMasterListener { private final ClusterService clusterService; private final JobManager jobManager; private final JobResultsProvider jobResultsProvider; + private final Phaser stopPhaser; private volatile boolean isMaster; private volatile Instant lastUpdateTime; private volatile Duration reassignmentRecheckInterval; @@ -65,6 +67,7 @@ public MlMemoryTracker(Settings settings, ClusterService clusterService, ThreadP this.clusterService = clusterService; this.jobManager = jobManager; this.jobResultsProvider = jobResultsProvider; + this.stopPhaser = new Phaser(1); setReassignmentRecheckInterval(PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING.get(settings)); clusterService.addLocalNodeMasterListener(this); clusterService.getClusterSettings().addSettingsUpdateConsumer( @@ -89,6 +92,23 @@ public void offMaster() { lastUpdateTime = null; } + /** + * Wait for all outstanding searches to complete. + * After returning, no new searches can be started. + */ + public void stop() { + logger.trace("ML memory tracker stop called"); + // We never terminate the phaser + assert stopPhaser.isTerminated() == false; + // If there are no registered parties or no unarrived parties then there is a flaw + // in the register/arrive/unregister logic in another method that uses the phaser + assert stopPhaser.getRegisteredParties() > 0; + assert stopPhaser.getUnarrivedParties() > 0; + stopPhaser.arriveAndAwaitAdvance(); + assert stopPhaser.getPhase() > 0; + logger.debug("ML memory tracker stopped"); + } + @Override public String executorName() { return MachineLearning.UTILITY_THREAD_POOL_NAME; @@ -146,13 +166,13 @@ public boolean asyncRefresh() { try { ActionListener listener = ActionListener.wrap( aVoid -> logger.trace("Job memory requirement refresh request completed successfully"), - e -> logger.error("Failed to refresh job memory requirements", e) + e -> logger.warn("Failed to refresh job memory requirements", e) ); threadPool.executor(executorName()).execute( () -> refresh(clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE), listener)); return true; } catch (EsRejectedExecutionException e) { - logger.debug("Couldn't schedule ML memory update - node might be shutting down", e); + logger.warn("Couldn't schedule ML memory update - node might be shutting down", e); } } @@ -246,25 +266,43 @@ public void refreshJobMemory(String jobId, ActionListener listener) { return; } + // The phaser prevents searches being started after the memory tracker's stop() method has returned + if (stopPhaser.register() != 0) { + // Phases above 0 mean we've been stopped, so don't do any operations that involve external interaction + stopPhaser.arriveAndDeregister(); + listener.onFailure(new EsRejectedExecutionException("Couldn't run ML memory update - node is shutting down")); + return; + } + ActionListener phaserListener = ActionListener.wrap( + r -> { + stopPhaser.arriveAndDeregister(); + listener.onResponse(r); + }, + e -> { + stopPhaser.arriveAndDeregister(); + listener.onFailure(e); + } + ); + try { jobResultsProvider.getEstablishedMemoryUsage(jobId, null, null, establishedModelMemoryBytes -> { if (establishedModelMemoryBytes <= 0L) { - setJobMemoryToLimit(jobId, listener); + setJobMemoryToLimit(jobId, phaserListener); } else { Long 
memoryRequirementBytes = establishedModelMemoryBytes + Job.PROCESS_MEMORY_OVERHEAD.getBytes(); memoryRequirementByJob.put(jobId, memoryRequirementBytes); - listener.onResponse(memoryRequirementBytes); + phaserListener.onResponse(memoryRequirementBytes); } }, e -> { logger.error("[" + jobId + "] failed to calculate job established model memory requirement", e); - setJobMemoryToLimit(jobId, listener); + setJobMemoryToLimit(jobId, phaserListener); } ); } catch (Exception e) { logger.error("[" + jobId + "] failed to calculate job established model memory requirement", e); - setJobMemoryToLimit(jobId, listener); + setJobMemoryToLimit(jobId, phaserListener); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteForecastAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteForecastAction.java index c3316b702552e..85d6281377f10 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteForecastAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteForecastAction.java @@ -36,6 +36,9 @@ public RestDeleteForecastAction(Settings settings, RestController controller) { "}/_forecast/{" + Forecast.FORECAST_ID.getPreferredName() + "}", this, DELETE, MachineLearning.PRE_V7_BASE_PATH + "anomaly_detectors/{" + Job.ID.getPreferredName() + "}/_forecast/{" + Forecast.FORECAST_ID.getPreferredName() + "}", deprecationLogger); + controller.registerHandler( + DELETE, MachineLearning.BASE_PATH + "anomaly_detectors/{" + Job.ID.getPreferredName() + + "}/_forecast/", this); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/TypedChainTaskExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/TypedChainTaskExecutor.java index 5af9c53649853..3ccf0f22da922 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/TypedChainTaskExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/TypedChainTaskExecutor.java @@ -43,7 +43,7 @@ public interface ChainTask { * {@code true} means continue on to the next task. * Must be able to handle null values. * @param failureShortCircuitPredicate The predicate on whether to short circuit execution on a give exception. - * {@code true} means that no more tasks should execute and the the listener::onFailure should be + * {@code true} means that no more tasks should execute and the listener::onFailure should be * called. 
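The Phaser choreography introduced in MlMemoryTracker above is compact enough to obscure the idea, so here is a self-contained sketch of the same shutdown-gate pattern in plain JDK terms. The class and method names are hypothetical; only the Phaser usage mirrors the patch (one pre-registered party for stop(), one extra registration per in-flight refresh):

    import java.util.concurrent.Phaser;

    class ShutdownGate {
        // One party is pre-registered for stop(); each unit of in-flight work registers another.
        private final Phaser stopPhaser = new Phaser(1);

        boolean tryStart() {
            if (stopPhaser.register() != 0) {     // a phase greater than 0 means stop() has already advanced the phaser
                stopPhaser.arriveAndDeregister(); // undo the registration and reject the work
                return false;
            }
            return true;                          // caller must invoke finish() when its work completes
        }

        void finish() {
            stopPhaser.arriveAndDeregister();     // one unit of in-flight work is done
        }

        void stop() {
            stopPhaser.arriveAndAwaitAdvance();   // wait for all in-flight work, then advance past phase 0
        }
    }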
*/ public TypedChainTaskExecutor(ExecutorService executorService, diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java index 2f1aa29d919d9..def27ab3279f4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java @@ -19,6 +19,29 @@ public class MachineLearningTests extends ESTestCase { + public void testMaxMachineMemoryPercent_givenDefault() { + int maxMachineMemoryPercent = MachineLearning.MAX_MACHINE_MEMORY_PERCENT.get(Settings.EMPTY); + assertEquals(30, maxMachineMemoryPercent); + } + + public void testMaxMachineMemoryPercent_givenValidSetting() { + Settings.Builder settings = Settings.builder(); + int expectedMaxMachineMemoryPercent = randomIntBetween(5, 200); + settings.put(MachineLearning.MAX_MACHINE_MEMORY_PERCENT.getKey(), expectedMaxMachineMemoryPercent); + int maxMachineMemoryPercent = MachineLearning.MAX_MACHINE_MEMORY_PERCENT.get(settings.build()); + assertEquals(expectedMaxMachineMemoryPercent, maxMachineMemoryPercent); + } + + public void testMaxMachineMemoryPercent_givenInvalidSetting() { + Settings.Builder settings = Settings.builder(); + int invalidMaxMachineMemoryPercent = randomFrom(4, 201); + settings.put(MachineLearning.MAX_MACHINE_MEMORY_PERCENT.getKey(), invalidMaxMachineMemoryPercent); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> MachineLearning.MAX_MACHINE_MEMORY_PERCENT.get(settings.build())); + assertThat(e.getMessage(), startsWith("Failed to parse value [" + invalidMaxMachineMemoryPercent + + "] for setting [xpack.ml.max_machine_memory_percent] must be")); + } + public void testNoAttributes_givenNoClash() { Settings.Builder builder = Settings.builder(); if (randomBoolean()) { @@ -62,8 +85,8 @@ public void testNoAttributes_givenClash() { public void testMachineMemory_givenStatsFailure() throws IOException { OsStats stats = mock(OsStats.class); - when(stats.getMem()).thenReturn(new OsStats.Mem(-1, -1)); - assertEquals(-1L, MachineLearning.machineMemoryFromStats(stats)); + when(stats.getMem()).thenReturn(new OsStats.Mem(0, 0)); + assertEquals(0L, MachineLearning.machineMemoryFromStats(stats)); } public void testMachineMemory_givenNoCgroup() throws IOException { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java index 4a70bcf02d3a5..4d07a93e1019c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigrationEligibilityCheckTests.java @@ -22,9 +22,11 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -52,6 +54,12 @@ public void setUpTests() { clusterService = 
mock(ClusterService.class); } + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + public void testCanStartMigration_givenMigrationIsDisabled() { Settings settings = newSettings(false); givenClusterSettings(settings); @@ -327,7 +335,7 @@ public void testDatafeedIsEligibleForMigration_givenDatafeedNotInClusterState() public void testDatafeedIsEligibleForMigration_givenStartedDatafeed() { Job openJob = JobTests.buildJobBuilder("open-job").build(); MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(openJob, false); - mlMetadata.putDatafeed(createCompatibleDatafeed(openJob.getId()), Collections.emptyMap()); + mlMetadata.putDatafeed(createCompatibleDatafeed(openJob.getId()), Collections.emptyMap(), xContentRegistry()); String datafeedId = "df-" + openJob.getId(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -353,7 +361,7 @@ public void testDatafeedIsEligibleForMigration_givenStartedDatafeed() { public void testDatafeedIsEligibleForMigration_givenStartedDatafeedAndMigrationIsDisabled() { Job openJob = JobTests.buildJobBuilder("open-job").build(); MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(openJob, false); - mlMetadata.putDatafeed(createCompatibleDatafeed(openJob.getId()), Collections.emptyMap()); + mlMetadata.putDatafeed(createCompatibleDatafeed(openJob.getId()), Collections.emptyMap(), xContentRegistry()); String datafeedId = "df-" + openJob.getId(); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); @@ -379,7 +387,7 @@ public void testDatafeedIsEligibleForMigration_givenStartedDatafeedAndMigrationI public void testDatafeedIsEligibleForMigration_givenStoppedDatafeed() { Job job = JobTests.buildJobBuilder("closed-job").build(); MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(job, false); - mlMetadata.putDatafeed(createCompatibleDatafeed(job.getId()), Collections.emptyMap()); + mlMetadata.putDatafeed(createCompatibleDatafeed(job.getId()), Collections.emptyMap(), xContentRegistry()); String datafeedId = "df-" + job.getId(); MetaData.Builder metaData = MetaData.builder(); @@ -402,7 +410,7 @@ public void testDatafeedIsEligibleForMigration_givenStoppedDatafeed() { public void testDatafeedIsEligibleForMigration_givenUnallocatedDatafeed() { Job job = JobTests.buildJobBuilder("closed-job").build(); MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(job, false); - mlMetadata.putDatafeed(createCompatibleDatafeed(job.getId()), Collections.emptyMap()); + mlMetadata.putDatafeed(createCompatibleDatafeed(job.getId()), Collections.emptyMap(), xContentRegistry()); String datafeedId = "df-" + job.getId(); MetaData.Builder metaData = MetaData.builder(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java index 62c29efdff968..81d344fd1dd02 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlConfigMigratorTests.java @@ -14,8 +14,11 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -47,6 +50,12 @@ public class MlConfigMigratorTests extends ESTestCase { + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + public void testNonDeletingJobs() { Job job1 = JobTests.buildJobBuilder("openjob1").build(); Job job2 = JobTests.buildJobBuilder("openjob2").build(); @@ -64,7 +73,7 @@ public void testClosedOrUnallocatedJobs() { .putJob(closedJob, false) .putJob(jobWithoutAllocation, false) .putJob(openJob, false) - .putDatafeed(createCompatibleDatafeed(closedJob.getId()), Collections.emptyMap()); + .putDatafeed(createCompatibleDatafeed(closedJob.getId()), Collections.emptyMap(), xContentRegistry()); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); tasksBuilder.addTask(MlTasks.jobTaskId("jobwithoutallocation"), MlTasks.JOB_TASK_NAME, @@ -103,9 +112,9 @@ public void testStoppedDatafeedConfigs() { .putJob(job1, false) .putJob(job2, false) .putJob(job3, false) - .putDatafeed(stopppedDatafeed, Collections.emptyMap()) - .putDatafeed(datafeedWithoutAllocation, Collections.emptyMap()) - .putDatafeed(startedDatafeed, Collections.emptyMap()); + .putDatafeed(stopppedDatafeed, Collections.emptyMap(), xContentRegistry()) + .putDatafeed(datafeedWithoutAllocation, Collections.emptyMap(), xContentRegistry()) + .putDatafeed(startedDatafeed, Collections.emptyMap(), xContentRegistry()); PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); tasksBuilder.addTask(MlTasks.datafeedTaskId(stopppedDatafeed.getId()), MlTasks.DATAFEED_TASK_NAME, @@ -206,8 +215,8 @@ public void testRemoveJobsAndDatafeeds_removeAll() { MlMetadata.Builder mlMetadata = new MlMetadata.Builder() .putJob(job1, false) .putJob(job2, false) - .putDatafeed(datafeedConfig1, Collections.emptyMap()) - .putDatafeed(datafeedConfig2, Collections.emptyMap()); + .putDatafeed(datafeedConfig1, Collections.emptyMap(), xContentRegistry()) + .putDatafeed(datafeedConfig2, Collections.emptyMap(), xContentRegistry()); MlConfigMigrator.RemovalResult removalResult = MlConfigMigrator.removeJobsAndDatafeeds( Arrays.asList(job1, job2), Arrays.asList(datafeedConfig1, datafeedConfig2), mlMetadata.build()); @@ -225,7 +234,7 @@ public void testRemoveJobsAndDatafeeds_removeSome() { MlMetadata.Builder mlMetadata = new MlMetadata.Builder() .putJob(job1, false) .putJob(job2, false) - .putDatafeed(datafeedConfig1, Collections.emptyMap()); + .putDatafeed(datafeedConfig1, Collections.emptyMap(), xContentRegistry()); MlConfigMigrator.RemovalResult removalResult = MlConfigMigrator.removeJobsAndDatafeeds( Arrays.asList(job1, JobTests.buildJobBuilder("job-none").build()), diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java index 637b1089d9eb2..0e6df7db57ab8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java @@ -50,7 +50,7 @@ protected MlMetadata createTestInstance() { } job = new Job.Builder(job).setAnalysisConfig(analysisConfig).build(); builder.putJob(job, false); - builder.putDatafeed(datafeedConfig, Collections.emptyMap()); + builder.putDatafeed(datafeedConfig, Collections.emptyMap(), xContentRegistry()); } else { builder.putJob(job, false); } @@ -151,7 +151,7 @@ protected MlMetadata mutateInstance(MlMetadata instance) { metadataBuilder.putJob(entry.getValue(), true); } for (Map.Entry entry : datafeeds.entrySet()) { - metadataBuilder.putDatafeed(entry.getValue(), Collections.emptyMap()); + metadataBuilder.putDatafeed(entry.getValue(), Collections.emptyMap(), xContentRegistry()); } switch (between(0, 1)) { @@ -172,7 +172,7 @@ protected MlMetadata mutateInstance(MlMetadata instance) { } randomJob = new Job.Builder(randomJob).setAnalysisConfig(analysisConfig).build(); metadataBuilder.putJob(randomJob, false); - metadataBuilder.putDatafeed(datafeedConfig, Collections.emptyMap()); + metadataBuilder.putDatafeed(datafeedConfig, Collections.emptyMap(), xContentRegistry()); break; default: throw new AssertionError("Illegal randomisation branch"); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java index 6c14423d9acdb..bdb1be97bd2e8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java @@ -9,17 +9,23 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.license.LicenseService; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.MachineLearningField; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + /** * An extension to {@link ESSingleNodeTestCase} that adds node settings specifically needed for ML test cases. 
*/ @@ -41,11 +47,44 @@ protected Settings nodeSettings() { return newSettings.build(); } + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + @Override protected Collection> getPlugins() { return pluginList(LocalStateMachineLearning.class); } + /** + * This cleanup is to fix the problem described in + * https://github.com/elastic/elasticsearch/issues/38952 + */ + @Override + public void tearDown() throws Exception { + try { + logger.trace("[{}#{}]: ML-specific after test cleanup", getTestClass().getSimpleName(), getTestName()); + String[] nonAnnotationMlIndices; + boolean mlAnnotationsIndexExists; + do { + String[] mlIndices = client().admin().indices().prepareGetIndex().addIndices(".ml-*").get().indices(); + nonAnnotationMlIndices = Arrays.stream(mlIndices).filter(name -> name.startsWith(".ml-annotations") == false) + .toArray(String[]::new); + mlAnnotationsIndexExists = mlIndices.length > nonAnnotationMlIndices.length; + } while (nonAnnotationMlIndices.length > 0 && mlAnnotationsIndexExists == false); + if (nonAnnotationMlIndices.length > 0) { + // Delete the ML indices apart from the annotations index. The annotations index will be deleted by the + // base class cleanup. We want to delete all the others first so that the annotations index doesn't get + // automatically recreated. + assertAcked(client().admin().indices().prepareDelete(nonAnnotationMlIndices).get()); + } + } finally { + super.tearDown(); + } + } + protected void waitForMlTemplates() throws Exception { // block until the templates are installed assertBusy(() -> { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java index 21a4fb8763eec..2216aa0c39167 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java @@ -7,7 +7,10 @@ package org.elasticsearch.xpack.ml.action; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; @@ -33,12 +36,18 @@ public class TransportStartDatafeedActionTests extends ESTestCase { + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + public void testValidate_jobClosed() { Job job1 = DatafeedManagerTests.createDatafeedJob().build(new Date()); PersistentTasksCustomMetaData tasks = PersistentTasksCustomMetaData.builder().build(); DatafeedConfig datafeedConfig1 = DatafeedManagerTests.createDatafeedConfig("foo-datafeed", "job_id").build(); Exception e = expectThrows(ElasticsearchStatusException.class, - () -> TransportStartDatafeedAction.validate(job1, datafeedConfig1, tasks)); + () -> TransportStartDatafeedAction.validate(job1, 
datafeedConfig1, tasks, xContentRegistry())); assertThat(e.getMessage(), equalTo("cannot start datafeed [foo-datafeed] because job [job_id] is closed")); } @@ -49,7 +58,7 @@ public void testValidate_jobOpening() { PersistentTasksCustomMetaData tasks = tasksBuilder.build(); DatafeedConfig datafeedConfig1 = DatafeedManagerTests.createDatafeedConfig("foo-datafeed", "job_id").build(); - TransportStartDatafeedAction.validate(job1, datafeedConfig1, tasks); + TransportStartDatafeedAction.validate(job1, datafeedConfig1, tasks, xContentRegistry()); } public void testValidate_jobOpened() { @@ -59,19 +68,19 @@ public void testValidate_jobOpened() { PersistentTasksCustomMetaData tasks = tasksBuilder.build(); DatafeedConfig datafeedConfig1 = DatafeedManagerTests.createDatafeedConfig("foo-datafeed", "job_id").build(); - TransportStartDatafeedAction.validate(job1, datafeedConfig1, tasks); + TransportStartDatafeedAction.validate(job1, datafeedConfig1, tasks, xContentRegistry()); } public void testDeprecationsLogged() { Job job1 = DatafeedManagerTests.createDatafeedJob().build(new Date()); DatafeedConfig.Builder datafeedConfig = DatafeedManagerTests.createDatafeedConfig("start-data-feed-test", job1.getId()); DatafeedConfig config = spy(datafeedConfig.build()); - doReturn(Collections.singletonList("Deprecated Agg")).when(config).getAggDeprecations(); - doReturn(Collections.singletonList("Deprecated Query")).when(config).getQueryDeprecations(); + doReturn(Collections.singletonList("Deprecated Agg")).when(config).getAggDeprecations(any(NamedXContentRegistry.class)); + doReturn(Collections.singletonList("Deprecated Query")).when(config).getQueryDeprecations(any(NamedXContentRegistry.class)); Auditor auditor = mock(Auditor.class); - TransportStartDatafeedAction.auditDeprecations(config, job1, auditor); + TransportStartDatafeedAction.auditDeprecations(config, job1, auditor, xContentRegistry()); verify(auditor).warning(job1.getId(), "datafeed [start-data-feed-test] configuration has deprecations. 
[Deprecated Agg, Deprecated Query]"); @@ -81,12 +90,12 @@ public void testNoDeprecationsLogged() { Job job1 = DatafeedManagerTests.createDatafeedJob().build(new Date()); DatafeedConfig.Builder datafeedConfig = DatafeedManagerTests.createDatafeedConfig("start-data-feed-test", job1.getId()); DatafeedConfig config = spy(datafeedConfig.build()); - doReturn(Collections.emptyList()).when(config).getAggDeprecations(); - doReturn(Collections.emptyList()).when(config).getQueryDeprecations(); + doReturn(Collections.emptyList()).when(config).getAggDeprecations(any(NamedXContentRegistry.class)); + doReturn(Collections.emptyList()).when(config).getQueryDeprecations(any(NamedXContentRegistry.class)); Auditor auditor = mock(Auditor.class); - TransportStartDatafeedAction.auditDeprecations(config, job1, auditor); + TransportStartDatafeedAction.auditDeprecations(config, job1, auditor, xContentRegistry()); verify(auditor, never()).warning(any(), any()); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java index 2540ab8cde8ef..8d8bd84a97c12 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java @@ -370,7 +370,7 @@ public void testExtractionProblem() throws Exception { verify(client, never()).execute(same(PersistJobAction.INSTANCE), any()); } - public void testPostAnalysisProblem() throws Exception { + public void testPostAnalysisProblem() { client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); when(client.threadPool()).thenReturn(threadPool); @@ -378,6 +378,8 @@ public void testPostAnalysisProblem() throws Exception { when(client.execute(same(FlushJobAction.INSTANCE), any())).thenReturn(flushJobFuture); when(client.execute(same(PostDataAction.INSTANCE), any())).thenThrow(new RuntimeException()); + when(dataExtractor.getEndTime()).thenReturn(1000L); + DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1); DatafeedJob.AnalysisProblemException analysisProblemException = expectThrows(DatafeedJob.AnalysisProblemException.class, () -> datafeedJob.runLookBack(0L, 1000L)); @@ -397,7 +399,7 @@ public void testPostAnalysisProblem() throws Exception { verify(client, never()).execute(same(PersistJobAction.INSTANCE), any()); } - public void testPostAnalysisProblemIsConflict() throws Exception { + public void testPostAnalysisProblemIsConflict() { client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); when(client.threadPool()).thenReturn(threadPool); @@ -405,6 +407,8 @@ public void testPostAnalysisProblemIsConflict() throws Exception { when(client.execute(same(FlushJobAction.INSTANCE), any())).thenReturn(flushJobFuture); when(client.execute(same(PostDataAction.INSTANCE), any())).thenThrow(ExceptionsHelper.conflictStatusException("conflict")); + when(dataExtractor.getEndTime()).thenReturn(1000L); + DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1); DatafeedJob.AnalysisProblemException analysisProblemException = expectThrows(DatafeedJob.AnalysisProblemException.class, () -> datafeedJob.runLookBack(0L, 1000L)); @@ -424,7 +428,7 @@ public void testPostAnalysisProblemIsConflict() throws Exception { verify(client, never()).execute(same(PersistJobAction.INSTANCE), any()); } - public void testFlushAnalysisProblem() throws Exception { + public void testFlushAnalysisProblem() { 
when(client.execute(same(FlushJobAction.INSTANCE), any())).thenThrow(new RuntimeException()); currentTime = 60000L; @@ -436,7 +440,7 @@ public void testFlushAnalysisProblem() throws Exception { assertThat(analysisProblemException.shouldStop, is(false)); } - public void testFlushAnalysisProblemIsConflict() throws Exception { + public void testFlushAnalysisProblemIsConflict() { when(client.execute(same(FlushJobAction.INSTANCE), any())).thenThrow(ExceptionsHelper.conflictStatusException("conflict")); currentTime = 60000L; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java index 3c34c2e1d6790..00db9462c6087 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java @@ -6,7 +6,10 @@ package org.elasticsearch.xpack.ml.datafeed; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; @@ -28,6 +31,12 @@ public class DatafeedJobValidatorTests extends ESTestCase { + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + public void testValidate_GivenNonZeroLatency() { String errorMessage = Messages.getMessage(Messages.DATAFEED_DOES_NOT_SUPPORT_JOB_WITH_LATENCY); Job.Builder builder = buildJobBuilder("foo"); @@ -39,7 +48,7 @@ public void testValidate_GivenNonZeroLatency() { DatafeedConfig datafeedConfig = createValidDatafeedConfig().build(); ElasticsearchStatusException e = ESTestCase.expectThrows(ElasticsearchStatusException.class, - () -> DatafeedJobValidator.validate(datafeedConfig, job)); + () -> DatafeedJobValidator.validate(datafeedConfig, job, xContentRegistry())); assertEquals(errorMessage, e.getMessage()); } @@ -53,7 +62,7 @@ public void testVerify_GivenZeroLatency() { Job job = builder.build(new Date()); DatafeedConfig datafeedConfig = createValidDatafeedConfig().build(); - DatafeedJobValidator.validate(datafeedConfig, job); + DatafeedJobValidator.validate(datafeedConfig, job, xContentRegistry()); } public void testVerify_GivenNoLatency() { @@ -64,7 +73,7 @@ public void testVerify_GivenNoLatency() { Job job = builder.build(new Date()); DatafeedConfig datafeedConfig = createValidDatafeedConfig().build(); - DatafeedJobValidator.validate(datafeedConfig, job); + DatafeedJobValidator.validate(datafeedConfig, job, xContentRegistry()); } public void testVerify_GivenAggsAndNoSummaryCountField() throws IOException { @@ -79,7 +88,7 @@ public void testVerify_GivenAggsAndNoSummaryCountField() throws IOException { DatafeedConfig datafeedConfig = createValidDatafeedConfigWithAggs(1800.0).build(); ElasticsearchStatusException e = ESTestCase.expectThrows(ElasticsearchStatusException.class, - () -> DatafeedJobValidator.validate(datafeedConfig, job)); + () -> 
DatafeedJobValidator.validate(datafeedConfig, job, xContentRegistry())); assertEquals(errorMessage, e.getMessage()); } @@ -96,7 +105,7 @@ public void testVerify_GivenAggsAndEmptySummaryCountField() throws IOException { DatafeedConfig datafeedConfig = createValidDatafeedConfigWithAggs(1800.0).build(); ElasticsearchStatusException e = ESTestCase.expectThrows(ElasticsearchStatusException.class, - () -> DatafeedJobValidator.validate(datafeedConfig, job)); + () -> DatafeedJobValidator.validate(datafeedConfig, job, xContentRegistry())); assertEquals(errorMessage, e.getMessage()); } @@ -109,7 +118,7 @@ public void testVerify_GivenAggsAndSummaryCountField() throws IOException { builder.setAnalysisConfig(ac); Job job = builder.build(new Date()); DatafeedConfig datafeedConfig = createValidDatafeedConfigWithAggs(900.0).build(); - DatafeedJobValidator.validate(datafeedConfig, job); + DatafeedJobValidator.validate(datafeedConfig, job, xContentRegistry()); } public void testVerify_GivenHistogramIntervalGreaterThanBucketSpan() throws IOException { @@ -122,7 +131,7 @@ public void testVerify_GivenHistogramIntervalGreaterThanBucketSpan() throws IOEx DatafeedConfig datafeedConfig = createValidDatafeedConfigWithAggs(1800001.0).build(); ElasticsearchStatusException e = ESTestCase.expectThrows(ElasticsearchStatusException.class, - () -> DatafeedJobValidator.validate(datafeedConfig, job)); + () -> DatafeedJobValidator.validate(datafeedConfig, job, xContentRegistry())); assertEquals("Aggregation interval [1800001ms] must be less than or equal to the bucket_span [1800000ms]", e.getMessage()); } @@ -137,11 +146,11 @@ public void testVerify_HistogramIntervalIsDivisorOfBucketSpan() throws IOExcepti DatafeedConfig datafeedConfig = createValidDatafeedConfigWithAggs(37 * 1000).build(); ElasticsearchStatusException e = ESTestCase.expectThrows(ElasticsearchStatusException.class, - () -> DatafeedJobValidator.validate(datafeedConfig, job)); + () -> DatafeedJobValidator.validate(datafeedConfig, job, xContentRegistry())); assertEquals("Aggregation interval [37000ms] must be a divisor of the bucket_span [300000ms]", e.getMessage()); DatafeedConfig goodDatafeedConfig = createValidDatafeedConfigWithAggs(60 * 1000).build(); - DatafeedJobValidator.validate(goodDatafeedConfig, job); + DatafeedJobValidator.validate(goodDatafeedConfig, job, xContentRegistry()); } public void testVerify_FrequencyIsMultipleOfHistogramInterval() throws IOException { @@ -155,25 +164,25 @@ public void testVerify_FrequencyIsMultipleOfHistogramInterval() throws IOExcepti // Check with multiples datafeedBuilder.setFrequency(TimeValue.timeValueSeconds(60)); - DatafeedJobValidator.validate(datafeedBuilder.build(), job); + DatafeedJobValidator.validate(datafeedBuilder.build(), job, xContentRegistry()); datafeedBuilder.setFrequency(TimeValue.timeValueSeconds(120)); - DatafeedJobValidator.validate(datafeedBuilder.build(), job); + DatafeedJobValidator.validate(datafeedBuilder.build(), job, xContentRegistry()); datafeedBuilder.setFrequency(TimeValue.timeValueSeconds(180)); - DatafeedJobValidator.validate(datafeedBuilder.build(), job); + DatafeedJobValidator.validate(datafeedBuilder.build(), job, xContentRegistry()); datafeedBuilder.setFrequency(TimeValue.timeValueSeconds(240)); - DatafeedJobValidator.validate(datafeedBuilder.build(), job); + DatafeedJobValidator.validate(datafeedBuilder.build(), job, xContentRegistry()); datafeedBuilder.setFrequency(TimeValue.timeValueHours(1)); - DatafeedJobValidator.validate(datafeedBuilder.build(), job); + 
DatafeedJobValidator.validate(datafeedBuilder.build(), job, xContentRegistry()); // Now non-multiples datafeedBuilder.setFrequency(TimeValue.timeValueSeconds(30)); ElasticsearchStatusException e = ESTestCase.expectThrows(ElasticsearchStatusException.class, - () -> DatafeedJobValidator.validate(datafeedBuilder.build(), job)); + () -> DatafeedJobValidator.validate(datafeedBuilder.build(), job, xContentRegistry())); assertEquals("Datafeed frequency [30s] must be a multiple of the aggregation interval [60000ms]", e.getMessage()); datafeedBuilder.setFrequency(TimeValue.timeValueSeconds(90)); e = ESTestCase.expectThrows(ElasticsearchStatusException.class, - () -> DatafeedJobValidator.validate(datafeedBuilder.build(), job)); + () -> DatafeedJobValidator.validate(datafeedBuilder.build(), job, xContentRegistry())); assertEquals("Datafeed frequency [1.5m] must be a multiple of the aggregation interval [60000ms]", e.getMessage()); } @@ -187,16 +196,16 @@ public void testVerify_BucketIntervalAndDataCheckWindowAreValid() { DatafeedConfig.Builder datafeedBuilder = createValidDatafeedConfig(); datafeedBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueMinutes(10))); - DatafeedJobValidator.validate(datafeedBuilder.build(), job); + DatafeedJobValidator.validate(datafeedBuilder.build(), job, xContentRegistry()); datafeedBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueSeconds(1))); ElasticsearchStatusException e = ESTestCase.expectThrows(ElasticsearchStatusException.class, - () -> DatafeedJobValidator.validate(datafeedBuilder.build(), job)); + () -> DatafeedJobValidator.validate(datafeedBuilder.build(), job, xContentRegistry())); assertEquals(Messages.getMessage(Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_TOO_SMALL, "1s", "2s"), e.getMessage()); datafeedBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(24))); e = ESTestCase.expectThrows(ElasticsearchStatusException.class, - () -> DatafeedJobValidator.validate(datafeedBuilder.build(), job)); + () -> DatafeedJobValidator.validate(datafeedBuilder.build(), job, xContentRegistry())); assertEquals(Messages.getMessage( Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS, "1d", "2s"), e.getMessage()); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java index ff4160ffe4d19..4f186164760aa 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.test.ESTestCase; @@ -34,12 +33,9 @@ import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.notifications.AuditMessage; -import 
org.elasticsearch.xpack.core.ml.notifications.AuditorField; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.action.TransportStartDatafeedAction.DatafeedTask; import org.elasticsearch.xpack.ml.action.TransportStartDatafeedActionTests; -import org.elasticsearch.xpack.ml.job.persistence.MockClientBuilder; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import org.elasticsearch.xpack.ml.notifications.Auditor; import org.junit.Before; @@ -98,12 +94,6 @@ public void setUpTests() { clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(cs.build()); - - ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(XContentBuilder.class); - Client client = new MockClientBuilder("foo") - .prepareIndex(AuditorField.NOTIFICATIONS_INDEX, AuditMessage.TYPE.getPreferredName(), "responseId", argumentCaptor) - .build(); - DiscoveryNode dNode = mock(DiscoveryNode.class); when(dNode.getName()).thenReturn("this_node_has_a_name"); when(clusterService.localNode()).thenReturn(dNode); @@ -136,8 +126,8 @@ public void setUpTests() { AutodetectProcessManager autodetectProcessManager = mock(AutodetectProcessManager.class); doAnswer(invocation -> hasOpenAutodetectCommunicator.get()).when(autodetectProcessManager).hasOpenAutodetectCommunicator(anyLong()); - datafeedManager = new DatafeedManager(threadPool, client, clusterService, datafeedJobBuilder, () -> currentTime, auditor, - autodetectProcessManager); + datafeedManager = new DatafeedManager(threadPool, mock(Client.class), clusterService, datafeedJobBuilder, + () -> currentTime, auditor, autodetectProcessManager); verify(clusterService).addListener(capturedClusterStateListener.capture()); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactoryTests.java index d776b720ed288..8857472062f81 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactoryTests.java @@ -6,7 +6,10 @@ package org.elasticsearch.xpack.ml.datafeed.delayeddatacheck; import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DelayedDataCheckConfig; @@ -26,29 +29,35 @@ public class DelayedDataDetectorFactoryTests extends ESTestCase { + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + public void testBuilder() { Job job = createJob(TimeValue.timeValueSeconds(2)); DatafeedConfig datafeedConfig = createDatafeed(false, null); // Should not throw - assertThat(DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, mock(Client.class)), + assertThat(DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, mock(Client.class), xContentRegistry()), instanceOf(NullDelayedDataDetector.class)); 
datafeedConfig = createDatafeed(true, TimeValue.timeValueMinutes(10)); // Should not throw - assertThat(DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, mock(Client.class)), + assertThat(DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, mock(Client.class), xContentRegistry()), instanceOf(DatafeedDelayedDataDetector.class)); DatafeedConfig tooSmallDatafeedConfig = createDatafeed(true, TimeValue.timeValueSeconds(1)); IllegalArgumentException e = ESTestCase.expectThrows(IllegalArgumentException.class, - () -> DelayedDataDetectorFactory.buildDetector(job, tooSmallDatafeedConfig, mock(Client.class))); + () -> DelayedDataDetectorFactory.buildDetector(job, tooSmallDatafeedConfig, mock(Client.class), xContentRegistry())); assertEquals(Messages.getMessage(Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_TOO_SMALL, "1s", "2s"), e.getMessage()); DatafeedConfig tooBigDatafeedConfig = createDatafeed(true, TimeValue.timeValueHours(12)); e = ESTestCase.expectThrows(IllegalArgumentException.class, - () -> DelayedDataDetectorFactory.buildDetector(job, tooBigDatafeedConfig, mock(Client.class))); + () -> DelayedDataDetectorFactory.buildDetector(job, tooBigDatafeedConfig, mock(Client.class), xContentRegistry())); assertEquals(Messages.getMessage( Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS, "12h", "2s"), e.getMessage()); @@ -57,14 +66,14 @@ public void testBuilder() { // Should not throw DelayedDataDetector delayedDataDetector = - DelayedDataDetectorFactory.buildDetector(withBigBucketSpan, datafeedConfig, mock(Client.class)); + DelayedDataDetectorFactory.buildDetector(withBigBucketSpan, datafeedConfig, mock(Client.class), xContentRegistry()); assertThat(delayedDataDetector.getWindow(), equalTo(TimeValue.timeValueHours(1).millis() * 8)); datafeedConfig = createDatafeed(true, null); // Should not throw delayedDataDetector = - DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, mock(Client.class)); + DelayedDataDetectorFactory.buildDetector(job, datafeedConfig, mock(Client.class), xContentRegistry()); assertThat(delayedDataDetector.getWindow(), equalTo(TimeValue.timeValueHours(2).millis())); } @@ -98,5 +107,4 @@ private DatafeedConfig createDatafeed(boolean shouldDetectDelayedData, TimeValue return builder.build(); } - } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java index 1478a485cc44e..dee28e71a7bf7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java @@ -12,6 +12,8 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -61,6 +63,12 @@ public class DataExtractorFactoryTests extends ESTestCase { private Client client; + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, 
Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + @Before public void setUpTests() { client = mock(Client.class); @@ -101,7 +109,7 @@ public void testCreateDataExtractorFactoryGivenDefaultScroll() { e -> fail() ); - DataExtractorFactory.create(client, datafeedConfig, jobBuilder.build(new Date()), listener); + DataExtractorFactory.create(client, datafeedConfig, jobBuilder.build(new Date()), xContentRegistry(), listener); } public void testCreateDataExtractorFactoryGivenScrollWithAutoChunk() { @@ -117,7 +125,7 @@ public void testCreateDataExtractorFactoryGivenScrollWithAutoChunk() { e -> fail() ); - DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); } public void testCreateDataExtractorFactoryGivenScrollWithOffChunk() { @@ -133,7 +141,7 @@ public void testCreateDataExtractorFactoryGivenScrollWithOffChunk() { e -> fail() ); - DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); } public void testCreateDataExtractorFactoryGivenDefaultAggregation() { @@ -151,7 +159,7 @@ public void testCreateDataExtractorFactoryGivenDefaultAggregation() { e -> fail() ); - DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); } public void testCreateDataExtractorFactoryGivenAggregationWithOffChunk() { @@ -170,7 +178,7 @@ public void testCreateDataExtractorFactoryGivenAggregationWithOffChunk() { e -> fail() ); - DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); } public void testCreateDataExtractorFactoryGivenDefaultAggregationWithAutoChunk() { @@ -189,7 +197,7 @@ public void testCreateDataExtractorFactoryGivenDefaultAggregationWithAutoChunk() e -> fail() ); - DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); } public void testCreateDataExtractorFactoryGivenRollupAndValidAggregation() { @@ -209,7 +217,7 @@ public void testCreateDataExtractorFactoryGivenRollupAndValidAggregation() { dataExtractorFactory -> assertThat(dataExtractorFactory, instanceOf(RollupDataExtractorFactory.class)), e -> fail() ); - DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); } public void testCreateDataExtractorFactoryGivenRollupAndValidAggregationAndAutoChunk() { @@ -229,7 +237,7 @@ public void testCreateDataExtractorFactoryGivenRollupAndValidAggregationAndAutoC dataExtractorFactory -> assertThat(dataExtractorFactory, instanceOf(ChunkedDataExtractorFactory.class)), e -> fail() ); - DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), 
xContentRegistry(), listener); } public void testCreateDataExtractorFactoryGivenRollupButNoAggregations() { @@ -249,7 +257,7 @@ public void testCreateDataExtractorFactoryGivenRollupButNoAggregations() { } ); - DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); } public void testCreateDataExtractorFactoryGivenRollupWithBadInterval() { @@ -274,7 +282,7 @@ public void testCreateDataExtractorFactoryGivenRollupWithBadInterval() { assertThat(e, instanceOf(IllegalArgumentException.class)); } ); - DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); } public void testCreateDataExtractorFactoryGivenRollupMissingTerms() { @@ -298,7 +306,7 @@ public void testCreateDataExtractorFactoryGivenRollupMissingTerms() { assertThat(e, instanceOf(IllegalArgumentException.class)); } ); - DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); } public void testCreateDataExtractorFactoryGivenRollupMissingMetric() { @@ -322,7 +330,7 @@ public void testCreateDataExtractorFactoryGivenRollupMissingMetric() { assertThat(e, instanceOf(IllegalArgumentException.class)); } ); - DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), listener); + DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); } private void givenAggregatableRollup(String field, String type, int minuteInterval, String... 
groupByTerms) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java index c9a2e8712e243..fdfd75759db13 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java @@ -6,6 +6,9 @@ package org.elasticsearch.xpack.ml.datafeed.extractor.aggregation; import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.test.ESTestCase; @@ -17,6 +20,7 @@ import org.junit.Before; import java.util.Arrays; +import java.util.Collections; import java.util.Date; import static org.hamcrest.Matchers.equalTo; @@ -31,6 +35,12 @@ public void setUpMocks() { client = mock(Client.class); } + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + public void testNewExtractor_GivenAlignedTimes() { AggregationDataExtractorFactory factory = createFactory(1000L); @@ -66,6 +76,6 @@ private AggregationDataExtractorFactory createFactory(long histogramInterval) { DatafeedConfig.Builder datafeedConfigBuilder = new DatafeedConfig.Builder("foo-feed", jobBuilder.getId()); datafeedConfigBuilder.setParsedAggregations(aggs); datafeedConfigBuilder.setIndices(Arrays.asList("my_index")); - return new AggregationDataExtractorFactory(client, datafeedConfigBuilder.build(), jobBuilder.build(new Date())); + return new AggregationDataExtractorFactory(client, datafeedConfigBuilder.build(), jobBuilder.build(new Date()), xContentRegistry()); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java index 77a8c936beb37..058e3c9e78645 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java @@ -6,6 +6,9 @@ package org.elasticsearch.xpack.ml.datafeed.extractor.chunked; import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.test.ESTestCase; @@ -18,6 +21,7 @@ import org.junit.Before; import java.util.Arrays; +import java.util.Collections; import java.util.Date; import static org.hamcrest.Matchers.equalTo; @@ -28,6 +32,12 @@ public class ChunkedDataExtractorFactoryTests extends ESTestCase { private Client client; private 
DataExtractorFactory dataExtractorFactory; + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + @Before public void setUpMocks() { client = mock(Client.class); @@ -93,6 +103,7 @@ private ChunkedDataExtractorFactory createFactory(long histogramInterval) { DatafeedConfig.Builder datafeedConfigBuilder = new DatafeedConfig.Builder("foo-feed", jobBuilder.getId()); datafeedConfigBuilder.setParsedAggregations(aggs); datafeedConfigBuilder.setIndices(Arrays.asList("my_index")); - return new ChunkedDataExtractorFactory(client, datafeedConfigBuilder.build(), jobBuilder.build(new Date()), dataExtractorFactory); + return new ChunkedDataExtractorFactory(client, datafeedConfigBuilder.build(), jobBuilder.build(new Date()), + xContentRegistry(), dataExtractorFactory); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java index c8e53dfcf7d3c..406f1a5fa9024 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java @@ -596,5 +596,10 @@ public boolean isCancelled() { public void cancel() { // do nothing } + + @Override + public long getEndTime() { + return 0; + } } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java index 170c95d1cab6b..131d47de38db6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java @@ -296,6 +296,9 @@ public void testExtractionGivenContinueScrollResponseHasError() throws IOExcepti extractor.setNextResponse(createErrorResponse()); assertThat(extractor.hasNext(), is(true)); expectThrows(IOException.class, extractor::next); + + List capturedClearScrollIds = getCapturedClearScrollIds(); + assertThat(capturedClearScrollIds.size(), equalTo(1)); } public void testExtractionGivenInitSearchResponseHasShardFailures() throws IOException { @@ -305,6 +308,11 @@ public void testExtractionGivenInitSearchResponseHasShardFailures() throws IOExc assertThat(extractor.hasNext(), is(true)); expectThrows(IOException.class, extractor::next); + + List capturedClearScrollIds = getCapturedClearScrollIds(); + // We should clear the scroll context twice: once for the first search when we retry + // and once after the retry where we'll have an exception + assertThat(capturedClearScrollIds.size(), equalTo(2)); } public void testExtractionGivenInitSearchResponseEncounteredUnavailableShards() throws IOException { @@ -341,6 +349,9 @@ public void testResetScrollAfterShardFailure() throws IOException { // A second failure is not tolerated assertThat(extractor.hasNext(), is(true)); expectThrows(IOException.class, extractor::next); + + List capturedClearScrollIds = getCapturedClearScrollIds(); + assertThat(capturedClearScrollIds.size(), equalTo(2)); } public void 
testResetScollUsesLastResultTimestamp() throws IOException { @@ -397,6 +408,9 @@ public void testResetScrollAfterSearchPhaseExecutionException() throws IOExcepti // A second failure is not tolerated assertThat(extractor.hasNext(), is(true)); expectThrows(SearchPhaseExecutionException.class, extractor::next); + + List capturedClearScrollIds = getCapturedClearScrollIds(); + assertThat(capturedClearScrollIds.size(), equalTo(2)); } public void testSearchPhaseExecutionExceptionOnInitScroll() throws IOException { @@ -408,7 +422,9 @@ public void testSearchPhaseExecutionExceptionOnInitScroll() throws IOException { expectThrows(IOException.class, extractor::next); List capturedClearScrollIds = getCapturedClearScrollIds(); - assertThat(capturedClearScrollIds.isEmpty(), is(true)); + // We should clear the scroll context twice: once for the first search when we retry + // and once after the retry where we'll have an exception + assertThat(capturedClearScrollIds.size(), equalTo(2)); } public void testDomainSplitScriptField() throws IOException { @@ -496,6 +512,7 @@ private SearchResponse createSearchResponse(List timestamps, List private SearchResponse createErrorResponse() { SearchResponse searchResponse = mock(SearchResponse.class); when(searchResponse.status()).thenReturn(RestStatus.INTERNAL_SERVER_ERROR); + when(searchResponse.getScrollId()).thenReturn(randomAlphaOfLength(1000)); return searchResponse; } @@ -505,6 +522,7 @@ private SearchResponse createResponseWithShardFailures() { when(searchResponse.getShardFailures()).thenReturn( new ShardSearchFailure[] { new ShardSearchFailure(new RuntimeException("shard failed"))}); when(searchResponse.getFailedShards()).thenReturn(1); + when(searchResponse.getScrollId()).thenReturn(randomAlphaOfLength(1000)); return searchResponse; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobConfigProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobConfigProviderIT.java index f6ff80edeec02..b1879eb07f192 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobConfigProviderIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobConfigProviderIT.java @@ -57,7 +57,7 @@ public class JobConfigProviderIT extends MlSingleNodeTestCase { @Before public void createComponents() throws Exception { - jobConfigProvider = new JobConfigProvider(client()); + jobConfigProvider = new JobConfigProvider(client(), xContentRegistry()); waitForMlTemplates(); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java index 02cc738477cfb..d5018c3ef13f1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java @@ -5,13 +5,19 @@ */ package org.elasticsearch.xpack.ml.integration; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; 
import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; @@ -32,6 +38,7 @@ import org.elasticsearch.xpack.core.ml.job.config.RuleAction; import org.elasticsearch.xpack.core.ml.job.config.RuleScope; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCountsTests; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; @@ -55,6 +62,7 @@ import java.util.Date; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; @@ -79,6 +87,55 @@ public void createComponents() throws Exception { waitForMlTemplates(); } + @AwaitsFix(bugUrl ="https://github.com/elastic/elasticsearch/issues/40134") + public void testMultipleSimultaneousJobCreations() { + + int numJobs = randomIntBetween(4, 7); + + // Each job should result in one extra field being added to the results index mappings: field1, field2, field3, etc. + // Due to all being created simultaneously this test may reveal race conditions in the code that updates the mappings. + List<PutJobAction.Request> requests = new ArrayList<>(numJobs); + for (int i = 1; i <= numJobs; ++i) { + Job.Builder builder = new Job.Builder("job" + i); + AnalysisConfig.Builder ac = createAnalysisConfig("field" + i, Collections.emptyList()); + DataDescription.Builder dc = new DataDescription.Builder(); + builder.setAnalysisConfig(ac); + builder.setDataDescription(dc); + + requests.add(new PutJobAction.Request(builder)); + } + + // Start the requests as close together as possible, without waiting for each to complete before starting the next one. + List<ActionFuture<PutJobAction.Response>> futures = new ArrayList<>(numJobs); + for (PutJobAction.Request request : requests) { + futures.add(client().execute(PutJobAction.INSTANCE, request)); + } + + // Only after all requests are in-flight, wait for all the requests to complete. + for (ActionFuture<PutJobAction.Response> future : futures) { + future.actionGet(); + } + + // Assert that the mappings contain all the additional fields: field1, field2, field3, etc.
+ String sharedResultsIndex = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + GetMappingsRequest request = new GetMappingsRequest().indices(sharedResultsIndex); + GetMappingsResponse response = client().execute(GetMappingsAction.INSTANCE, request).actionGet(); + ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> indexMappings = response.getMappings(); + assertNotNull(indexMappings); + ImmutableOpenMap<String, MappingMetaData> typeMappings = indexMappings.get(sharedResultsIndex); + assertNotNull("expected " + sharedResultsIndex + " in " + indexMappings, typeMappings); + assertEquals("expected 1 type in " + typeMappings, 1, typeMappings.size()); + Map<String, Object> mappings = typeMappings.iterator().next().value.getSourceAsMap(); + assertNotNull(mappings); + @SuppressWarnings("unchecked") + Map<String, Object> properties = (Map<String, Object>) mappings.get("properties"); + assertNotNull("expected 'properties' field in " + mappings, properties); + for (int i = 1; i <= numJobs; ++i) { + String fieldName = "field" + i; + assertNotNull("expected '" + fieldName + "' field in " + properties, properties.get(fieldName)); + } + } + public void testGetCalandarByJobId() throws Exception { List<Calendar> calendars = new ArrayList<>(); calendars.add(new Calendar("empty calendar", Collections.emptyList(), null)); @@ -468,7 +525,7 @@ private Job.Builder createJob(String jobId, List filterIds) { private Job.Builder createJob(String jobId, List<String> filterIds, List<String> jobGroups) { Job.Builder builder = new Job.Builder(jobId); builder.setGroups(jobGroups); - AnalysisConfig.Builder ac = createAnalysisConfig(filterIds); + AnalysisConfig.Builder ac = createAnalysisConfig("by_field", filterIds); DataDescription.Builder dc = new DataDescription.Builder(); builder.setAnalysisConfig(ac); builder.setDataDescription(dc); @@ -478,14 +535,14 @@ private Job.Builder createJob(String jobId, List filterIds, List return builder; } - private AnalysisConfig.Builder createAnalysisConfig(List<String> filterIds) { + private AnalysisConfig.Builder createAnalysisConfig(String byFieldName, List<String> filterIds) { Detector.Builder detector = new Detector.Builder("mean", "field"); - detector.setByFieldName("by_field"); + detector.setByFieldName(byFieldName); List<DetectionRule> rules = new ArrayList<>(); for (String filterId : filterIds) { RuleScope.Builder ruleScope = RuleScope.builder(); - ruleScope.include("by_field", filterId); + ruleScope.include(byFieldName, filterId); rules.add(new DetectionRule.Builder(ruleScope).setActions(RuleAction.SKIP_RESULT).build()); } @@ -499,7 +556,7 @@ private void indexScheduledEvents(List events) throws IOExceptio bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for (ScheduledEvent event : events) { - IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE); + IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { ToXContent.MapParams params =
new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.INCLUDE_TYPE, "true")); indexRequest.source(filter.toXContent(builder, params)); @@ -572,7 +629,7 @@ private void indexCalendars(List calendars) throws IOException { bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for (Calendar calendar: calendars) { - IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, calendar.documentId()); + IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME).id(calendar.documentId()); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.INCLUDE_TYPE, "true")); indexRequest.source(calendar.toXContent(builder, params)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java index 4ee76a4b1ab21..69b82ef984671 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlConfigMigratorIT.java @@ -5,9 +5,10 @@ */ package org.elasticsearch.xpack.ml.integration; +import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; @@ -28,7 +29,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -39,7 +39,6 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.ml.MlConfigMigrationEligibilityCheck; import org.elasticsearch.xpack.ml.MlConfigMigrator; import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; @@ -49,10 +48,12 @@ import java.io.IOException; import java.io.InputStream; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; @@ -91,7 +92,7 @@ public void testWriteConfigToIndex() throws InterruptedException { final String indexJobId = "job-already-migrated"; // Add a job to the index - JobConfigProvider jobConfigProvider = new JobConfigProvider(client()); + JobConfigProvider jobConfigProvider = new JobConfigProvider(client(), xContentRegistry()); Job indexJob = buildJobBuilder(indexJobId).build(); // Same as index job but has extra fields in its custom settings // which will be used to check the config was overwritten @@ -140,7 +141,7 @@ public void testMigrateConfigs() throws InterruptedException, IOException { DatafeedConfig.Builder 
builder = new DatafeedConfig.Builder("df-1", "job-foo"); builder.setIndices(Collections.singletonList("beats*")); - mlMetadata.putDatafeed(builder.build(), Collections.emptyMap()); + mlMetadata.putDatafeed(builder.build(), Collections.emptyMap(), xContentRegistry()); MetaData.Builder metaData = MetaData.builder(); RoutingTable.Builder routingTable = RoutingTable.builder(); @@ -150,9 +151,13 @@ public void testMigrateConfigs() throws InterruptedException, IOException { .routingTable(routingTable.build()) .build(); when(clusterService.state()).thenReturn(clusterState); - + List<MetaData.Custom> customs = new ArrayList<>(); doAnswer(invocation -> { ClusterStateUpdateTask listener = (ClusterStateUpdateTask) invocation.getArguments()[1]; + ClusterState result = listener.execute(clusterState); + for (ObjectCursor<MetaData.Custom> value : result.metaData().customs().values()){ + customs.add(value.value); + } listener.clusterStateProcessed("source", mock(ClusterState.class), mock(ClusterState.class)); return null; }).when(clusterService).submitStateUpdateTask(eq("remove-migrated-ml-configs"), any()); @@ -166,13 +171,16 @@ public void testMigrateConfigs() throws InterruptedException, IOException { blockingCall(actionListener -> mlConfigMigrator.migrateConfigs(clusterState, actionListener), responseHolder, exceptionHolder); + // Verify that we have custom values in the new cluster state and that none of them is null + assertThat(customs.size(), greaterThan(0)); + assertThat(customs.stream().anyMatch(Objects::isNull), is(false)); assertNull(exceptionHolder.get()); assertTrue(responseHolder.get()); assertSnapshot(mlMetadata.build()); // check the jobs have been migrated AtomicReference<List<Job.Builder>> jobsHolder = new AtomicReference<>(); - JobConfigProvider jobConfigProvider = new JobConfigProvider(client()); + JobConfigProvider jobConfigProvider = new JobConfigProvider(client(), xContentRegistry()); blockingCall(actionListener -> jobConfigProvider.expandJobs("*", true, true, actionListener), jobsHolder, exceptionHolder); @@ -213,13 +221,12 @@ public void testExistingSnapshotDoesNotBlockMigration() throws InterruptedExcept AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary(client(), clusterService.state(), future); future.actionGet(); - IndexRequestBuilder indexRequest = client().prepareIndex(AnomalyDetectorsIndex.jobStateIndexWriteAlias(), - ElasticsearchMappings.DOC_TYPE, "ml-config") - .setSource(Collections.singletonMap("a_field", "a_value")) - .setOpType(DocWriteRequest.OpType.CREATE) + IndexRequest indexRequest = new IndexRequest(AnomalyDetectorsIndex.jobStateIndexWriteAlias()).id("ml-config") + .source(Collections.singletonMap("a_field", "a_value")) + .opType(DocWriteRequest.OpType.CREATE) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - indexRequest.execute().actionGet(); + client().index(indexRequest).actionGet(); doAnswer(invocation -> { ClusterStateUpdateTask listener = (ClusterStateUpdateTask) invocation.getArguments()[1]; @@ -242,7 +249,7 @@ public void testExistingSnapshotDoesNotBlockMigration() throws InterruptedExcept // check the jobs have been migrated AtomicReference<List<Job.Builder>> jobsHolder = new AtomicReference<>(); - JobConfigProvider jobConfigProvider = new JobConfigProvider(client()); + JobConfigProvider jobConfigProvider = new JobConfigProvider(client(), xContentRegistry()); blockingCall(actionListener -> jobConfigProvider.expandJobs("*", true, true, actionListener), jobsHolder, exceptionHolder); @@ -264,7 +271,7 @@ public void testMigrateConfigs_GivenLargeNumberOfJobsAndDatafeeds() throws Inter for (int i = 0; i < 
datafeedCount; i++) { DatafeedConfig.Builder builder = new DatafeedConfig.Builder("df-" + i, "job-" + i); builder.setIndices(Collections.singletonList("beats*")); - mlMetadata.putDatafeed(builder.build(), Collections.emptyMap()); + mlMetadata.putDatafeed(builder.build(), Collections.emptyMap(), xContentRegistry()); } MetaData.Builder metaData = MetaData.builder(); @@ -295,7 +302,7 @@ public void testMigrateConfigs_GivenLargeNumberOfJobsAndDatafeeds() throws Inter // check the jobs have been migrated AtomicReference> jobsHolder = new AtomicReference<>(); - JobConfigProvider jobConfigProvider = new JobConfigProvider(client()); + JobConfigProvider jobConfigProvider = new JobConfigProvider(client(), xContentRegistry()); blockingCall(actionListener -> jobConfigProvider.expandJobs("*", true, true, actionListener), jobsHolder, exceptionHolder); @@ -346,7 +353,7 @@ public void testMigrateConfigsWithoutTasks_GivenMigrationIsDisabled() throws Int mlMetadata.putJob(buildJobBuilder("job-bar").build(), false); DatafeedConfig.Builder builder = new DatafeedConfig.Builder("df-1", "job-foo"); builder.setIndices(Collections.singletonList("beats*")); - mlMetadata.putDatafeed(builder.build(), Collections.emptyMap()); + mlMetadata.putDatafeed(builder.build(), Collections.emptyMap(), xContentRegistry()); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) .metaData(MetaData.builder() @@ -366,7 +373,7 @@ public void testMigrateConfigsWithoutTasks_GivenMigrationIsDisabled() throws Int // check the jobs have not been migrated AtomicReference> jobsHolder = new AtomicReference<>(); - JobConfigProvider jobConfigProvider = new JobConfigProvider(client()); + JobConfigProvider jobConfigProvider = new JobConfigProvider(client(), xContentRegistry()); blockingCall(actionListener -> jobConfigProvider.expandJobs("*", true, true, actionListener), jobsHolder, exceptionHolder); assertNull(exceptionHolder.get()); @@ -386,7 +393,6 @@ public void assertSnapshot(MlMetadata expectedMlMetadata) throws IOException { client().admin().indices().prepareRefresh(AnomalyDetectorsIndex.jobStateIndexPattern()).get(); SearchResponse searchResponse = client() .prepareSearch(AnomalyDetectorsIndex.jobStateIndexPattern()) - .setTypes(ElasticsearchMappings.DOC_TYPE) .setSize(1) .setQuery(QueryBuilders.idsQuery().addIds("ml-config")) .get(); @@ -395,7 +401,7 @@ public void assertSnapshot(MlMetadata expectedMlMetadata) throws IOException { try (InputStream stream = searchResponse.getHits().getAt(0).getSourceRef().streamInput(); XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, stream)) { MlMetadata recoveredMeta = MlMetadata.LENIENT_PARSER.apply(parser, null).build(); assertEquals(expectedMlMetadata, recoveredMeta); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index d68fe5225fb16..fd402f6d2183f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -62,6 +62,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37117") public void 
testFailOver() throws Exception { internalCluster().ensureAtLeastNumDataNodes(3); ensureStableClusterOnAllNodes(3); @@ -105,6 +106,7 @@ public void testLoseDedicatedMasterNode() throws Exception { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37117") public void testFullClusterRestart() throws Exception { internalCluster().ensureAtLeastNumDataNodes(3); ensureStableClusterOnAllNodes(3); @@ -157,22 +159,15 @@ public void testCloseUnassignedJobAndDatafeed() throws Exception { client().execute(GetDatafeedsStatsAction.INSTANCE, datafeedStatsRequest).actionGet(); assertEquals(DatafeedState.STARTED, datafeedStatsResponse.getResponse().results().get(0).getDatafeedState()); - // Can't normal stop an unassigned datafeed + // An unassigned datafeed can be stopped either normally or by force StopDatafeedAction.Request stopDatafeedRequest = new StopDatafeedAction.Request(datafeedId); - ElasticsearchStatusException statusException = expectThrows(ElasticsearchStatusException.class, - () -> client().execute(StopDatafeedAction.INSTANCE, stopDatafeedRequest).actionGet()); - assertEquals("Cannot stop datafeed [" + datafeedId + - "] because the datafeed does not have an assigned node. Use force stop to stop the datafeed", - statusException.getMessage()); - - // Can only force stop an unassigned datafeed - stopDatafeedRequest.setForce(true); + stopDatafeedRequest.setForce(randomBoolean()); StopDatafeedAction.Response stopDatafeedResponse = client().execute(StopDatafeedAction.INSTANCE, stopDatafeedRequest).actionGet(); assertTrue(stopDatafeedResponse.isStopped()); // Can't normal stop an unassigned job CloseJobAction.Request closeJobRequest = new CloseJobAction.Request(jobId); - statusException = expectThrows(ElasticsearchStatusException.class, + ElasticsearchStatusException statusException = expectThrows(ElasticsearchStatusException.class, () -> client().execute(CloseJobAction.INSTANCE, closeJobRequest).actionGet()); assertEquals("Cannot close job [" + jobId + "] because the job does not have an assigned node. 
Use force close to close the job", diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/NetworkDisruptionIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/NetworkDisruptionIT.java index 8a257baa3d628..e05263014d33a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/NetworkDisruptionIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/NetworkDisruptionIT.java @@ -38,6 +38,12 @@ protected Collection> nodePlugins() { return plugins; } + // Remove this once the AwaitsFix below has been resolved + public void testDummy() { + assertTrue(true); + } + + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/39858") public void testJobRelocation() throws Exception { internalCluster().ensureAtLeastNumDataNodes(5); ensureStableCluster(5); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java index c52a5a592d817..60e35cc032364 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -37,6 +38,7 @@ import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MachineLearningField; @@ -104,6 +106,12 @@ public class JobManagerTests extends ESTestCase { private Auditor auditor; private UpdateJobProcessNotifier updateJobProcessNotifier; + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + @Before public void setup() throws Exception { Settings settings = Settings.builder() @@ -586,7 +594,7 @@ private Job.Builder createJob() { private JobManager createJobManager(Client client) { return new JobManager(environment, environment.settings(), jobResultsProvider, clusterService, - auditor, threadPool, client, updateJobProcessNotifier); + auditor, threadPool, client, updateJobProcessNotifier, xContentRegistry()); } private ClusterState createClusterState() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java index 8532cfc4feac4..f2ee3c7825795 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java @@ -770,39 +770,38 @@ public void testModelSnapshots() throws IOException { public void testViolatedFieldCountLimit() throws 
Exception { Map mapping = new HashMap<>(); - for (int i = 0; i < 10; i++) { + + int i = 0; + for (; i < 10; i++) { mapping.put("field" + i, Collections.singletonMap("type", "string")); } - IndexMetaData.Builder indexMetaData1 = new IndexMetaData.Builder("index1") + IndexMetaData indexMetaData1 = new IndexMetaData.Builder("index1") .settings(Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)) - .putMapping(new MappingMetaData("type1", Collections.singletonMap("properties", mapping))); - MetaData metaData = MetaData.builder() - .put(indexMetaData1) + .putMapping(new MappingMetaData("type1", Collections.singletonMap("properties", mapping))) .build(); - boolean result = JobResultsProvider.violatedFieldCountLimit("index1", 0, 10, - ClusterState.builder(new ClusterName("_name")).metaData(metaData).build()); + boolean result = JobResultsProvider.violatedFieldCountLimit(0, 10, indexMetaData1.mapping()); assertFalse(result); - result = JobResultsProvider.violatedFieldCountLimit("index1", 1, 10, - ClusterState.builder(new ClusterName("_name")).metaData(metaData).build()); + result = JobResultsProvider.violatedFieldCountLimit(1, 10, indexMetaData1.mapping()); assertTrue(result); - IndexMetaData.Builder indexMetaData2 = new IndexMetaData.Builder("index1") + for (; i < 20; i++) { + mapping.put("field" + i, Collections.singletonMap("type", "string")); + } + + IndexMetaData indexMetaData2 = new IndexMetaData.Builder("index1") .settings(Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)) .putMapping(new MappingMetaData("type1", Collections.singletonMap("properties", mapping))) - .putMapping(new MappingMetaData("type2", Collections.singletonMap("properties", mapping))); - metaData = MetaData.builder() - .put(indexMetaData2) .build(); - result = JobResultsProvider.violatedFieldCountLimit("index1", 0, 19, - ClusterState.builder(new ClusterName("_name")).metaData(metaData).build()); + + result = JobResultsProvider.violatedFieldCountLimit(0, 19, indexMetaData2.mapping()); assertTrue(result); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java index 4a4284e2d1456..999c36c7b4f86 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java @@ -28,14 +28,11 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.get.GetRequestBuilder; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequestBuilder; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; @@ -355,39 +352,6 @@ public 
Void answer(InvocationOnMock invocationOnMock) { return this; } - public MockClientBuilder prepareSearchAnySize(String index, String type, SearchResponse response, ArgumentCaptor filter) { - SearchRequestBuilder builder = mock(SearchRequestBuilder.class); - when(builder.setTypes(eq(type))).thenReturn(builder); - when(builder.addSort(any(SortBuilder.class))).thenReturn(builder); - when(builder.setQuery(filter.capture())).thenReturn(builder); - when(builder.setPostFilter(filter.capture())).thenReturn(builder); - when(builder.setFrom(any(Integer.class))).thenReturn(builder); - when(builder.setSize(any(Integer.class))).thenReturn(builder); - when(builder.setFetchSource(eq(true))).thenReturn(builder); - when(builder.addDocValueField(any(String.class))).thenReturn(builder); - when(builder.addDocValueField(any(String.class), any(String.class))).thenReturn(builder); - when(builder.addSort(any(String.class), any(SortOrder.class))).thenReturn(builder); - when(builder.get()).thenReturn(response); - when(client.prepareSearch(eq(index))).thenReturn(builder); - return this; - } - - @SuppressWarnings("unchecked") - public MockClientBuilder prepareIndex(String index, String type, String responseId, ArgumentCaptor getSource) { - IndexRequestBuilder builder = mock(IndexRequestBuilder.class); - PlainActionFuture actionFuture = mock(PlainActionFuture.class); - IndexResponse response = mock(IndexResponse.class); - when(response.getId()).thenReturn(responseId); - - when(client.prepareIndex(eq(index), eq(type))).thenReturn(builder); - when(client.prepareIndex(eq(index), eq(type), any(String.class))).thenReturn(builder); - when(builder.setSource(getSource.capture())).thenReturn(builder); - when(builder.setRefreshPolicy(eq(RefreshPolicy.IMMEDIATE))).thenReturn(builder); - when(builder.execute()).thenReturn(actionFuture); - when(actionFuture.actionGet()).thenReturn(response); - return this; - } - @SuppressWarnings("unchecked") public MockClientBuilder prepareAlias(String indexName, String alias, QueryBuilder filter) { when(aliasesRequestBuilder.addAlias(eq(indexName), eq(alias), eq(filter))).thenReturn(aliasesRequestBuilder); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/notifications/AuditorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/notifications/AuditorTests.java index e74d1a7f2de77..441125c931b7c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/notifications/AuditorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/notifications/AuditorTests.java @@ -51,7 +51,6 @@ public void testInfo() throws IOException { verify(client).index(indexRequestCaptor.capture(), any()); IndexRequest indexRequest = indexRequestCaptor.getValue(); assertArrayEquals(new String[] {".ml-notifications"}, indexRequest.indices()); - assertEquals("audit_message", indexRequest.type()); assertEquals(TimeValue.timeValueSeconds(5), indexRequest.timeout()); AuditMessage auditMessage = parseAuditMessage(indexRequest.source()); assertEquals("foo", auditMessage.getJobId()); @@ -66,7 +65,6 @@ public void testWarning() throws IOException { verify(client).index(indexRequestCaptor.capture(), any()); IndexRequest indexRequest = indexRequestCaptor.getValue(); assertArrayEquals(new String[] {".ml-notifications"}, indexRequest.indices()); - assertEquals("audit_message", indexRequest.type()); assertEquals(TimeValue.timeValueSeconds(5), indexRequest.timeout()); AuditMessage auditMessage = parseAuditMessage(indexRequest.source()); assertEquals("bar", 
auditMessage.getJobId()); @@ -81,7 +79,6 @@ public void testError() throws IOException { verify(client).index(indexRequestCaptor.capture(), any()); IndexRequest indexRequest = indexRequestCaptor.getValue(); assertArrayEquals(new String[] {".ml-notifications"}, indexRequest.indices()); - assertEquals("audit_message", indexRequest.type()); assertEquals(TimeValue.timeValueSeconds(5), indexRequest.timeout()); AuditMessage auditMessage = parseAuditMessage(indexRequest.source()); assertEquals("foobar", auditMessage.getJobId()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java index 3e54994ac043b..1dd2ba923ef00 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; @@ -29,6 +30,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import static org.hamcrest.CoreMatchers.instanceOf; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.anyString; @@ -157,6 +159,19 @@ public void testRefreshOne() { assertNull(memoryTracker.getJobMemoryRequirement(jobId)); } + public void testStop() { + + memoryTracker.onMaster(); + memoryTracker.stop(); + + AtomicReference exception = new AtomicReference<>(); + memoryTracker.refreshJobMemory("job", ActionListener.wrap(ESTestCase::assertNull, exception::set)); + + assertNotNull(exception.get()); + assertThat(exception.get(), instanceOf(EsRejectedExecutionException.class)); + assertEquals("Couldn't run ML memory update - node is shutting down", exception.get().getMessage()); + } + private PersistentTasksCustomMetaData.PersistentTask makeTestTask(String jobId) { return new PersistentTasksCustomMetaData.PersistentTask<>("job-" + jobId, MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams(jobId), 0, PersistentTasksCustomMetaData.INITIAL_ASSIGNMENT); diff --git a/x-pack/plugin/monitoring/build.gradle b/x-pack/plugin/monitoring/build.gradle index cc6ebc8c91ccc..8b6f07787fbc1 100644 --- a/x-pack/plugin/monitoring/build.gradle +++ b/x-pack/plugin/monitoring/build.gradle @@ -1,5 +1,3 @@ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask - evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' @@ -22,6 +20,8 @@ dependencies { // baz - this goes away after we separate out the actions #27759 testCompile "org.elasticsearch.plugin:x-pack-watcher:${version}" + + testCompile "org.elasticsearch.plugin:x-pack-ilm:${version}" } compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" @@ -56,15 +56,15 @@ integTest.enabled = false // Instead we create a separate task to run the // tests based on ESIntegTestCase -task internalClusterTest(type: RandomizedTestingTask, - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Multi-node tests', - dependsOn: unitTest.dependsOn) { - include '**/*IT.class' - 
systemProperty 'es.set.netty.runtime.available.processors', 'false' +task internalClusterTest(type: Test) { + description = 'Multi-node tests' + mustRunAfter test + + include '**/*IT.class' + systemProperty 'es.set.netty.runtime.available.processors', 'false' } + check.dependsOn internalClusterTest -internalClusterTest.mustRunAfter test // also add an "alias" task to make typing on the command line easier task icTest { task icTest { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java index 1aa7ef230a390..28cc769c46d7f 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java @@ -233,7 +233,7 @@ protected void doRun() throws Exception { final Collection results = new ArrayList<>(); for (Collector collector : collectors) { if (isStarted() == false) { - // Do not collect more data if the the monitoring service is stopping + // Do not collect more data if the monitoring service is stopping // otherwise some collectors might just fail. return; } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java index bdccb5604a361..6aab3114b7807 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java @@ -29,7 +29,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ClientHelper.MONITORING_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; import static org.elasticsearch.xpack.monitoring.collector.ccr.FollowStatsMonitoringDoc.TYPE; public final class StatsCollector extends Collector { @@ -75,7 +74,7 @@ protected Collection doCollect( final MonitoringDoc.Node node, final long interval, final ClusterState clusterState) throws Exception { - try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, MONITORING_ORIGIN)) { + try (ThreadContext.StoredContext ignore = threadContext.stashWithOrigin(MONITORING_ORIGIN)) { final long timestamp = timestamp(); final String clusterUuid = clusterUuid(clusterState); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java index 8742e0b645f8e..855780d4836ae 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java @@ -25,7 +25,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ClientHelper.MONITORING_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; /** * Collector for Machine Learning Job Stats. 
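The CCR and ML stats collectors above and below drop the static ClientHelper.stashWithOrigin import and call the equivalent instance method on ThreadContext instead. A minimal, self-contained sketch of the underlying stash-and-restore idiom follows; SimpleThreadContext and its StoredContext are invented stand-ins for illustration only, not the Elasticsearch classes.

// Illustrative stand-ins only: this is NOT the Elasticsearch ThreadContext API,
// just the try-with-resources stash/restore idiom the collectors rely on.
import java.util.HashMap;
import java.util.Map;

final class SimpleThreadContext {

    /** Restores the previously active headers when closed. */
    interface StoredContext extends AutoCloseable {
        @Override
        void close();
    }

    private Map<String, String> headers = new HashMap<>();

    /** Swaps the current headers for a single origin header and hands back a restorer. */
    StoredContext stashWithOrigin(String origin) {
        final Map<String, String> previous = headers;
        headers = new HashMap<>();
        headers.put("origin", origin);
        return () -> headers = previous;
    }

    Map<String, String> headers() {
        return headers;
    }

    public static void main(String[] args) {
        SimpleThreadContext threadContext = new SimpleThreadContext();
        threadContext.headers().put("user", "alice");

        // Work inside the block runs under the monitoring origin; the caller's
        // headers come back automatically when the stored context is closed.
        try (StoredContext ignore = threadContext.stashWithOrigin("monitoring")) {
            System.out.println("inside: " + threadContext.headers());
        }
        System.out.println("after:  " + threadContext.headers());
    }
}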
@@ -73,7 +72,7 @@ protected List doCollect(final MonitoringDoc.Node node, final long interval, final ClusterState clusterState) throws Exception { // fetch details about all jobs - try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, MONITORING_ORIGIN)) { + try (ThreadContext.StoredContext ignore = threadContext.stashWithOrigin(MONITORING_ORIGIN)) { final GetJobsStatsAction.Response jobs = client.getJobsStats(new GetJobsStatsAction.Request(MetaData.ALL)) .actionGet(getCollectionTimeout()); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/ExportBulk.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/ExportBulk.java index 0501c6257270d..938f0f57c2651 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/ExportBulk.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/ExportBulk.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.monitoring.exporter; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -14,7 +15,6 @@ import java.util.Collection; import java.util.List; import java.util.Objects; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; /** @@ -25,7 +25,10 @@ public abstract class ExportBulk { protected final String name; protected final ThreadContext threadContext; - private final AtomicReference state = new AtomicReference<>(State.INITIALIZING); + /** + * {@code closed} being {@code false} means that it can still be added onto. + */ + private final AtomicBoolean closed = new AtomicBoolean(); public ExportBulk(String name, ThreadContext threadContext) { this.name = Objects.requireNonNull(name); @@ -45,7 +48,7 @@ public String getName() { * Add documents to the exporting bulk */ public void add(Collection docs) throws ExportException { - if (state.get() == State.INITIALIZING) { + if (closed.get() == false) { doAdd(docs); } } @@ -56,7 +59,7 @@ public void add(Collection docs) throws ExportException { * Flush the exporting bulk */ public void flush(ActionListener listener) { - if (state.compareAndSet(State.INITIALIZING, State.FLUSHING)) { + if (closed.compareAndSet(false, true)) { doFlush(listener); } else { listener.onResponse(null); @@ -65,56 +68,6 @@ public void flush(ActionListener listener) { protected abstract void doFlush(ActionListener listener); - /** - * Close the exporting bulk - */ - public void close(boolean flush, ActionListener listener) { - if (state.getAndSet(State.CLOSED) != State.CLOSED) { - if (flush) { - flushAndClose(listener); - } else { - doClose(listener); - } - } else { - listener.onResponse(null); - } - } - - private void flushAndClose(ActionListener listener) { - doFlush(new ActionListener() { - @Override - public void onResponse(Void aVoid) { - doClose(listener); - } - - @Override - public void onFailure(Exception e) { - // we need to close in spite of the failure, but we will return the failure - doClose(new ActionListener() { - - private final ExportException exportException = new ExportException("Exception when closing export bulk", e); - - @Override - public void onResponse(Void aVoid) { - listener.onFailure(exportException); - } - - @Override - public void onFailure(Exception e) { - exportException.addSuppressed(e); - listener.onFailure(exportException); - } - }); - } - }); - } 
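The hunks on either side of this point strip out ExportBulk's doClose/flushAndClose machinery and its INITIALIZING/FLUSHING/CLOSED enum in favour of a single flush-once flag. A rough standalone sketch of that pattern, under invented names (SimpleBulk is not the real ExportBulk API), might look like this:

// Flush-once pattern: documents are accepted until the first flush wins the
// compareAndSet; later flushes complete immediately with nothing to do.
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

abstract class SimpleBulk<T> {

    /** {@code false} means the bulk can still be added onto. */
    private final AtomicBoolean closed = new AtomicBoolean();

    protected final List<T> pending = new ArrayList<>();

    /** Adds are silently ignored once the bulk has been flushed. */
    public void add(Collection<T> docs) {
        if (closed.get() == false) {
            pending.addAll(docs);
        }
    }

    /** Only the first caller triggers the real flush. */
    public void flush(Consumer<List<T>> onFlushed) {
        if (closed.compareAndSet(false, true)) {
            doFlush(onFlushed);
        } else {
            onFlushed.accept(Collections.emptyList());
        }
    }

    protected abstract void doFlush(Consumer<List<T>> onFlushed);
}

With this shape there is no separate close() to coordinate: callers that used to close the bulk simply flush it, which matches how Exporters.doExport is rewritten below.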
- - protected abstract void doClose(ActionListener listener); - - protected boolean isClosed() { - return state.get() == State.CLOSED; - } - /** * This class holds multiple export bulks exposed as a single compound bulk. */ @@ -170,54 +123,16 @@ protected void doFlush(ActionListener listener) { iteratingActionListener.run(); } - @Override - protected void doClose(ActionListener listener) { - final SetOnce exceptionRef = new SetOnce<>(); - final BiConsumer> bulkBiConsumer = (exportBulk, iteratingListener) -> { - // for every export bulk we close and pass back the response, which should always be - // null. When we have an exception, we wrap the first and then add suppressed exceptions - exportBulk.doClose(ActionListener.wrap(iteratingListener::onResponse, e -> { - if (exceptionRef.get() == null) { - exceptionRef.set(new ExportException("failed to close export bulks", e)); - } else if (e instanceof ExportException) { - exceptionRef.get().addExportException((ExportException) e); - } else { - exceptionRef.get().addSuppressed(e); - } - // this is tricky to understand but basically we suppress the exception for use - // later on and call the passed in listener so that iteration continues - iteratingListener.onResponse(null); - })); - }; - IteratingActionListener iteratingActionListener = - new IteratingActionListener<>(newExceptionHandlingListener(exceptionRef, listener), bulkBiConsumer, bulks, - threadContext); - iteratingActionListener.run(); - } - private static ActionListener newExceptionHandlingListener(SetOnce exceptionRef, ActionListener listener) { - return new ActionListener() { - @Override - public void onResponse(Void aVoid) { - if (exceptionRef.get() == null) { - listener.onResponse(null); - } else { - listener.onFailure(exceptionRef.get()); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); + return ActionListener.wrap(r -> { + if (exceptionRef.get() == null) { + listener.onResponse(null); + } else { + listener.onFailure(exceptionRef.get()); } - }; + }, listener::onFailure); } } - private enum State { - INITIALIZING, - FLUSHING, - CLOSED - } } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java index 484361ddc542e..1b8f5dab9e356 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java @@ -179,6 +179,14 @@ void wrapExportBulk(final ActionListener listener) { } final Map exporterMap = exporters.get(); + + // if no exporters are defined (which is only possible if all are defined explicitly disabled), + // then ignore the request immediately + if (exporterMap.isEmpty()) { + listener.onResponse(null); + return; + } + final AtomicArray accumulatedBulks = new AtomicArray<>(exporterMap.size()); final CountDown countDown = new CountDown(exporterMap.size()); @@ -225,7 +233,7 @@ private void doExport(final ExportBulk bulk, final Collection doc } catch (ExportException e) { exceptionRef.set(e); } finally { - bulk.close(lifecycleState() == Lifecycle.State.STARTED, ActionListener.wrap(r -> { + bulk.flush(ActionListener.wrap(r -> { if (exceptionRef.get() == null) { listener.onResponse(null); } else { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/ClusterAlertHttpResource.java 
b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/ClusterAlertHttpResource.java index f6547c575f787..efec87cc919f4 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/ClusterAlertHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/ClusterAlertHttpResource.java @@ -105,7 +105,7 @@ protected void doCheck(final RestClient client, final ActionListener li @Override protected void doPublish(final RestClient client, final ActionListener listener) { putResource(client, listener, logger, - "/_watcher/watch", watchId.get(), this::watchToHttpEntity, "monitoring cluster alert", + "/_watcher/watch", watchId.get(), Collections.emptyMap(), this::watchToHttpEntity, "monitoring cluster alert", resourceOwnerName, "monitoring cluster"); } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java index cd307322cb547..3476495cc928a 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExportBulk.java @@ -123,11 +123,6 @@ public void onFailure(Exception exception) { } } - @Override - protected void doClose(ActionListener listener) { - listener.onResponse(null); - } - private byte[] toBulkBytes(final MonitoringDoc doc) throws IOException { final XContentType xContentType = XContentType.JSON; final XContent xContent = xContentType.xContent(); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java index 19844be819a67..0f38ed92da16b 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java @@ -592,7 +592,8 @@ private static void configureTemplateResources(final Config config, resources.add(new TemplateHttpResource(resourceOwnerName, templateTimeout, templateName, templateLoader)); } - // add old templates, like ".monitoring-data-2" and ".monitoring-es-2" so that other versions can continue to work + // Add dummy templates (e.g. ".monitoring-es-6") to enable the ability to check which version of the actual + // index template (e.g. ".monitoring-es") should be applied. boolean createLegacyTemplates = TEMPLATE_CREATE_LEGACY_VERSIONS_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings()); if (createLegacyTemplates) { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/MultiHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/MultiHttpResource.java index d902abf71131e..44bda96a82d1d 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/MultiHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/MultiHttpResource.java @@ -19,7 +19,7 @@ *
<p>
* By telling the {@code MultiHttpResource} to become dirty, it effectively marks all of its sub-resources dirty as well. *
<p>
- * Sub-resources should be the sole responsibility of the the {@code MultiHttpResource}; there should not be something using them directly + * Sub-resources should be the sole responsibility of the {@code MultiHttpResource}; there should not be something using them directly * if they are included in a {@code MultiHttpResource}. */ public class MultiHttpResource extends HttpResource { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PipelineHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PipelineHttpResource.java index 206ef924c5399..8dd48c2876f42 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PipelineHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PipelineHttpResource.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; +import java.util.Collections; import java.util.Objects; import java.util.function.Supplier; @@ -72,7 +73,7 @@ protected void doCheck(final RestClient client, final ActionListener li @Override protected void doPublish(final RestClient client, final ActionListener listener) { putResource(client, listener, logger, - "/_ingest/pipeline", pipelineName, this::pipelineToHttpEntity, "monitoring pipeline", + "/_ingest/pipeline", pipelineName, Collections.emptyMap(), this::pipelineToHttpEntity, "monitoring pipeline", resourceOwnerName, "monitoring cluster"); } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java index 143132b3dd055..4b73a71604599 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java @@ -70,7 +70,7 @@ public abstract class PublishableHttpResource extends HttpResource { /** * The default parameters to use for any request. */ - protected final Map parameters; + protected final Map defaultParameters; /** * Create a new {@link PublishableHttpResource} that {@linkplain #isDirty() is dirty}. @@ -102,9 +102,9 @@ protected PublishableHttpResource(final String resourceOwnerName, @Nullable fina parameters.putAll(baseParameters); parameters.put("master_timeout", masterTimeout.toString()); - this.parameters = Collections.unmodifiableMap(parameters); + this.defaultParameters = Collections.unmodifiableMap(parameters); } else { - this.parameters = baseParameters; + this.defaultParameters = baseParameters; } } @@ -113,8 +113,8 @@ protected PublishableHttpResource(final String resourceOwnerName, @Nullable fina * * @return Never {@code null}. 
*/ - public Map getParameters() { - return parameters; + public Map getDefaultParameters() { + return defaultParameters; } /** @@ -221,7 +221,8 @@ protected void checkForResource(final RestClient client, logger.trace("checking if {} [{}] exists on the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType); final Request request = new Request("GET", resourceBasePath + "/" + resourceName); - addParameters(request); + addDefaultParameters(request); + // avoid exists and DNE parameters from being an exception by default final Set expectedResponseCodes = Sets.union(exists, doesNotExist); request.addParameter("ignore", expectedResponseCodes.stream().map(i -> i.toString()).collect(Collectors.joining(","))); @@ -299,6 +300,7 @@ public void onFailure(final Exception exception) { * @param logger The logger to use for status messages. * @param resourceBasePath The base path/endpoint to check for the resource (e.g., "/_template"). * @param resourceName The name of the resource (e.g., "template123"). + * @param parameters Map of query string parameters, if any. * @param body The {@link HttpEntity} that makes up the body of the request. * @param resourceType The type of resource (e.g., "monitoring template"). * @param resourceOwnerName The user-recognizeable resource owner. @@ -309,6 +311,7 @@ protected void putResource(final RestClient client, final Logger logger, final String resourceBasePath, final String resourceName, + final Map parameters, final java.util.function.Supplier body, final String resourceType, final String resourceOwnerName, @@ -317,7 +320,8 @@ protected void putResource(final RestClient client, final Request request = new Request("PUT", resourceBasePath + "/" + resourceName); - addParameters(request); + addDefaultParameters(request); + addParameters(request, parameters); request.setEntity(body.get()); client.performRequestAsync(request, new ResponseListener() { @@ -376,9 +380,9 @@ protected void deleteResource(final RestClient client, logger.trace("deleting {} [{}] from the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType); final Request request = new Request("DELETE", resourceBasePath + "/" + resourceName); - addParameters(request); + addDefaultParameters(request); - if (false == parameters.containsKey("ignore")) { + if (false == defaultParameters.containsKey("ignore")) { // avoid 404 being an exception by default request.addParameter("ignore", Integer.toString(RestStatus.NOT_FOUND.getStatus())); } @@ -463,7 +467,11 @@ protected boolean alwaysReplaceResource(final Response response) { return true; } - private void addParameters(final Request request) { + private void addDefaultParameters(final Request request) { + this.addParameters(request, defaultParameters); + } + + private void addParameters(final Request request, final Map parameters) { for (final Map.Entry param : parameters.entrySet()) { request.addParameter(param.getKey(), param.getValue()); } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/TemplateHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/TemplateHttpResource.java index 28bca87b0a096..d523864c6715b 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/TemplateHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/TemplateHttpResource.java @@ -12,19 +12,26 @@ import org.apache.logging.log4j.Logger; import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.client.RestClient; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; +import java.io.IOException; import java.util.Collections; import java.util.Map; import java.util.Objects; import java.util.TreeMap; import java.util.function.Supplier; -import static org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; - /** * {@code TemplateHttpResource}s allow the checking and uploading of templates to a remote cluster. *
<p>
@@ -41,7 +48,6 @@ public class TemplateHttpResource extends PublishableHttpResource { static { Map parameters = new TreeMap<>(); parameters.put("filter_path", FILTER_PATH_RESOURCE_VERSION); - parameters.put(INCLUDE_TYPE_NAME_PARAMETER, "true"); PARAMETERS = Collections.unmodifiableMap(parameters); } @@ -89,7 +95,7 @@ protected void doCheck(final RestClient client, final ActionListener li @Override protected void doPublish(final RestClient client, final ActionListener listener) { putResource(client, listener, logger, - "/_template", templateName, this::templateToHttpEntity, "monitoring template", + "/_template", templateName, Collections.emptyMap(), this::templateToHttpEntity, "monitoring template", resourceOwnerName, "monitoring cluster"); } @@ -98,8 +104,17 @@ protected void doPublish(final RestClient client, final ActionListener * * @return Never {@code null}. */ - HttpEntity templateToHttpEntity() { - return new StringEntity(template.get(), ContentType.APPLICATION_JSON); + HttpEntity templateToHttpEntity() { + // the internal representation of a template has type nested under mappings. + // this uses xContent to help remove the type before sending to the remote cluster + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, template.get())) { + XContentBuilder builder = JsonXContent.contentBuilder(); + IndexTemplateMetaData.Builder.removeType(IndexTemplateMetaData.Builder.fromXContent(parser, templateName), builder); + return new StringEntity(BytesReference.bytes(builder).utf8ToString(), ContentType.APPLICATION_JSON); + } catch (IOException ex) { + throw new IllegalStateException("Cannot serialize template [" + templateName + "] for monitoring export", ex); + } } } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalBulk.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalBulk.java index 3eb92fd68a3cf..4b80f614b485d 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalBulk.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalBulk.java @@ -30,8 +30,8 @@ /** * LocalBulk exports monitoring data in the local cluster using bulk requests. Its usage is not thread safe since the - * {@link LocalBulk#add(Collection)}, {@link LocalBulk#flush(org.elasticsearch.action.ActionListener)} and - * {@link LocalBulk#doClose(ActionListener)} methods are not synchronized. + * {@link LocalBulk#add(Collection)} and {@link LocalBulk#flush(org.elasticsearch.action.ActionListener)} + * methods are not synchronized. 
*/ public class LocalBulk extends ExportBulk { @@ -52,13 +52,10 @@ public class LocalBulk extends ExportBulk { } @Override - public void doAdd(Collection docs) throws ExportException { + protected void doAdd(Collection docs) throws ExportException { ExportException exception = null; for (MonitoringDoc doc : docs) { - if (isClosed()) { - return; - } if (requestBuilder == null) { requestBuilder = client.prepareBulk(); } @@ -99,8 +96,8 @@ public void doAdd(Collection docs) throws ExportException { } @Override - public void doFlush(ActionListener listener) { - if (requestBuilder == null || requestBuilder.numberOfActions() == 0 || isClosed()) { + protected void doFlush(ActionListener listener) { + if (requestBuilder == null || requestBuilder.numberOfActions() == 0) { listener.onResponse(null); } else { try { @@ -138,11 +135,4 @@ void throwExportException(BulkItemResponse[] bulkItemResponses, ActionListener listener) { - if (isClosed() == false) { - requestBuilder = null; - } - listener.onResponse(null); - } } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java index 9df60f8c5ac73..434b0bd9fa0ff 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.monitoring.rest.action; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; @@ -40,7 +41,8 @@ public class RestMonitoringBulkAction extends XPackRestHandler { public static final String MONITORING_ID = "system_id"; public static final String MONITORING_VERSION = "system_api_version"; public static final String INTERVAL = "interval"; - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestMonitoringBulkAction.class)); + private static final Logger logger = LogManager.getLogger(RestMonitoringBulkAction.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); private final Map> supportedApiVersions; public RestMonitoringBulkAction(Settings settings, RestController controller) { @@ -50,10 +52,6 @@ public RestMonitoringBulkAction(Settings settings, RestController controller) { POST, "/_xpack/monitoring/_bulk", deprecationLogger); controller.registerWithDeprecatedHandler(PUT, "/_monitoring/bulk", this, PUT, "/_xpack/monitoring/_bulk", deprecationLogger); - controller.registerWithDeprecatedHandler(POST, "/_monitoring/{type}/bulk", this, - POST, "/_xpack/monitoring/{type}/_bulk", deprecationLogger); - controller.registerWithDeprecatedHandler(PUT, "/_monitoring/{type}/bulk", this, - PUT, "/_xpack/monitoring/{type}/_bulk", deprecationLogger); final List allVersions = Arrays.asList( MonitoringTemplateUtils.TEMPLATE_VERSION, @@ -63,8 +61,7 @@ public RestMonitoringBulkAction(Settings settings, RestController controller) { final Map> versionsMap = new HashMap<>(); versionsMap.put(MonitoredSystem.KIBANA, allVersions); versionsMap.put(MonitoredSystem.LOGSTASH, allVersions); - // Beats did not report data in the 5.x timeline, so it should never send the 
original version - versionsMap.put(MonitoredSystem.BEATS, Collections.singletonList(MonitoringTemplateUtils.TEMPLATE_VERSION)); + versionsMap.put(MonitoredSystem.BEATS, allVersions); supportedApiVersions = Collections.unmodifiableMap(versionsMap); } @@ -75,7 +72,6 @@ public String getName() { @Override public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { - final String defaultType = request.param("type"); final String id = request.param(MONITORING_ID); if (Strings.isEmpty(id)) { @@ -106,7 +102,7 @@ public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient cli final long intervalMillis = parseTimeValue(intervalAsString, INTERVAL).getMillis(); final MonitoringBulkRequestBuilder requestBuilder = client.monitoring().prepareMonitoringBulk(); - requestBuilder.add(system, defaultType, request.content(), request.getXContentType(), timestamp, intervalMillis); + requestBuilder.add(system, request.content(), request.getXContentType(), timestamp, intervalMillis); return channel -> requestBuilder.execute(new RestBuilderListener(channel) { @Override public RestResponse buildResponse(MonitoringBulkResponse response, XContentBuilder builder) throws Exception { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json index fca6171b62329..4e250d5d743bd 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json @@ -2,7 +2,7 @@ "metadata": { "name": "X-Pack Monitoring: Cluster Status (${monitoring.watch.cluster_uuid})", "xpack": { - "alert_index": ".monitoring-alerts-6", + "alert_index": ".monitoring-alerts-7", "cluster_uuid": "${monitoring.watch.cluster_uuid}", "link": "elasticsearch/indices", "severity": 2100, @@ -47,19 +47,8 @@ } }, { - "bool": { - "should": [ - { - "term": { - "_type": "cluster_state" - } - }, - { - "term": { - "type": "cluster_stats" - } - } - ] + "term": { + "type": "cluster_stats" } }, { @@ -82,7 +71,7 @@ "search": { "request": { "indices": [ - ".monitoring-alerts-6" + ".monitoring-alerts-7" ], "body": { "size": 1, @@ -110,7 +99,7 @@ "request": { "search_type": "query_then_fetch", "indices": [ - ".monitoring-kibana-6-*" + ".monitoring-kibana-7-*" ], "body": { "size": 1, @@ -151,7 +140,7 @@ "actions": { "add_to_alerts_index": { "index": { - "index": ".monitoring-alerts-6", + "index": ".monitoring-alerts-7", "doc_id": "${monitoring.watch.unique_id}" } }, diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json index 0566d03f21f5f..d79cb786267d4 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json @@ -2,7 +2,7 @@ "metadata": { "name": "X-Pack Monitoring: Nodes Changed (${monitoring.watch.cluster_uuid})", "xpack": { - "alert_index": ".monitoring-alerts-6", + "alert_index": ".monitoring-alerts-7", "cluster_uuid": "${monitoring.watch.cluster_uuid}", "link": "elasticsearch/nodes", "severity": 1999, @@ -76,7 +76,7 @@ "search": { "request": { "indices": [ - ".monitoring-alerts-6" + ".monitoring-alerts-7" ], "body": { "size": 1, @@ -116,7 +116,7 @@ 
"search": { "request": { "indices": [ - ".monitoring-kibana-*" + ".monitoring-kibana-7-*" ], "body": { "size": 1, @@ -157,7 +157,7 @@ "actions": { "add_to_alerts_index": { "index": { - "index": ".monitoring-alerts-6" + "index": ".monitoring-alerts-7" } }, "send_email_to_admin": { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json index 61d77d2b602fb..37132a03c7b64 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json @@ -2,7 +2,7 @@ "metadata": { "name": "X-Pack Monitoring: Elasticsearch Version Mismatch (${monitoring.watch.cluster_uuid})", "xpack": { - "alert_index": ".monitoring-alerts-6", + "alert_index": ".monitoring-alerts-7", "cluster_uuid": "${monitoring.watch.cluster_uuid}", "link": "elasticsearch/nodes", "severity": 1000, @@ -40,19 +40,8 @@ } }, { - "bool": { - "should": [ - { - "term": { - "_type": "cluster_stats" - } - }, - { - "term": { - "type": "cluster_stats" - } - } - ] + "term": { + "type": "cluster_stats" } }, { @@ -78,7 +67,7 @@ "search": { "request": { "indices": [ - ".monitoring-alerts-6" + ".monitoring-alerts-7" ], "body": { "size": 1, @@ -106,7 +95,7 @@ "request": { "search_type": "query_then_fetch", "indices": [ - ".monitoring-kibana-6-*" + ".monitoring-kibana-7-*" ], "body": { "size": 1, @@ -147,7 +136,7 @@ "actions": { "add_to_alerts_index": { "index": { - "index": ".monitoring-alerts-6", + "index": ".monitoring-alerts-7", "doc_id": "${monitoring.watch.unique_id}" } }, diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json index 95de1a1a6383e..3e08fd98843d1 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json @@ -2,7 +2,7 @@ "metadata": { "name": "X-Pack Monitoring: Kibana Version Mismatch (${monitoring.watch.cluster_uuid})", "xpack": { - "alert_index": ".monitoring-alerts-6", + "alert_index": ".monitoring-alerts-7", "cluster_uuid": "${monitoring.watch.cluster_uuid}", "link": "kibana/instances", "severity": 1000, @@ -44,19 +44,8 @@ } }, { - "bool": { - "should": [ - { - "term": { - "_type": "kibana_stats" - } - }, - { - "term": { - "type": "kibana_stats" - } - } - ] + "term": { + "type": "kibana_stats" } } ] @@ -98,7 +87,7 @@ "search": { "request": { "indices": [ - ".monitoring-alerts-6" + ".monitoring-alerts-7" ], "body": { "size": 1, @@ -126,7 +115,7 @@ "request": { "search_type": "query_then_fetch", "indices": [ - ".monitoring-kibana-6-*" + ".monitoring-kibana-7-*" ], "body": { "size": 1, @@ -167,7 +156,7 @@ "actions": { "add_to_alerts_index": { "index": { - "index": ".monitoring-alerts-6", + "index": ".monitoring-alerts-7", "doc_id": "${monitoring.watch.unique_id}" } }, diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json index 7448000fa8516..8bb5b5efe9d79 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json +++ 
b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json @@ -2,7 +2,7 @@ "metadata": { "name": "X-Pack Monitoring: Logstash Version Mismatch (${monitoring.watch.cluster_uuid})", "xpack": { - "alert_index": ".monitoring-alerts-6", + "alert_index": ".monitoring-alerts-7", "cluster_uuid": "${monitoring.watch.cluster_uuid}", "link": "logstash/instances", "severity": 1000, @@ -44,19 +44,8 @@ } }, { - "bool": { - "should": [ - { - "term": { - "_type": "logstash_stats" - } - }, - { - "term": { - "type": "logstash_stats" - } - } - ] + "term": { + "type": "logstash_stats" } } ] @@ -98,7 +87,7 @@ "search": { "request": { "indices": [ - ".monitoring-alerts-6" + ".monitoring-alerts-7" ], "body": { "size": 1, @@ -126,7 +115,7 @@ "request": { "search_type": "query_then_fetch", "indices": [ - ".monitoring-kibana-6-*" + ".monitoring-kibana-7-*" ], "body": { "size": 1, @@ -167,7 +156,7 @@ "actions": { "add_to_alerts_index": { "index": { - "index": ".monitoring-alerts-6", + "index": ".monitoring-alerts-7", "doc_id": "${monitoring.watch.unique_id}" } }, diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json index d61bb3cd952cc..3f1f49e0240d5 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json @@ -5,7 +5,7 @@ "link": "license", "expires_days": [ 60, 30, 14, 7 ], "severity": 0, - "alert_index": ".monitoring-alerts-6", + "alert_index": ".monitoring-alerts-7", "cluster_uuid": "${monitoring.watch.cluster_uuid}", "type": "monitoring", "version_created": 7000099, @@ -72,7 +72,7 @@ "search": { "request": { "indices": [ - ".monitoring-alerts-6" + ".monitoring-alerts-7" ], "body": { "size": 1, @@ -99,7 +99,7 @@ "search": { "request": { "indices": [ - ".monitoring-kibana-6-*" + ".monitoring-kibana-7-*" ], "body": { "size": 1, @@ -140,7 +140,7 @@ "actions": { "add_to_alerts_index": { "index": { - "index": ".monitoring-alerts-6", + "index": ".monitoring-alerts-7", "doc_id": "${monitoring.watch.unique_id}" } }, diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/LocalStateMonitoring.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/LocalStateMonitoring.java index f1563e8c68b56..8e0346270ed47 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/LocalStateMonitoring.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/LocalStateMonitoring.java @@ -10,6 +10,7 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.indexlifecycle.IndexLifecycle; import org.elasticsearch.xpack.watcher.Watcher; import java.nio.file.Path; @@ -47,5 +48,6 @@ protected XPackLicenseState getLicenseState() { return thisVar.getLicenseState(); } }); + plugins.add(new IndexLifecycle(settings)); } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTestUtils.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTestUtils.java index 647835bf9311e..4c8d167514c14 100644 --- 
a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTestUtils.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTestUtils.java @@ -23,6 +23,9 @@ public final class MonitoringTestUtils { + // maximum number of milliseconds before a five digit year comes in, which could change formatting + private static final long MAX_MILLIS_BEFORE_10000 = 253402300799999L; + private MonitoringTestUtils() { } @@ -37,7 +40,7 @@ public static MonitoringDoc.Node randomMonitoringNode(final Random random) { final String host = fakeTransportAddress.address().getHostString(); final String transportAddress = fakeTransportAddress.toString(); final String ip = fakeTransportAddress.getAddress(); - final long timestamp = RandomNumbers.randomLongBetween(random, 0, Long.MAX_VALUE); + final long timestamp = RandomNumbers.randomLongBetween(random, 0, MAX_MILLIS_BEFORE_10000); return new MonitoringDoc.Node(id, host, transportAddress, ip, name, timestamp); } @@ -87,8 +90,7 @@ public static MonitoringBulkDoc randomMonitoringBulkDoc(final Random random, final MonitoredSystem system, final String type) throws IOException { final String id = random.nextBoolean() ? RandomStrings.randomAsciiLettersOfLength(random, 5) : null; - // ending date is the last second of 9999, should be sufficient - final long timestamp = RandomNumbers.randomLongBetween(random, 0L, 253402300799000L); + final long timestamp = RandomNumbers.randomLongBetween(random, 0L, MAX_MILLIS_BEFORE_10000); final long interval = RandomNumbers.randomLongBetween(random, 0L, Long.MAX_VALUE); return new MonitoringBulkDoc(system, type, id, timestamp, interval, source, xContentType); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java index fc3bf633a3964..5113371f2b338 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; @@ -77,7 +76,6 @@ public void testAdd() throws IOException { public void testAddRequestContent() throws IOException { final XContentType xContentType = XContentType.JSON; - final String defaultType = rarely() ? 
randomAlphaOfLength(4) : null; final int nbDocs = randomIntBetween(1, 20); final String[] types = new String[nbDocs]; @@ -94,10 +92,10 @@ public void testAddRequestContent() throws IOException { if (rarely()) { builder.field("_index", ""); } - if (defaultType == null || randomBoolean()) { - types[i] = randomAlphaOfLength(5); - builder.field("_type", types[i]); - } + + types[i] = randomAlphaOfLength(5); + builder.field("_type", types[i]); + if (randomBoolean()) { ids[i] = randomAlphaOfLength(10); builder.field("_id", ids[i]); @@ -125,7 +123,7 @@ public void testAddRequestContent() throws IOException { final long interval = randomNonNegativeLong(); final MonitoringBulkRequest bulkRequest = new MonitoringBulkRequest(); - bulkRequest.add(system, defaultType, content.bytes(), xContentType, timestamp, interval); + bulkRequest.add(system, content.bytes(), xContentType, timestamp, interval); final Collection bulkDocs = bulkRequest.getDocs(); assertNotNull(bulkDocs); @@ -134,7 +132,7 @@ public void testAddRequestContent() throws IOException { int count = 0; for (final MonitoringBulkDoc bulkDoc : bulkDocs) { assertThat(bulkDoc.getSystem(), equalTo(system)); - assertThat(bulkDoc.getType(), equalTo(types[count] != null ? types[count] : defaultType)); + assertThat(bulkDoc.getType(), equalTo(types[count])); assertThat(bulkDoc.getId(), equalTo(ids[count])); assertThat(bulkDoc.getTimestamp(), equalTo(timestamp)); assertThat(bulkDoc.getIntervalMillis(), equalTo(interval)); @@ -142,8 +140,6 @@ public void testAddRequestContent() throws IOException { assertThat(bulkDoc.getXContentType(), equalTo(xContentType)); ++count; } - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testAddRequestContentWithEmptySource() throws IOException { @@ -187,12 +183,10 @@ public void testAddRequestContentWithEmptySource() throws IOException { final MonitoringBulkRequest bulkRequest = new MonitoringBulkRequest(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - bulkRequest.add(randomFrom(MonitoredSystem.values()), null, content.bytes(), xContentType, 0L, 0L) + bulkRequest.add(randomFrom(MonitoredSystem.values()), content.bytes(), xContentType, 0L, 0L) ); assertThat(e.getMessage(), containsString("source is missing for monitoring document [][doc][" + nbDocs + "]")); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testAddRequestContentWithUnrecognizedIndexName() throws IOException { @@ -226,12 +220,10 @@ public void testAddRequestContentWithUnrecognizedIndexName() throws IOException final MonitoringBulkRequest bulkRequest = new MonitoringBulkRequest(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - bulkRequest.add(randomFrom(MonitoredSystem.values()), null, content.bytes(), xContentType, 0L, 0L) + bulkRequest.add(randomFrom(MonitoredSystem.values()), content.bytes(), xContentType, 0L, 0L) ); assertThat(e.getMessage(), containsString("unrecognized index name [" + indexName + "]")); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testSerialization() throws IOException { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index fbb923c3f2f69..e436484bceba4 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -265,6 +265,8 @@ public void testToXContent() throws IOException { when(mockJvmInfo.getVmName()).thenReturn("_jvm_vm_name"); when(mockJvmInfo.getVmVersion()).thenReturn("_jvm_vm_version"); when(mockJvmInfo.getVmVendor()).thenReturn("_jvm_vm_vendor"); + when(mockJvmInfo.getBundledJdk()).thenReturn(true); + when(mockJvmInfo.getUsingBundledJdk()).thenReturn(true); final NodeStats mockNodeStats = mock(NodeStats.class); when(mockNodeStats.getTimestamp()).thenReturn(0L); @@ -481,6 +483,8 @@ public void testToXContent() throws IOException { + "\"vm_name\":\"_jvm_vm_name\"," + "\"vm_version\":\"_jvm_vm_version\"," + "\"vm_vendor\":\"_jvm_vm_vendor\"," + + "\"bundled_jdk\":true," + + "\"using_bundled_jdk\":true," + "\"count\":1" + "}" + "]," diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java index 753930d2b6a51..150271e249a55 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/node/NodeStatsMonitoringDocTests.java @@ -329,7 +329,7 @@ private static NodeStats mockNodeStats() { final OsStats.Cgroup osCgroup = new OsStats.Cgroup("_cpu_acct_ctrl_group", ++iota, "_cpu_ctrl_group", ++iota, ++iota, osCpuStat, "_memory_ctrl_group", "2000000000", "1000000000"); - final OsStats.Mem osMem = new OsStats.Mem(no, no); + final OsStats.Mem osMem = new OsStats.Mem(0, 0); final OsStats.Swap osSwap = new OsStats.Swap(no, no); final OsStats os = new OsStats(no, osCpu, osMem, osSwap, osCgroup); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java index a3b5f5d018da3..849b7e0a1059d 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/BaseMonitoringDocTestCase.java @@ -26,8 +26,6 @@ import org.junit.Before; import java.io.IOException; -import java.time.Instant; -import java.time.ZoneOffset; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -166,13 +164,6 @@ public final void testToXContentContainsCommonFields() throws IOException { } } - public void testToUTC() { - final long timestamp = System.currentTimeMillis(); - final String expected = Instant.ofEpochMilli(timestamp).atZone(ZoneOffset.UTC).toString(); - - assertEquals(expected, MonitoringDoc.toUTC(timestamp)); - } - public void testMonitoringNodeConstructor() { final String id = randomAlphaOfLength(5); final String name = randomAlphaOfLengthBetween(3, 10); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/ExportersTests.java 
b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/ExportersTests.java index fde975bfab165..74b38afe45159 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/ExportersTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/ExportersTests.java @@ -52,6 +52,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -259,6 +260,25 @@ public void testExporterBlocksOnClusterState() { verify(state).blocks(); } + /** + * Verifies that, when no exporters are enabled, the {@code Exporters} will still return as expected. + */ + public void testNoExporters() throws Exception { + Settings.Builder settings = + Settings.builder() + .put("xpack.monitoring.exporters.explicitly_disabled.type", "local") + .put("xpack.monitoring.exporters.explicitly_disabled.enabled", false); + + Exporters exporters = new Exporters(settings.build(), factories, clusterService, licenseState, threadContext); + exporters.start(); + + assertThat(exporters.getEnabledExporters(), empty()); + + assertExporters(exporters); + + exporters.close(); + } + /** * This test creates N threads that export a random number of document * using a {@link Exporters} instance. @@ -276,18 +296,37 @@ public void testConcurrentExports() throws Exception { Exporters exporters = new Exporters(settings.build(), factories, clusterService, licenseState, threadContext); exporters.start(); + assertThat(exporters.getEnabledExporters(), hasSize(nbExporters)); + + final int total = assertExporters(exporters); + + for (Exporter exporter : exporters.getEnabledExporters()) { + assertThat(exporter, instanceOf(CountingExporter.class)); + assertThat(((CountingExporter) exporter).getExportedCount(), equalTo(total)); + } + + exporters.close(); + } + + /** + * Attempt to export a random number of documents via {@code exporters} from multiple threads. + * + * @param exporters The setup / started exporters instance to use. + * @return The total number of documents sent to the {@code exporters}. 
+ */ + private int assertExporters(final Exporters exporters) throws InterruptedException { final Thread[] threads = new Thread[3 + randomInt(7)]; final CyclicBarrier barrier = new CyclicBarrier(threads.length); final List exceptions = new CopyOnWriteArrayList<>(); + final AtomicInteger counter = new AtomicInteger(threads.length); int total = 0; for (int i = 0; i < threads.length; i++) { - int nbDocs = randomIntBetween(10, 50); - total += nbDocs; - + final int threadDocs = randomIntBetween(10, 50); final int threadNum = i; - final int threadDocs = nbDocs; + + total += threadDocs; threads[i] = new Thread(new AbstractRunnable() { @Override @@ -297,18 +336,25 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { - List docs = new ArrayList<>(); + final List docs = new ArrayList<>(); for (int n = 0; n < threadDocs; n++) { docs.add(new TestMonitoringDoc(randomAlphaOfLength(5), randomNonNegativeLong(), randomNonNegativeLong(), null, MonitoredSystem.ES, randomAlphaOfLength(5), null, String.valueOf(n))); } - barrier.await(10, TimeUnit.SECONDS); exporters.export(docs, ActionListener.wrap( - r -> logger.debug("--> thread [{}] successfully exported {} documents", threadNum, threadDocs), - e -> logger.debug("--> thread [{}] failed to export {} documents", threadNum, threadDocs))); - + r -> { + counter.decrementAndGet(); + logger.debug("--> thread [{}] successfully exported {} documents", threadNum, threadDocs); + }, + e -> { + exceptions.add(e); + logger.debug("--> thread [{}] failed to export {} documents", threadNum, threadDocs); + }) + ); + barrier.await(10, TimeUnit.SECONDS); } }, "export_thread_" + i); + threads[i].start(); } @@ -317,12 +363,9 @@ protected void doRun() throws Exception { } assertThat(exceptions, empty()); - for (Exporter exporter : exporters.getEnabledExporters()) { - assertThat(exporter, instanceOf(CountingExporter.class)); - assertThat(((CountingExporter) exporter).getExportedCount(), equalTo(total)); - } + assertThat(counter.get(), is(0)); - exporters.close(); + return total; } static class TestExporter extends Exporter { @@ -401,11 +444,6 @@ protected void doFlush(ActionListener listener) { listener.onResponse(null); } - @Override - protected void doClose(ActionListener listener) { - listener.onResponse(null); - } - int getCount() { return count.get(); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/MonitoringTemplateUtilsTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/MonitoringTemplateUtilsTests.java index 18c872a2bdeef..4272f386683a6 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/MonitoringTemplateUtilsTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/MonitoringTemplateUtilsTests.java @@ -58,7 +58,7 @@ public void testLoadTemplate() throws IOException { assertTemplate(source, equalTo("{\n" + " \"index_patterns\": \".monitoring-data-" + TEMPLATE_VERSION + "\",\n" + " \"mappings\": {\n" + - " \"doc\": {\n" + + " \"_doc\": {\n" + " \"_meta\": {\n" + " \"template.version\": \"" + TEMPLATE_VERSION + "\"\n" + " }\n" + @@ -115,5 +115,4 @@ public void testIndexName() { assertThat(indexName(formatter, MonitoredSystem.BEATS, timestamp), equalTo(".monitoring-beats-" + TEMPLATE_VERSION + "-2017-03-08-13.47.58")); } - } diff --git 
a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java index 798053d59be37..8eebd195b4938 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.Map; import java.util.Set; +import java.util.HashMap; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -81,7 +82,7 @@ protected void assertCheckDoesNotExist(final PublishableHttpResource resource, */ protected void assertCheckWithException(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName) { - assertCheckWithException(resource, getParameters(resource.getParameters()), resourceBasePath, resourceName); + assertCheckWithException(resource, getParameters(resource.getDefaultParameters()), resourceBasePath, resourceName); } /** @@ -140,7 +141,7 @@ protected void assertCheckAsDeleteWithException(final PublishableHttpResource re final Exception e = randomFrom(new IOException("expected"), new RuntimeException("expected"), responseException); final Request request = new Request("DELETE", endpoint); - addParameters(request, deleteParameters(resource.getParameters())); + addParameters(request, deleteParameters(resource.getDefaultParameters())); whenPerformRequestAsyncWith(client, request, e); resource.doCheck(client, listener); @@ -155,11 +156,13 @@ protected void assertCheckAsDeleteWithException(final PublishableHttpResource re * @param resource The resource to execute. * @param resourceBasePath The base endpoint (e.g., "/_template") * @param resourceName The resource name (e.g., the template or pipeline name). + * @param parameters Map of query string parameters, if any. * @param bodyType The request body provider's type. */ protected void assertPublishSucceeds(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName, + Map parameters, final Class bodyType) { - doPublishWithStatusCode(resource, resourceBasePath, resourceName, bodyType, successfulPublishStatus(), true); + doPublishWithStatusCode(resource, resourceBasePath, resourceName, parameters, bodyType, successfulPublishStatus(), true); } /** @@ -168,10 +171,12 @@ protected void assertPublishSucceeds(final PublishableHttpResource resource, fin * * @param resource The resource to execute. * @param resourceBasePath The base endpoint (e.g., "/_template") + * @param parameters Map of query string parameters, if any. * @param resourceName The resource name (e.g., the template or pipeline name). 
*/ protected void assertPublishWithException(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName, + Map parameters, final Class bodyType) { final String endpoint = concatenateEndpoint(resourceBasePath, resourceName); final Exception e = randomFrom(new IOException("expected"), new RuntimeException("expected")); @@ -182,16 +187,20 @@ protected void assertPublishWithException(final PublishableHttpResource resource verifyListener(null); + Map allParameters = new HashMap<>(); + allParameters.putAll(resource.getDefaultParameters()); + allParameters.putAll(parameters); + final ArgumentCaptor request = ArgumentCaptor.forClass(Request.class); verify(client).performRequestAsync(request.capture(), any(ResponseListener.class)); assertThat(request.getValue().getMethod(), is("PUT")); assertThat(request.getValue().getEndpoint(), is(endpoint)); - assertThat(request.getValue().getParameters(), is(resource.getParameters())); + assertThat(request.getValue().getParameters(), is(allParameters)); assertThat(request.getValue().getEntity(), instanceOf(bodyType)); } protected void assertParameters(final PublishableHttpResource resource) { - final Map parameters = new HashMap<>(resource.getParameters()); + final Map parameters = new HashMap<>(resource.getDefaultParameters()); if (masterTimeout != null && TimeValue.MINUS_ONE.equals(masterTimeout) == false) { assertThat(parameters.remove("master_timeout"), is(masterTimeout.toString())); @@ -204,7 +213,7 @@ protected void assertParameters(final PublishableHttpResource resource) { } protected void assertVersionParameters(final PublishableHttpResource resource) { - final Map parameters = new HashMap<>(resource.getParameters()); + final Map parameters = new HashMap<>(resource.getDefaultParameters()); if (masterTimeout != null && TimeValue.MINUS_ONE.equals(masterTimeout) == false) { assertThat(parameters.remove("master_timeout"), is(masterTimeout.toString())); @@ -244,7 +253,7 @@ protected void doCheckWithStatusCode(final PublishableHttpResource resource, fin final String endpoint = concatenateEndpoint(resourceBasePath, resourceName); final Response response = response("GET", endpoint, status, entity); - doCheckWithStatusCode(resource, getParameters(resource.getParameters(), exists, doesNotExist), endpoint, expected, response); + doCheckWithStatusCode(resource, getParameters(resource.getDefaultParameters(), exists, doesNotExist), endpoint, expected, response); } protected void doCheckWithStatusCode(final PublishableHttpResource resource, final Map expectedParameters, @@ -262,6 +271,7 @@ protected void doCheckWithStatusCode(final PublishableHttpResource resource, fin } private void doPublishWithStatusCode(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName, + Map parameters, final Class bodyType, final RestStatus status, final boolean errorFree) { @@ -277,9 +287,13 @@ private void doPublishWithStatusCode(final PublishableHttpResource resource, fin final ArgumentCaptor request = ArgumentCaptor.forClass(Request.class); verify(client).performRequestAsync(request.capture(), any(ResponseListener.class)); + Map allParameters = new HashMap<>(); + allParameters.putAll(resource.getDefaultParameters()); + allParameters.putAll(parameters); + assertThat(request.getValue().getMethod(), is("PUT")); assertThat(request.getValue().getEndpoint(), is(endpoint)); - assertThat(request.getValue().getParameters(), is(resource.getParameters())); + assertThat(request.getValue().getParameters(), 
is(allParameters)); assertThat(request.getValue().getEntity(), instanceOf(bodyType)); } @@ -297,7 +311,7 @@ protected void doCheckAsDeleteWithStatusCode(final PublishableHttpResource resou final String endpoint, final Boolean expected, final Response response) { final Request request = new Request("DELETE", endpoint); - addParameters(request, deleteParameters(resource.getParameters())); + addParameters(request, deleteParameters(resource.getDefaultParameters())); whenPerformRequestAsyncWith(client, request, response); resource.doCheck(client, listener); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/ClusterAlertHttpResourceTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/ClusterAlertHttpResourceTests.java index af608d85911cb..a7ede37505e8c 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/ClusterAlertHttpResourceTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/ClusterAlertHttpResourceTests.java @@ -7,6 +7,7 @@ import java.io.IOException; import java.io.InputStream; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import org.apache.http.HttpEntity; @@ -127,11 +128,11 @@ public void testDoCheckWithExceptionAsDeleteWatchError() throws IOException { } public void testDoPublishTrue() throws IOException { - assertPublishSucceeds(resource, "/_watcher/watch", watchId, StringEntity.class); + assertPublishSucceeds(resource, "/_watcher/watch", watchId, Collections.emptyMap(), StringEntity.class); } public void testDoPublishFalseWithException() throws IOException { - assertPublishWithException(resource, "/_watcher/watch", watchId, StringEntity.class); + assertPublishWithException(resource, "/_watcher/watch", watchId, Collections.emptyMap(), StringEntity.class); } public void testShouldReplaceClusterAlertRethrowsIOException() throws IOException { @@ -181,7 +182,7 @@ public void testShouldReplaceCheckAlertChecksVersion() throws IOException { } public void testParameters() { - final Map parameters = new HashMap<>(resource.getParameters()); + final Map parameters = new HashMap<>(resource.getDefaultParameters()); assertThat(parameters.remove("filter_path"), is("metadata.xpack.version_created")); assertThat(parameters.isEmpty(), is(true)); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java index ac3abda0599dd..d4ea017ca8b3d 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -21,11 +22,18 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import 
org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.http.MockRequest; @@ -59,7 +67,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.LAST_UPDATED_VERSION; import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.TEMPLATE_VERSION; import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.indexName; @@ -281,14 +288,14 @@ public void testHostChangeReChecksTemplate() throws Exception { MockRequest recordedRequest = secondWebServer.takeRequest(); assertThat(recordedRequest.getMethod(), equalTo("GET")); assertThat(recordedRequest.getUri().getPath(), equalTo(resourcePrefix + template.v1())); - assertMonitorVersionQueryString(resourcePrefix, recordedRequest.getUri().getQuery()); + assertMonitorVersionQueryString(recordedRequest.getUri().getQuery(), Collections.emptyMap()); if (missingTemplate.equals(template.v1())) { recordedRequest = secondWebServer.takeRequest(); assertThat(recordedRequest.getMethod(), equalTo("PUT")); assertThat(recordedRequest.getUri().getPath(), equalTo(resourcePrefix + template.v1())); - assertMonitorVersionQueryString(resourcePrefix, recordedRequest.getUri().getQuery()); - assertThat(recordedRequest.getBody(), equalTo(template.v2())); + assertMonitorVersionQueryString(recordedRequest.getUri().getQuery(), Collections.emptyMap()); + assertThat(recordedRequest.getBody(), equalTo(getExternalTemplateRepresentation(template.v2()))); } } assertMonitorPipelines(secondWebServer, !pipelineExistsAlready, null, null); @@ -464,7 +471,7 @@ private void assertMonitorVersionResource(final MockWebServer webServer, final b assertThat(getRequest.getMethod(), equalTo("GET")); assertThat(getRequest.getUri().getPath(), equalTo(pathPrefix + resourcePrefix + resource.v1())); - assertMonitorVersionQueryString(resourcePrefix, getRequest.getUri().getQuery()); + assertMonitorVersionQueryString(getRequest.getUri().getQuery(), Collections.emptyMap()); assertHeaders(getRequest, customHeaders); if (alreadyExists == false) { @@ -472,19 +479,30 @@ private void assertMonitorVersionResource(final MockWebServer webServer, final b assertThat(putRequest.getMethod(), equalTo("PUT")); assertThat(putRequest.getUri().getPath(), equalTo(pathPrefix + resourcePrefix + resource.v1())); - assertMonitorVersionQueryString(resourcePrefix, getRequest.getUri().getQuery()); - assertThat(putRequest.getBody(), equalTo(resource.v2())); + Map parameters = Collections.emptyMap(); + assertMonitorVersionQueryString(putRequest.getUri().getQuery(), parameters); + if (resourcePrefix.startsWith("/_template")) { + assertThat(putRequest.getBody(), 
equalTo(getExternalTemplateRepresentation(resource.v2()))); + } else { + assertThat(putRequest.getBody(), equalTo(resource.v2())); + } assertHeaders(putRequest, customHeaders); } } } - private void assertMonitorVersionQueryString(String resourcePrefix, String query) { - if (resourcePrefix.startsWith("/_template")) { - assertThat(query, equalTo(INCLUDE_TYPE_NAME_PARAMETER + "=true&" + resourceVersionQueryString())); - } else { - assertThat(query, equalTo(resourceVersionQueryString())); - } + private void assertMonitorVersionQueryString(String query, final Map parameters) { + Map expectedQueryStringMap = new HashMap<>(); + RestUtils.decodeQueryString(query, 0, expectedQueryStringMap); + + Map resourceVersionQueryStringMap = new HashMap<>(); + RestUtils.decodeQueryString(resourceVersionQueryString(), 0, resourceVersionQueryStringMap); + + Map actualQueryStringMap = new HashMap<>(); + actualQueryStringMap.putAll(resourceVersionQueryStringMap); + actualQueryStringMap.putAll(parameters); + + assertEquals(expectedQueryStringMap, actualQueryStringMap); } private void assertMonitorWatches(final MockWebServer webServer, @@ -590,7 +608,7 @@ private void export(final Settings settings, final Collection doc assertBusy(() -> assertThat(clusterService().state().version(), not(ClusterState.UNKNOWN_VERSION))); try (HttpExporter exporter = createHttpExporter(settings)) { - final CountDownLatch awaitResponseAndClose = new CountDownLatch(2); + final CountDownLatch awaitResponseAndClose = new CountDownLatch(1); exporter.openBulk(ActionListener.wrap(exportBulk -> { final HttpExportBulk bulk = (HttpExportBulk)exportBulk; @@ -602,9 +620,8 @@ private void export(final Settings settings, final Collection doc e -> fail(e.getMessage()) ); - bulk.doAdd(docs); - bulk.doFlush(listener); - bulk.doClose(listener); // reusing the same listener, which is why we expect countDown x2 + bulk.add(docs); + bulk.flush(listener); }, e -> fail("Failed to create HttpExportBulk"))); // block until the bulk responds @@ -897,4 +914,12 @@ protected List monitoringTemplateNamesWithOldTemplates() { return expectedTemplateNames; } + private String getExternalTemplateRepresentation(String internalRepresentation) throws IOException { + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, internalRepresentation)) { + XContentBuilder builder = JsonXContent.contentBuilder(); + IndexTemplateMetaData.Builder.removeType(IndexTemplateMetaData.Builder.fromXContent(parser, ""), builder); + return BytesReference.bytes(builder).utf8ToString(); + } + } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java index 9c036a53f9e6d..dd8d3161b1462 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java @@ -545,7 +545,7 @@ private void assertMasterTimeoutSet(final List resources if (timeout != null) { for (final HttpResource resource : resources) { if (resource instanceof PublishableHttpResource) { - assertEquals(timeout.getStringRep(), ((PublishableHttpResource) resource).getParameters().get("master_timeout")); + assertEquals(timeout.getStringRep(), ((PublishableHttpResource) 
resource).getDefaultParameters().get("master_timeout")); } } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/PipelineHttpResourceTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/PipelineHttpResourceTests.java index b0ba7442112be..2e038a344b245 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/PipelineHttpResourceTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/PipelineHttpResourceTests.java @@ -13,6 +13,7 @@ import java.io.IOException; import java.io.InputStream; +import java.util.Collections; import java.util.function.Supplier; import static org.hamcrest.Matchers.is; @@ -77,11 +78,11 @@ public void testDoCheckError() { } public void testDoPublishTrue() { - assertPublishSucceeds(resource, "/_ingest/pipeline", pipelineName, ByteArrayEntity.class); + assertPublishSucceeds(resource, "/_ingest/pipeline", pipelineName, Collections.emptyMap(), ByteArrayEntity.class); } public void testDoPublishFalseWithException() { - assertPublishWithException(resource, "/_ingest/pipeline", pipelineName, ByteArrayEntity.class); + assertPublishWithException(resource, "/_ingest/pipeline", pipelineName, Collections.emptyMap(), ByteArrayEntity.class); } public void testParameters() { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResourceTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResourceTests.java index f0ab6484f79e0..dc4e2410f2991 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResourceTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResourceTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.monitoring.exporter.http; +import java.util.Collections; import java.util.Map; import org.apache.http.HttpEntity; import org.apache.http.entity.ContentType; @@ -66,7 +67,7 @@ public void testCheckForResourceUnexpectedResponse() throws IOException { final RestStatus failedStatus = failedCheckStatus(); final Response response = response("GET", endpoint, failedStatus); final Request request = new Request("GET", endpoint); - addParameters(request, getParameters(resource.getParameters())); + addParameters(request, getParameters(resource.getDefaultParameters())); whenPerformRequestAsyncWith(client, request, response); @@ -102,7 +103,7 @@ public void testVersionCheckForResourceUnexpectedResponse() { final XContent xContent = mock(XContent.class); final int minimumVersion = randomInt(); final Request request = new Request("GET", endpoint); - addParameters(request, getParameters(resource.getParameters())); + addParameters(request, getParameters(resource.getDefaultParameters())); whenPerformRequestAsyncWith(client, request, response); @@ -126,7 +127,7 @@ public void testVersionCheckForResourceMalformedResponse() { final Response response = response("GET", endpoint, okStatus, entity); final XContent xContent = mock(XContent.class); final Request request = new Request("GET", endpoint); - addParameters(request, getParameters(resource.getParameters())); + addParameters(request, getParameters(resource.getDefaultParameters())); whenPerformRequestAsyncWith(client, request, response); @@ -151,7 +152,7 @@ public void testCheckForResourceErrors() 
throws IOException { final Response response = e == responseException ? responseException.getResponse() : null; final Request request = new Request("GET", endpoint); - addParameters(request, getParameters(resource.getParameters())); + addParameters(request, getParameters(resource.getDefaultParameters())); whenPerformRequestAsyncWith(client, request, e); @@ -176,12 +177,13 @@ public void testPutResourceFalseWithException() { final String endpoint = concatenateEndpoint(resourceBasePath, resourceName); final Exception e = randomFrom(new IOException("expected"), new RuntimeException("expected")); final Request request = new Request("PUT", endpoint); - addParameters(request, resource.getParameters()); + addParameters(request, resource.getDefaultParameters()); request.setEntity(entity); whenPerformRequestAsyncWith(client, request, e); - resource.putResource(client, listener, logger, resourceBasePath, resourceName, body, resourceType, owner, ownerType); + final Map parameters = Collections.emptyMap(); + resource.putResource(client, listener, logger, resourceBasePath, resourceName, parameters, body, resourceType, owner, ownerType); verifyListener(null); @@ -207,7 +209,7 @@ public void testDeleteResourceErrors() { final RestStatus failedStatus = failedCheckStatus(); final ResponseException responseException = responseException("DELETE", endpoint, failedStatus); final Exception e = randomFrom(new IOException("expected"), new RuntimeException("expected"), responseException); - final Map deleteParameters = deleteParameters(resource.getParameters()); + final Map deleteParameters = deleteParameters(resource.getDefaultParameters()); final Request request = new Request("DELETE", endpoint); addParameters(request, deleteParameters); @@ -304,7 +306,7 @@ private void assertCheckForResource(final RestStatus status, final Boolean expec final String endpoint = concatenateEndpoint(resourceBasePath, resourceName); final Response response = response("GET", endpoint, status); final Request request = new Request("GET", endpoint); - addParameters(request, getParameters(resource.getParameters())); + addParameters(request, getParameters(resource.getDefaultParameters())); whenPerformRequestAsyncWith(client, request, response); @@ -337,7 +339,7 @@ private void assertVersionCheckForResource(final RestStatus status, final Boolea final Response response = response("GET", endpoint, status, entity); final XContent xContent = XContentType.JSON.xContent(); final Request request = new Request("GET", endpoint); - addParameters(request, getParameters(resource.getParameters())); + addParameters(request, getParameters(resource.getDefaultParameters())); whenPerformRequestAsyncWith(client, request, response); @@ -370,12 +372,13 @@ private void assertPutResource(final RestStatus status, final boolean errorFree) final String endpoint = concatenateEndpoint(resourceBasePath, resourceName); final Response response = response("PUT", endpoint, status); final Request request = new Request("PUT", endpoint); - addParameters(request, resource.getParameters()); + addParameters(request, resource.getDefaultParameters()); request.setEntity(entity); whenPerformRequestAsyncWith(client, request, response); - resource.putResource(client, listener, logger, resourceBasePath, resourceName, body, resourceType, owner, ownerType); + final Map parameters = Collections.emptyMap(); + resource.putResource(client, listener, logger, resourceBasePath, resourceName, parameters, body, resourceType, owner, ownerType); verifyListener(errorFree ? 
true : null); verify(client).performRequestAsync(eq(request), any(ResponseListener.class)); @@ -431,7 +434,7 @@ private void assertCheckForResource(final RestClient client, final Logger logger private void assertDeleteResource(final RestStatus status, final boolean expected) { final String endpoint = concatenateEndpoint(resourceBasePath, resourceName); final Response response = response("DELETE", endpoint, status); - final Map deleteParameters = deleteParameters(resource.getParameters()); + final Map deleteParameters = deleteParameters(resource.getDefaultParameters()); final Request request = new Request("DELETE", endpoint); addParameters(request, deleteParameters); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/TemplateHttpResourceTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/TemplateHttpResourceTests.java index 4be1d2031ac30..9bdf4757c7837 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/TemplateHttpResourceTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/TemplateHttpResourceTests.java @@ -13,6 +13,7 @@ import java.io.IOException; import java.io.InputStream; +import java.util.Collections; import java.util.function.Supplier; import static org.hamcrest.Matchers.is; @@ -23,14 +24,20 @@ public class TemplateHttpResourceTests extends AbstractPublishableHttpResourceTestCase { private final String templateName = ".my_template"; - private final String templateValue = "{\"template\":\".xyz-*\",\"mappings\":{}}"; - private final Supplier template = () -> templateValue; + + //the internal representation has the type, the external representation should not + private final String templateValueInternal = "{\"order\":0,\"index_patterns\":[\".xyz-*\"],\"settings\":{},\"mappings\":{\"_doc\"" + + ":{\"properties\":{\"one\":{\"properties\":{\"two\":{\"properties\":{\"name\":{\"type\":\"keyword\"}}}}}}}},\"aliases\":{}}"; + private final String templateValueExternal = "{\"order\":0,\"index_patterns\":[\".xyz-*\"],\"settings\":{},\"mappings\"" + + ":{\"properties\":{\"one\":{\"properties\":{\"two\":{\"properties\":{\"name\":{\"type\":\"keyword\"}}}}}}},\"aliases\":{}}"; + private final Supplier template = () -> templateValueInternal; private final int minimumVersion = Math.min(MonitoringTemplateUtils.LAST_UPDATED_VERSION, Version.CURRENT.id); private final TemplateHttpResource resource = new TemplateHttpResource(owner, masterTimeout, templateName, template); public void testTemplateToHttpEntity() throws IOException { - final byte[] templateValueBytes = templateValue.getBytes(ContentType.APPLICATION_JSON.getCharset()); + //the internal representation is converted to the external representation for the resource + final byte[] templateValueBytes = templateValueExternal.getBytes(ContentType.APPLICATION_JSON.getCharset()); final HttpEntity entity = resource.templateToHttpEntity(); assertThat(entity.getContentType().getValue(), is(ContentType.APPLICATION_JSON.toString())); @@ -77,11 +84,11 @@ public void testDoCheckError() { } public void testDoPublishTrue() { - assertPublishSucceeds(resource, "/_template", templateName, StringEntity.class); + assertPublishSucceeds(resource, "/_template", templateName, Collections.emptyMap(), StringEntity.class); } public void testDoPublishFalseWithException() { - assertPublishWithException(resource, "/_template", templateName, StringEntity.class); + 
assertPublishWithException(resource, "/_template", templateName, Collections.emptyMap(), StringEntity.class); } public void testParameters() { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/WatcherExistsHttpResourceTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/WatcherExistsHttpResourceTests.java index 1559ffb1c6882..522fc7368a207 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/WatcherExistsHttpResourceTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/WatcherExistsHttpResourceTests.java @@ -35,7 +35,7 @@ public class WatcherExistsHttpResourceTests extends AbstractPublishableHttpResou private final MultiHttpResource watches = mock(MultiHttpResource.class); private final WatcherExistsHttpResource resource = new WatcherExistsHttpResource(owner, clusterService, watches); - private final Map expectedParameters = getParameters(resource.getParameters(), GET_EXISTS, XPACK_DOES_NOT_EXIST); + private final Map expectedParameters = getParameters(resource.getDefaultParameters(), GET_EXISTS, XPACK_DOES_NOT_EXIST); public void testDoCheckIgnoresClientWhenNotElectedMaster() { whenNotElectedMaster(); @@ -175,7 +175,7 @@ public void testDoPublishException() { } public void testParameters() { - final Map parameters = resource.getParameters(); + final Map parameters = resource.getDefaultParameters(); assertThat(parameters.get("filter_path"), is(WatcherExistsHttpResource.WATCHER_CHECK_PARAMETERS.get("filter_path"))); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java index 171eeedf88cab..ed5d3ef40ae24 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java @@ -218,7 +218,7 @@ public void testExport() throws Exception { */ private void checkMonitoringTemplates() { final Set templates = new HashSet<>(); - templates.add(".monitoring-alerts"); + templates.add(".monitoring-alerts-7"); templates.add(".monitoring-es"); templates.add(".monitoring-kibana"); templates.add(".monitoring-logstash"); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java index 16ea27488d8a4..4af080b7fabde 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterResourceIntegTests.java @@ -81,8 +81,8 @@ private static BytesReference generateTemplateSource(final String name, final In .field("index.number_of_replicas", 0) .endObject() .startObject("mappings") - // Still need use type, RestPutIndexTemplateAction#prepareRequestSource has logic that adds type if missing - .startObject("doc") + // The internal representation still requires a default type of _doc + .startObject("_doc") .startObject("_meta") .field("test", true) .endObject() @@ -194,7 
+194,7 @@ private void assertTemplateNotUpdated() { final String name = MonitoringTemplateUtils.templateName(system.getSystem()); for (IndexTemplateMetaData template : client().admin().indices().prepareGetTemplates(name).get().getIndexTemplates()) { - final String docMapping = template.getMappings().get("doc").toString(); + final String docMapping = template.getMappings().get("_doc").toString(); assertThat(docMapping, notNullValue()); assertThat(docMapping, containsString("test")); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java index f5ed570acb51f..7a631d6f488c3 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java @@ -104,13 +104,13 @@ protected Collection> getPlugins() { } private String createBulkEntity() { - return "{\"index\":{}}\n" + - "{\"foo\":{\"bar\":0}}\n" + - "{\"index\":{}}\n" + - "{\"foo\":{\"bar\":1}}\n" + - "{\"index\":{}}\n" + - "{\"foo\":{\"bar\":2}}\n" + - "\n"; + return "{\"index\":{\"_type\":\"monitoring_data_type\"}}\n" + + "{\"foo\":{\"bar\":0}}\n" + + "{\"index\":{\"_type\":\"monitoring_data_type\"}}\n" + + "{\"foo\":{\"bar\":1}}\n" + + "{\"index\":{\"_type\":\"monitoring_data_type\"}}\n" + + "{\"foo\":{\"bar\":2}}\n" + + "\n"; } /** @@ -127,7 +127,7 @@ public void testMonitoringBulk() throws Exception { final MonitoringBulkResponse bulkResponse = new MonitoringBulkRequestBuilder(client()) - .add(system, "monitoring_data_type", new BytesArray(createBulkEntity().getBytes("UTF-8")), XContentType.JSON, + .add(system, new BytesArray(createBulkEntity().getBytes("UTF-8")), XContentType.JSON, System.currentTimeMillis(), interval.millis()) .get(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkActionTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkActionTests.java index 10fc10e3f973d..7a4427c9f0fdc 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkActionTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkActionTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.xpack.core.XPackClient; @@ -122,8 +121,6 @@ public void testNoErrors() throws Exception { assertThat(restResponse.status(), is(RestStatus.OK)); assertThat(restResponse.content().utf8ToString(), is("{\"took\":" + response.getTookInMillis() + ",\"ignored\":false,\"errors\":false}")); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testNoErrorsButIgnored() throws Exception { @@ -134,8 +131,6 @@ public void testNoErrorsButIgnored() throws Exception { assertThat(restResponse.status(), is(RestStatus.OK)); assertThat(restResponse.content().utf8ToString(), is("{\"took\":" + response.getTookInMillis() + 
",\"ignored\":true,\"errors\":false}")); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testWithErrors() throws Exception { @@ -155,8 +150,6 @@ public void testWithErrors() throws Exception { assertThat(restResponse.status(), is(RestStatus.INTERNAL_SERVER_ERROR)); assertThat(restResponse.content().utf8ToString(), is("{\"took\":" + response.getTookInMillis() + ",\"ignored\":false,\"errors\":true,\"error\":" + errorJson + "}")); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } /** diff --git a/x-pack/plugin/monitoring/src/test/resources/monitoring-test.json b/x-pack/plugin/monitoring/src/test/resources/monitoring-test.json index e2abf9e699afe..ea69a3680ee15 100644 --- a/x-pack/plugin/monitoring/src/test/resources/monitoring-test.json +++ b/x-pack/plugin/monitoring/src/test/resources/monitoring-test.json @@ -1,7 +1,7 @@ { "index_patterns": ".monitoring-data-${monitoring.template.version}", "mappings": { - "doc": { + "_doc": { "_meta": { "template.version": "${monitoring.template.version}" } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java index ad409d4e2ca60..8ebbf1bccf864 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.rollup; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -104,6 +105,7 @@ public class Rollup extends Plugin implements ActionPlugin, PersistentTaskPlugin new HashSet<>(Arrays.asList("es-security-runas-user", "_xpack_security_authentication")); + private final SetOnce schedulerEngine = new SetOnce<>(); private final Settings settings; private final boolean enabled; private final boolean transportClientMode; @@ -195,12 +197,19 @@ public List> getPersistentTasksExecutor(ClusterServic return emptyList(); } - SchedulerEngine schedulerEngine = new SchedulerEngine(settings, getClock()); - return Collections.singletonList(new RollupJobTask.RollupJobPersistentTasksExecutor(client, schedulerEngine, threadPool)); + schedulerEngine.set(new SchedulerEngine(settings, getClock())); + return Collections.singletonList(new RollupJobTask.RollupJobPersistentTasksExecutor(client, schedulerEngine.get(), threadPool)); } // overridable by tests protected Clock getClock() { return Clock.systemUTC(); } + + @Override + public void close() { + if (schedulerEngine.get() != null) { + schedulerEngine.get().stop(); + } + } } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java index 8b028b712e717..4546268119884 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java @@ -224,6 +224,9 @@ private static List translateDateHistogram(DateHistogramAggr filterConditions.add(new TermQueryBuilder(RollupField.formatFieldName(source, DateHistogramGroupConfig.TIME_ZONE), timezone)); + if (source.timeZone() != null) { + 
rolledDateHisto.timeZone(source.timeZone()); + } rolledDateHisto.offset(source.offset()); if (source.extendedBounds() != null) { rolledDateHisto.extendedBounds(source.extendedBounds()); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java index 610275705eef8..e85a92c061366 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java @@ -56,17 +56,14 @@ import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps; import org.elasticsearch.xpack.core.rollup.action.RollupSearchAction; -import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.rollup.Rollup; import org.elasticsearch.xpack.rollup.RollupJobIdentifierUtils; import org.elasticsearch.xpack.rollup.RollupRequestTranslator; import org.elasticsearch.xpack.rollup.RollupResponseTranslator; -import org.joda.time.DateTimeZone; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Objects; @@ -286,11 +283,8 @@ static QueryBuilder rewriteQuery(QueryBuilder builder, Set jobCap } else if (builder.getWriteableName().equals(RangeQueryBuilder.NAME)) { RangeQueryBuilder range = (RangeQueryBuilder) builder; String fieldName = range.fieldName(); - // Many range queries don't include the timezone because the default is UTC, but the query - // builder will return null so we need to set it here - String timeZone = range.timeZone() == null ? DateTimeZone.UTC.toString() : range.timeZone(); - String rewrittenFieldName = rewriteFieldName(jobCaps, RangeQueryBuilder.NAME, fieldName, timeZone); + String rewrittenFieldName = rewriteFieldName(jobCaps, RangeQueryBuilder.NAME, fieldName); RangeQueryBuilder rewritten = new RangeQueryBuilder(rewrittenFieldName) .from(range.from()) .to(range.to()) @@ -306,12 +300,12 @@ static QueryBuilder rewriteQuery(QueryBuilder builder, Set jobCap } else if (builder.getWriteableName().equals(TermQueryBuilder.NAME)) { TermQueryBuilder term = (TermQueryBuilder) builder; String fieldName = term.fieldName(); - String rewrittenFieldName = rewriteFieldName(jobCaps, TermQueryBuilder.NAME, fieldName, null); + String rewrittenFieldName = rewriteFieldName(jobCaps, TermQueryBuilder.NAME, fieldName); return new TermQueryBuilder(rewrittenFieldName, term.value()); } else if (builder.getWriteableName().equals(TermsQueryBuilder.NAME)) { TermsQueryBuilder terms = (TermsQueryBuilder) builder; String fieldName = terms.fieldName(); - String rewrittenFieldName = rewriteFieldName(jobCaps, TermQueryBuilder.NAME, fieldName, null); + String rewrittenFieldName = rewriteFieldName(jobCaps, TermQueryBuilder.NAME, fieldName); return new TermsQueryBuilder(rewrittenFieldName, terms.values()); } else if (builder.getWriteableName().equals(MatchAllQueryBuilder.NAME)) { // no-op @@ -321,11 +315,7 @@ static QueryBuilder rewriteQuery(QueryBuilder builder, Set jobCap } } - private static String rewriteFieldName(Set jobCaps, - String builderName, - String fieldName, - String timeZone) { - List incompatibleTimeZones = timeZone == null ? 
Collections.emptyList() : new ArrayList<>(); + private static String rewriteFieldName(Set jobCaps, String builderName, String fieldName) { List rewrittenFieldNames = jobCaps.stream() // We only care about job caps that have the query's target field .filter(caps -> caps.getFieldCaps().keySet().contains(fieldName)) @@ -335,17 +325,7 @@ private static String rewriteFieldName(Set jobCaps, // For now, we only allow filtering on grouping fields .filter(agg -> { String type = (String)agg.get(RollupField.AGG); - - // If the cap is for a date_histo, and the query is a range, the timezones need to match - if (type.equals(DateHistogramAggregationBuilder.NAME) && timeZone != null) { - boolean matchingTZ = ((String)agg.get(DateHistogramGroupConfig.TIME_ZONE)) - .equalsIgnoreCase(timeZone); - if (matchingTZ == false) { - incompatibleTimeZones.add((String)agg.get(DateHistogramGroupConfig.TIME_ZONE)); - } - return matchingTZ; - } - // Otherwise just make sure it's one of the three groups + // make sure it's one of the three groups return type.equals(TermsAggregationBuilder.NAME) || type.equals(DateHistogramAggregationBuilder.NAME) || type.equals(HistogramAggregationBuilder.NAME); @@ -363,14 +343,8 @@ private static String rewriteFieldName(Set jobCaps, .distinct() .collect(ArrayList::new, List::addAll, List::addAll); if (rewrittenFieldNames.isEmpty()) { - if (incompatibleTimeZones.isEmpty()) { - throw new IllegalArgumentException("Field [" + fieldName + "] in [" + builderName + throw new IllegalArgumentException("Field [" + fieldName + "] in [" + builderName + "] query is not available in selected rollup indices, cannot query."); - } else { - throw new IllegalArgumentException("Field [" + fieldName + "] in [" + builderName - + "] query was found in rollup indices, but requested timezone is not compatible. Options include: " - + incompatibleTimeZones); - } } else if (rewrittenFieldNames.size() > 1) { throw new IllegalArgumentException("Ambiguous field name resolution when mapping to rolled fields. 
Field name [" + fieldName + "] was mapped to: [" + Strings.collectionToDelimitedString(rewrittenFieldNames, ",") + "]."); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java index 1ceac98725e8b..f691d10d20dc7 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java @@ -31,6 +31,7 @@ import org.junit.Before; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -271,6 +272,37 @@ public void testDateHistoLongIntervalWithMinMax() { } } + public void testDateHistoWithTimezone() { + ZoneId timeZone = ZoneId.of(randomFrom(ZoneId.getAvailableZoneIds())); + DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo"); + histo.interval(86400000) + .field("foo") + .timeZone(timeZone); + List filterConditions = new ArrayList<>(); + + List translated = translateAggregation(histo, filterConditions, namedWriteableRegistry); + assertThat(translated.size(), equalTo(1)); + assertThat(translated.get(0), instanceOf(DateHistogramAggregationBuilder.class)); + DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0); + + assertThat(translatedHisto.interval(), equalTo(86400000L)); + assertThat(translatedHisto.field(), equalTo("foo.date_histogram.timestamp")); + assertThat(translatedHisto.timeZone(), equalTo(timeZone)); + assertThat(filterConditions.size(), equalTo(1)); + + for (QueryBuilder q : filterConditions) { + if (q instanceof TermQueryBuilder) { + if (((TermQueryBuilder) q).fieldName().equals("foo.date_histogram.time_zone")) { + assertThat(((TermQueryBuilder) q).value(), equalTo(timeZone.toString())); + } else { + fail("Unexpected Term Query in filter conditions: [" + ((TermQueryBuilder) q).fieldName() + "]"); + } + } else { + fail("Unexpected query builder in filter conditions"); + } + } + } + public void testAvgMetric() { List filterConditions = new ArrayList<>(); List translated = translateAggregation(new AvgAggregationBuilder("test_metric") diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java index 0032b5a88a563..5a851d17e5eaf 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java @@ -140,16 +140,15 @@ public void testRangeNullTimeZone() { assertThat(((RangeQueryBuilder)rewritten).fieldName(), equalTo("foo.date_histogram.timestamp")); } - public void testRangeWrongTZ() { + public void testRangeDifferentTZ() { final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "UTC")); final RollupJobConfig config = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(config); Set caps = new HashSet<>(); caps.add(cap); - Exception e = expectThrows(IllegalArgumentException.class, - () -> TransportRollupSearchAction.rewriteQuery(new RangeQueryBuilder("foo").gt(1).timeZone("CET"), 
caps)); - assertThat(e.getMessage(), equalTo("Field [foo] in [range] query was found in rollup indices, but requested timezone is not " + - "compatible. Options include: [UTC]")); + QueryBuilder rewritten = TransportRollupSearchAction.rewriteQuery(new RangeQueryBuilder("foo").gt(1).timeZone("CET"), caps); + assertThat(rewritten, instanceOf(RangeQueryBuilder.class)); + assertThat(((RangeQueryBuilder)rewritten).fieldName(), equalTo("foo.date_histogram.timestamp")); } public void testTermQuery() { diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index afc39d5df5010..32a92c513f25d 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -126,6 +126,11 @@ dependencies { compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" +processTestResources { + from(project(xpackModule('core')).file('src/main/config')) + from(project(xpackModule('core')).file('src/test/resources')) +} + configurations { testArtifacts.extendsFrom testRuntime } @@ -138,10 +143,7 @@ artifacts { archives jar testArtifacts testJar } -sourceSets.test.resources { - srcDir '../core/src/test/resources' - srcDir '../core/src/main/config' -} + dependencyLicenses { mapping from: /java-support|opensaml-.*/, to: 'shibboleth' mapping from: /http.*/, to: 'httpclient' @@ -286,7 +288,7 @@ run { plugin xpackModule('core') } -unitTest { +test { /* * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each * other if we allow them to set the number of available processors as it's set-once in Netty. @@ -298,3 +300,13 @@ unitTest { // installing them as individual plugins for integ tests doesn't make sense, // so we disable integ tests integTest.enabled = false + +// add all sub-projects of the qa sub-project +gradle.projectsEvaluated { + project.subprojects + .find { it.path == project.path + ":qa" } + .subprojects + .findAll { it.path.startsWith(project.path + ":qa") } + .each { check.dependsOn it.check } +} + diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle index 1c684809a3203..19a8d11dd6f0c 100644 --- a/x-pack/plugin/security/cli/build.gradle +++ b/x-pack/plugin/security/cli/build.gradle @@ -8,8 +8,8 @@ dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') - compile 'org.bouncycastle:bcpkix-jdk15on:1.59' - compile 'org.bouncycastle:bcprov-jdk15on:1.59' + compile "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" + compile "org.bouncycastle:bcprov-jdk15on:${versions.bouncycastle}" testImplementation 'com.google.jimfs:jimfs:1.1' testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" @@ -23,7 +23,7 @@ dependencyLicenses { } if (project.inFipsJvm) { - unitTest.enabled = false + test.enabled = false testingConventions.enabled = false // Forbiden APIs non-portable checks fail because bouncy castle classes being used from the FIPS JDK since those are // not part of the Java specification - all of this is as designed, so we have to relax this check for FIPS. 
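// A minimal standalone sketch of the qa-check wiring pattern from the x-pack/plugin/security/build.gradle
// hunk above, with explanatory comments; it mirrors the hunk rather than adding anything new.
// Deferring via gradle.projectsEvaluated waits until every project in the build has been evaluated, so the
// ":qa" container and its subprojects (and their `check` tasks) exist by the time the dependencies are added.
gradle.projectsEvaluated {
    // locate the ":qa" container project nested under this plugin project
    def qaContainer = project.subprojects.find { it.path == project.path + ':qa' }
    if (qaContainer != null) {
        // attach each qa subproject's check task to this project's check lifecycle task,
        // so running `check` here also runs the qa clusters' integration checks
        qaContainer.subprojects
            .findAll { it.path.startsWith(project.path + ':qa') }
            .each { qaProject -> check.dependsOn qaProject.check }
    }
}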
diff --git a/x-pack/plugin/security/cli/licenses/bcpkix-jdk15on-1.59.jar.sha1 b/x-pack/plugin/security/cli/licenses/bcpkix-jdk15on-1.59.jar.sha1 deleted file mode 100644 index be5e561ee9a76..0000000000000 --- a/x-pack/plugin/security/cli/licenses/bcpkix-jdk15on-1.59.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9cef0aab8a4bb849a8476c058ce3ff302aba3fff \ No newline at end of file diff --git a/x-pack/plugin/security/cli/licenses/bcpkix-jdk15on-1.61.jar.sha1 b/x-pack/plugin/security/cli/licenses/bcpkix-jdk15on-1.61.jar.sha1 new file mode 100644 index 0000000000000..7ae081447a929 --- /dev/null +++ b/x-pack/plugin/security/cli/licenses/bcpkix-jdk15on-1.61.jar.sha1 @@ -0,0 +1 @@ +89bb3aa5b98b48e584eee2a7401b7682a46779b4 \ No newline at end of file diff --git a/x-pack/plugin/security/cli/licenses/bcprov-jdk15on-1.59.jar.sha1 b/x-pack/plugin/security/cli/licenses/bcprov-jdk15on-1.59.jar.sha1 deleted file mode 100644 index aa42dbb8f6906..0000000000000 --- a/x-pack/plugin/security/cli/licenses/bcprov-jdk15on-1.59.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2507204241ab450456bdb8e8c0a8f986e418bd99 \ No newline at end of file diff --git a/x-pack/plugin/security/cli/licenses/bcprov-jdk15on-1.61.jar.sha1 b/x-pack/plugin/security/cli/licenses/bcprov-jdk15on-1.61.jar.sha1 new file mode 100644 index 0000000000000..0ccfcd61a0e59 --- /dev/null +++ b/x-pack/plugin/security/cli/licenses/bcprov-jdk15on-1.61.jar.sha1 @@ -0,0 +1 @@ +00df4b474e71be02c1349c3292d98886f888d1f7 \ No newline at end of file diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java index 4b30224dcd481..e7411082c743d 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java @@ -529,7 +529,7 @@ private static void printIntro(Terminal terminal, boolean csr) { terminal.println(" the certificate and private key will also be included in the output file."); } terminal.println("* Information about each instance"); - terminal.println(" * An instance is any piece of the Elastic Stack that requires a SSL certificate."); + terminal.println(" * An instance is any piece of the Elastic Stack that requires an SSL certificate."); terminal.println(" Depending on your configuration, Elasticsearch, Logstash, Kibana, and Beats"); terminal.println(" may all require a certificate and private key."); terminal.println(" * The minimum required value for each instance is a name. 
This can simply be the"); diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java index 435305b8a6914..0b6916f60da21 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java @@ -149,7 +149,7 @@ public static void main(String[] args) throws Exception { "signing requests for use with SSL/TLS in the Elastic stack."; static final String INSTANCE_EXPLANATION = - " * An instance is any piece of the Elastic Stack that requires a SSL certificate.\n" + + " * An instance is any piece of the Elastic Stack that requires an SSL certificate.\n" + " Depending on your configuration, Elasticsearch, Logstash, Kibana, and Beats\n" + " may all require a certificate and private key.\n" + " * The minimum required value for each instance is a name. This can simply be the\n" + diff --git a/x-pack/plugin/security/qa/basic-enable-security/build.gradle b/x-pack/plugin/security/qa/basic-enable-security/build.gradle new file mode 100644 index 0000000000000..a21e3c68d3fc4 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/build.gradle @@ -0,0 +1,68 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +task integTestNoSecurity(type: RestIntegTestTask) { + description = "Run tests against a cluster that doesn't have security" +} +tasks.getByName("integTestNoSecurityRunner").configure { + systemProperty 'tests.has_security', 'false' +} +check.dependsOn(integTestNoSecurity) + +task integTestSecurity(type: RestIntegTestTask) { + dependsOn integTestNoSecurity + description = "Run tests against a cluster that has security" +} +tasks.getByName("integTestSecurityRunner").configure { + systemProperty 'tests.has_security', 'true' +} +check.dependsOn(integTestSecurity) + +configure(extensions.findByName("integTestNoSecurityCluster")) { + clusterName = "enable-security-on-basic" + numNodes = 2 + + setting 'xpack.ilm.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + setting 'xpack.security.enabled', 'false' +} + +Task noSecurityTest = tasks.findByName("integTestNoSecurity") +configure(extensions.findByName("integTestSecurityCluster")) { + clusterName = "basic-license" + numNodes = 2 + dataDir = { nodeNum -> noSecurityTest.nodes[nodeNum].dataDir } + + setting 'xpack.ilm.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.authc.anonymous.roles', 'anonymous' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.security.transport.ssl.certificate', 'transport.crt' + setting 'xpack.security.transport.ssl.key', 'transport.key' + setting 'xpack.security.transport.ssl.key_passphrase', 'transport-password' + setting 
'xpack.security.transport.ssl.certificate_authorities', 'ca.crt' + + extraConfigFile 'transport.key', project.projectDir.toPath().resolve('src/test/resources/ssl/transport.key').toFile() + extraConfigFile 'transport.crt', project.projectDir.toPath().resolve('src/test/resources/ssl/transport.crt').toFile() + extraConfigFile 'ca.crt', project.projectDir.toPath().resolve('src/test/resources/ssl/ca.crt').toFile() + + setupCommand 'setupAdminUser', + 'bin/elasticsearch-users', 'useradd', 'admin_user', '-p', 'admin-password', '-r', 'superuser' + setupCommand 'setupTestUser' , + 'bin/elasticsearch-users', 'useradd', 'security_test_user', '-p', 'security-test-password', '-r', 'security_test_role' + extraConfigFile 'roles.yml', project.projectDir.toPath().resolve('src/test/resources/roles.yml').toFile() +} + +integTest.enabled = false diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/java/org/elasticsearch/xpack/security/EnableSecurityOnBasicLicenseIT.java b/x-pack/plugin/security/qa/basic-enable-security/src/test/java/org/elasticsearch/xpack/security/EnableSecurityOnBasicLicenseIT.java new file mode 100644 index 0000000000000..fa64a89f2f633 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/java/org/elasticsearch/xpack/security/EnableSecurityOnBasicLicenseIT.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.xpack.security.authc.InternalRealms; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class EnableSecurityOnBasicLicenseIT extends ESRestTestCase { + + private static boolean securityEnabled; + + @BeforeClass + public static void checkTestMode() { + final String hasSecurity = System.getProperty("tests.has_security"); + securityEnabled = Booleans.parseBoolean(hasSecurity); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("security_test_user", new SecureString("security-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected boolean preserveClusterUponCompletion() { + // If 
this is the first run (security not yet enabled), then don't clean up afterwards because we want to test restart with data + return securityEnabled == false; + } + + public void testSecuritySetup() throws Exception { + logger.info("Security status: {}", securityEnabled); + logger.info("Cluster:\n{}", getClusterInfo()); + logger.info("Indices:\n{}", getIndices()); + checkBasicLicenseType(); + + checkSecurityStatus(securityEnabled); + if (securityEnabled) { + checkAuthentication(); + } + + checkAllowedWrite("index_allowed"); + // Security runs second, and should see the doc from the first (non-security) run + final int expectedIndexCount = securityEnabled ? 2 : 1; + checkIndexCount("index_allowed", expectedIndexCount); + + final String otherIndex = "index_" + randomAlphaOfLengthBetween(2, 6).toLowerCase(Locale.ROOT); + if (securityEnabled) { + checkDeniedWrite(otherIndex); + } else { + checkAllowedWrite(otherIndex); + } + } + + private String getClusterInfo() throws IOException { + Map info = getAsMap("/"); + assertThat(info, notNullValue()); + return info.toString(); + } + + private String getIndices() throws IOException { + final Request request = new Request("GET", "/_cat/indices"); + Response response = client().performRequest(request); + return EntityUtils.toString(response.getEntity()); + } + + private void checkBasicLicenseType() throws IOException { + Map license = getAsMap("/_license"); + assertThat(license, notNullValue()); + assertThat(ObjectPath.evaluate(license, "license.type"), equalTo("basic")); + } + + private void checkSecurityStatus(boolean expectEnabled) throws IOException { + Map usage = getAsMap("/_xpack/usage"); + assertThat(usage, notNullValue()); + assertThat(ObjectPath.evaluate(usage, "security.available"), equalTo(true)); + assertThat(ObjectPath.evaluate(usage, "security.enabled"), equalTo(expectEnabled)); + if (expectEnabled) { + for (String realm : Arrays.asList("file", "native")) { + assertThat(ObjectPath.evaluate(usage, "security.realms." + realm + ".available"), equalTo(true)); + assertThat(ObjectPath.evaluate(usage, "security.realms." + realm + ".enabled"), equalTo(true)); + } + for (String realm : InternalRealms.getConfigurableRealmsTypes()) { + if (realm.equals("file") == false && realm.equals("native") == false) { + assertThat(ObjectPath.evaluate(usage, "security.realms." + realm + ".available"), equalTo(false)); + assertThat(ObjectPath.evaluate(usage, "security.realms." 
+ realm + ".enabled"), equalTo(false)); + } + } + } + } + + private void checkAuthentication() throws IOException { + final Map auth = getAsMap("/_security/_authenticate"); + // From file realm, configured in build.gradle + assertThat(ObjectPath.evaluate(auth, "username"), equalTo("security_test_user")); + assertThat(ObjectPath.evaluate(auth, "roles"), contains("security_test_role")); + } + + private void checkAllowedWrite(String indexName) throws IOException { + final Request request = new Request("POST", "/" + indexName + "/_doc"); + request.setJsonEntity("{ \"key\" : \"value\" }"); + Response response = client().performRequest(request); + final Map result = entityAsMap(response); + assertThat(ObjectPath.evaluate(result, "_index"), equalTo(indexName)); + assertThat(ObjectPath.evaluate(result, "result"), equalTo("created")); + } + + private void checkDeniedWrite(String indexName) { + final Request request = new Request("POST", "/" + indexName + "/_doc"); + request.setJsonEntity("{ \"key\" : \"value\" }"); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat(e.getMessage(), containsString("unauthorized for user [security_test_user]")); + } + + private void checkIndexCount(String indexName, int expectedCount) throws IOException { + final Request request = new Request("POST", "/" + indexName + "/_refresh"); + adminClient().performRequest(request); + + final Map result = getAsMap("/" + indexName + "/_count"); + assertThat(ObjectPath.evaluate(result, "count"), equalTo(expectedCount)); + } +} diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/roles.yml b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/roles.yml new file mode 100644 index 0000000000000..eb6c3ec45786b --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/roles.yml @@ -0,0 +1,14 @@ +# A basic role that is used to test security +security_test_role: + cluster: + - monitor + - "cluster:admin/xpack/license/*" + indices: + - names: [ "index_allowed" ] + privileges: [ "read", "write", "create_index" ] + - names: [ "*" ] + privileges: [ "monitor" ] + +anonymous: + cluster: + - monitor \ No newline at end of file diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/README.asciidoc b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/README.asciidoc new file mode 100644 index 0000000000000..b3729f42d17b0 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/README.asciidoc @@ -0,0 +1,30 @@ += Keystore Details + +This document details the steps used to create the certificate and keystore files in this directory. 
+ +== Instructions on generating certificates +The certificates in this directory have been generated using elasticsearch-certutil (7.0.0 SNAPSHOT) + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil ca --pem --out=ca.zip --pass="ca-password" --days=3500 +unzip ca.zip +mv ca/ca.* ./ + +rm ca.zip +rmdir ca +----------------------------------------------------------------------------------------------------------- + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil cert --pem --name=transport --out=transport.zip --pass="transport-password" --days=3500 \ + --ca-cert=ca.crt --ca-key=ca.key --ca-pass="ca-password" \ + --dns=localhost --dns=localhost.localdomain --dns=localhost4 --dns=localhost4.localdomain4 --dns=localhost6 --dns=localhost6.localdomain6 \ + --ip=127.0.0.1 --ip=0:0:0:0:0:0:0:1 + +unzip transport.zip +mv transport/transport.* ./ + +rm transport.zip +rmdir transport +----------------------------------------------------------------------------------------------------------- diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.crt b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.crt new file mode 100644 index 0000000000000..95068217a612a --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIVAL0RCyWTbBDd2ntuWoqRwW0IE9+9MA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTE5MDQzMDAzNTQwN1oXDTI4MTEyODAzNTQwN1owNDEyMDAG +A1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5lcmF0ZWQgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDA4VwADiyl+Xl15D27gtpS +TXZfHt40MUx12FY0MEd3A3hU+Fp4PaLE2ejECx04yrq8Rfc0Yltux/Fc5zE98XM8 +dY4j0QN/e6C/f0mrBI0KaJ25nv0MWFvoqS/D3vWvDFLUP1a3OZICWWoBDG+zCHe5 +Aq0qwge+FU9IUc7G2WPJeUp4e0+EzLxFInls3rTX1xkyq8Q6PT3gi0RZKvHqIudL +DAXDVEGWNxEX9KwQ1nMtRkDZICx/W665kZiBD4XC3WuEkYlDL1ISVw3cmsbYdhb4 +IusIK5zNERi4ewTgDDxic8TbRpkQW189/M3IglrQipH5ixfF6oNSyoRVAa3KZqj5 +AgMBAAGjUzBRMB0GA1UdDgQWBBRI4mOaeunbu60GfjWTpHcvhb6/YTAfBgNVHSME +GDAWgBRI4mOaeunbu60GfjWTpHcvhb6/YTAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBCwUAA4IBAQCUOXddlGoU+Ni85D0cRjYYxyx8a5Rwngp+kztttT/5l3Ch +5JMZyl/xcaTryh37BG3+NuqKR1zHtcLpq/+xaCrwBQ8glJofF+1n9w4zBL9nrH5c +O5NgG7+u/sfB+xdqMVdoBBqfm1Roq7O1T/kBXis1+5ZtBlj+7WIKeWWTZGLTrHV+ +MW5RDOmMoLkqT5qzpR9Yf7UChPVrvKGs4Kd+fYJeb0R5W6mvZQ6/FrsLwAWLC2Q1 +rW1u4zIkO0ih5qd52dl/73u7SWqzWxPy1ynwqJefD4AA0uaJYtMlXHK2vYjutHvY +K7301gzc5fueqo1YMmPgsjjsj+ErR1t0ve7faOBy +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.key b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.key new file mode 100644 index 0000000000000..a6de1f9958d32 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/ca.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,0F6B57727499DA47 + +OmK77UnFtk/zNEbNTxNJz73D2XWFDWLyHCDZPEXkX55vch/pXkkfVbWbPBFv35nA +LKni0j802Qnc1D4V3BUSmVWHk9SfjI5nlcDkSELbgCOpuZkf6Bmk8FgLfV42BFxn +lAiY+oBB4VV+rxA+HUV6CiWWrTgSjkvFyXCBZzcTEPdF2ifWerjsWKOjQZJtmvMX +J5DhYCCp1/n4R/OQpYxQiOqJdUxbKx4k0h139ySK2PggdL17w1a7AuQnHwJO3+ic +1IntPKD/ZhpAPPzq8A5R5jZyvrSj9Dgv94PXAQ5xTZWnZd2nuJtbkrYJ47pBR3Re 
+R2aZdF/N8ljG1TYHuJXdiL3A80Y3AS00TFNgSAZKSz5Ktt6zI2EAZu9xdHd8EfUm +m3qJmfce9P9cCBzo7DLGHwRMfu9hEFWN9dRD8KWNcB+ahQ1/jItzi25yZM6vD6+S +ZVUzegybeYlMwPks3YObX9IdUSwAd9F76SVwHCsziKQW4RfETaShG/oRNqq04nqA +E//KUl5bfTuv8jumyMlg6iiqIDQAUvzI74mWe2lIy6rglm2rR39SN4NxSrnTwoz4 +KAf+kHWJVyxFqEYs+dqboRWpRfQac3+iYoIlZFob/nRhNyKnccTkHtjh7+1C8CXI +sYXhuJZLCoiXh990M9t1ct0hqfWLNALlEsJesfRG8/fvi+LZd9i3fyCjrM+z96/G +/2zQzdga4bOs3ZEBluYFYkhHRJw1rAF3LTcWYvjP0gjZYVQki7AsLb0me1selS6O +P1bXaLaSUvMsAVO0wOtHMXAoBgEybP4+OonLiMScjdQZ2KRQ8L8OwzuGt0yguPRy +7wQv4NrH8LQu+X7tlQox28kascZUNHxORbh9M/wWx/2htw88uXWb5vxbDe30Rras +mTg0Gxky/88ZWvYxr7PlhBRrrfkJQ9sF/RyygUFhpQaXTwspkpF+MZv+1X6ROHqR +OueSa606FrptZ5n4RRPjq0hVZQgWKMAlIxNSum+gFn/Z7Q9I6gKrGFxjkD65L1kK +BbvbHAomiTyphrMtBRP52VqsFr4NxCWzxr/ZSlwaxTEid2vYg3zm7ls4dHYjUiNR +cs/JZJTkXn2aVaILSQkr9/I0eOOH9t/APSXHY8urQuYsDdmOOL7J2tlh3w1ivP8A +vVeomdUr2jgn53pBzbaLlTfsZ9+UneuLcztLfqN+BydQq1bKWvn2j3GvUkmhE//M ++fpo+uGlslMLh8rjtRH1y9rtCKhLgIxLO4U/ZJksFcJAqF3mR+Xxkrf82LUrAg8x +Oj++3QhOJE7f+vKog8b0gGrySSwzII2Ar7KiJDVJaZpmbbXToBlcC7xoksN3Ra0E +15WxKBSRqb7gi2+ml02rwtFMzq93H05Uoa9mG8uf1QH8t/+o6fniFx5N5kKWmPMy +shXjaYg7NzEBAkxI4VO41faMxEj/CUV0klQDPbnAsTCrcYu7CS2lml3e0zVf6RB8 +plXee99DiWpHZTRoGzpInK3DpnGRP1Frgl1KyhT+HayFZeYSMHfVSFPk3CKKmtEp +r+J/SrpGnEx0NKK3f+MxflZfnMIvgjGxkHdgSaDpz9iTjveq176Bq1GmNLALotOq +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.crt b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.crt new file mode 100644 index 0000000000000..8ffb02e3d5794 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIVAOSHUsKiRx+ekWEEmfI2Q2q3B5hoMA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTE5MDQzMDAzNTU0NloXDTI4MTEyODAzNTU0NlowFDESMBAG +A1UEAxMJdHJhbnNwb3J0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +wBaoGJ9vv9yFxCOg24CsVfwSThOPnea8oujexGZYDgKkCdtcVn03tlyomjOra/dL +PJ0zOvUyktTxv022VQNhkJ/PO+w/NKpHBHaAVZE0o2zvUf8xQqXoHw0S6rAhurs5 +50r8QRkh1Z3ky3uOcFs0pXYCR/2ZVmQNSBhqmhUSK5y0VURot1MtPMw1SeqyabZQ +upDTJ6um/zk2LalfChKJ3vGQGEW7AGfv10eIWSmqQx6rLWAGO4MDelbZhUUr5iFc +D4fW0/MNUXJHTBO5Dyq6n63Wsm0jTYK72bSVw8LZS+uabQCtcHtKUZh38uUEUCjp +MDVY7YmDv0i8qx/MvWasbwIDAQABo4HgMIHdMB0GA1UdDgQWBBQwoESvk9jbbTax +/+c5MCAFEvWW5TAfBgNVHSMEGDAWgBRI4mOaeunbu60GfjWTpHcvhb6/YTCBjwYD +VR0RBIGHMIGEgglsb2NhbGhvc3SCF2xvY2FsaG9zdDYubG9jYWxkb21haW42hwR/ +AAABhxAAAAAAAAAAAAAAAAAAAAABggpsb2NhbGhvc3Q0ggpsb2NhbGhvc3Q2ghVs +b2NhbGhvc3QubG9jYWxkb21haW6CF2xvY2FsaG9zdDQubG9jYWxkb21haW40MAkG +A1UdEwQCMAAwDQYJKoZIhvcNAQELBQADggEBAIQ8/PLfsZ1eKOWW74a4h/Uh5eh8 +u9Led1v+U9tszmULN8JoYSEgyql6zy2pJOuIVLwI9cUvrcypUSDL53NmWhTGAjEL +jbww/G1cngBh5cBzAPq3lRL2lwc8j3ZZ16I1eNyWastvBDdtANlDArCUamZoboBm +HE/jrssC9DOQhxAraiitH3YqjquqztEp1zIuqRI0qYTDFNPzyfyXIyCFIT+3eVI5 +22MqjFL+9IDuoET+VU1i22LhF32TEPotz2hfZTFddql0V1IOJQuVkDkQGFvaJMFy +Xw7d4orV3sxzQYd7muCoaao7g/F675KqpZiiVHqKxTOLafF/MPcfLhH6xZk= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.key b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.key new file mode 100644 index 0000000000000..f540e17202492 --- /dev/null +++ b/x-pack/plugin/security/qa/basic-enable-security/src/test/resources/ssl/transport.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,0B9EFA0829A750FB + 
+NCrPD7gkQ4Jr5/xIiohWILW3nO/WmNjApqOIc5g/wX/xJpk/554f8zCZ8dUD0D2E +ZW+z7Yj8GWKB0E6+hQZ+3ZUHLYASYSpSDVjg8UaaCxQyoVcUhshahFprqlzgU/An +Er8TbrGvhH0VmNlcQhaImqCOk41Hf8gjrxrtoLKbk3DfTk/Uuv4Jlsz4X+oSBVZN +fezIN70IZvGLKu7O3T9DeVLV1bLL6hNGIXnYe+FzLomMck2UoFv6uGS1VyFIGNf0 +ly80NGgdWTGxzLmiiGCgm5gbqbIehMsei1CC3jZIcfgfGyp4NVvF4HxFxZLTR3kY +YqzBWta/PoY6XXOlLFZupYt/YMt9hU6It9xdudPyNxwSuFXU66Fc08Ljj151iyhv +Ggf88jo9xSVvKOlqqHN6dY/xo9CfzTyuldG4jsKVHgGosSGghksjZ+PpHc7Mo5aP +S/UofhQgApJgU30TQPiQuJ+my/h9CiJyIgP7HnZtltwxg1k3dj+LxlpRKvjTOfuc +epOFmPeIdPkrQDir0j9+h+yoMgeqoT2unUYXw/qx5SVQxB5ckajLmJkUJPej9U3O +wASqNcWCTBEkGt102RU8o6lywdzBvfTB7gegR6oDvRfaxHOiUrRT/IwgszRfIdoC +fZa7Pb9pUuR3oY4uduDYgIKnxJhhQF2ERVXsfQeyxdiHEXvRnBFoAhoDjO8rWv07 +xiFPVMCAqXPImmdI34QezuzV2MUIVlKyeovbf+Kjv/Uat3zTj5FbmyVHcmPXpTY7 +t5iTQG+nQwz6UGcM5lF40EWrRdCzHEXNszwEY3Oz8D5rgBa6kxHYjcG9rzbTGlk2 +gsKdKA0am0hnCCJdTxbK5AkDcCWn/eclw0RPpbhFv5anvHTJ5WAWE7ZaACRuSfvy +UbNRGiWo4cNcR7+PGgV5184zjwJOql1mz+I79tlpxtK/FazP61WAYKOeEx1paKXX +syq+WDWgoZu/RzKDyTu10NUgq9J/IXDBn8/JjOVPCmPhMMLxNdoUhMfO4Ij9+3Jv +mH6ZaU6E+NZuc5N4Ivws42PwNY9FoyuLLgMBbezjhepQrDveHUK5v0weWqEapZ7Z +4KkFAeK7pjuItn5Of+233cp9Y68G8NrwMLQzI23kebNJwwzUMf3DnUJCXiy3PvrF +WpA0Q6/FspJgG3x2AXKo2QsHxydW+4w4pkawS9TCl0E03D7V6Gf17/HOxPDSH972 ++Yzzv8IkaOw5g+paeX9+tHjDFaxuvKiFyn/J7xYZAAQUoa2uQu440RakE73qLO34 +wtWdRzvIYitwLNJSfSojQDNoXuv8eyI/hP573cs6pmbheKXG1XKsWfpfj8sI7OkH +AdjRyeToSKbZ8yCn2vp0jyaRocOucu5oo7c0v+IocWOgdw+913EToJ6G3ck1heVR +b/U04VqKkXowO1YK7xDBAalMxyWq40spIKCC8HBBlng3vfUKqF46q9bMpesXnwPr +/00JfDVhFbqkJbqB8UYpjs9MN+vV5A7lsYbObom4pV25FSnwNSyxK0bhWGfZgutI +pjeQDkvHNG606AsqLz6SmIJP/GBBSMwvT3PGMPOO5XcayKeK3cbOQYJ0Yh7Muoqe +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/qa/build.gradle b/x-pack/plugin/security/qa/build.gradle new file mode 100644 index 0000000000000..74412a094b489 --- /dev/null +++ b/x-pack/plugin/security/qa/build.gradle @@ -0,0 +1,18 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.build' +test.enabled = false + +dependencies { + compile project(':test:framework') +} + +subprojects { + project.tasks.withType(RestIntegTestTask) { + final File xPackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources') + project.copyRestSpec.from(xPackResources) { + include 'rest-api-spec/api/**' + } + } +} + diff --git a/x-pack/plugin/security/qa/security-basic/build.gradle b/x-pack/plugin/security/qa/security-basic/build.gradle new file mode 100644 index 0000000000000..864a1e5180934 --- /dev/null +++ b/x-pack/plugin/security/qa/security-basic/build.gradle @@ -0,0 +1,35 @@ +import org.elasticsearch.gradle.http.WaitForHttpResource + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +integTestCluster { + numNodes=2 + + setting 'xpack.ilm.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.http.ssl.enabled', 'false' + setting 'xpack.security.transport.ssl.enabled', 'false' + setting 'xpack.security.authc.token.enabled', 'true' + setting 
'xpack.security.authc.api_key.enabled', 'true' + + extraConfigFile 'roles.yml', project.projectDir.toPath().resolve('src/test/resources/roles.yml') + setupCommand 'setupUser#admin_user', 'bin/elasticsearch-users', 'useradd', 'admin_user', '-p', 'admin-password', '-r', 'superuser' + setupCommand 'setupUser#security_test_user', 'bin/elasticsearch-users', 'useradd', 'security_test_user', '-p', 'security-test-password', '-r', 'security_test_role' + + waitCondition = { node, ant -> + WaitForHttpResource http = new WaitForHttpResource("http", node.httpUri(), numNodes) + http.setUsername("admin_user") + http.setPassword("admin-password") + return http.wait(5000) + } +} diff --git a/x-pack/plugin/security/qa/security-basic/src/test/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java b/x-pack/plugin/security/qa/security-basic/src/test/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java new file mode 100644 index 0000000000000..837c9ac4d8ded --- /dev/null +++ b/x-pack/plugin/security/qa/security-basic/src/test/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java @@ -0,0 +1,296 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.xpack.security.authc.InternalRealms; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Base64; +import java.util.Map; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class SecurityWithBasicLicenseIT extends ESRestTestCase { + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("security_test_user", new SecureString("security-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + public void testWithBasicLicense() throws Exception { + checkLicenseType("basic"); + checkSecurityEnabled(false); + checkAuthentication(); + checkHasPrivileges(); + checkIndexWrite(); + assertFailToGetToken(); + assertFailToGetApiKey(); + assertAddRoleWithDLS(false); + assertAddRoleWithFLS(false); + } + + public void testWithTrialLicense() throws Exception { + startTrial(); + String accessToken = null; + Tuple keyAndId = null; + try { + 
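            // With the trial license active, token-based auth, API keys and DLS/FLS role definitions
            // below are expected to succeed; the finally block reverts to a basic license and verifies
            // that the same operations are rejected again.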
checkLicenseType("trial"); + checkSecurityEnabled(true); + checkAuthentication(); + checkHasPrivileges(); + checkIndexWrite(); + accessToken = getAccessToken(); + keyAndId = getApiKeyAndId(); + assertAuthenticateWithToken(accessToken, true); + assertAuthenticateWithApiKey(keyAndId, true); + assertAddRoleWithDLS(true); + assertAddRoleWithFLS(true); + } finally { + revertTrial(); + assertAuthenticateWithToken(accessToken, false); + assertAuthenticateWithApiKey(keyAndId, false); + assertFailToGetToken(); + assertFailToGetApiKey(); + assertAddRoleWithDLS(false); + assertAddRoleWithFLS(false); + } + } + + private void startTrial() throws IOException { + Response response = client().performRequest(new Request("POST", "/_license/start_trial?acknowledge=true")); + assertOK(response); + } + + private void revertTrial() throws IOException { + client().performRequest(new Request("POST", "/_license/start_basic?acknowledge=true")); + } + + private void checkLicenseType(String type) throws IOException { + Map license = getAsMap("/_license"); + assertThat(license, notNullValue()); + assertThat(ObjectPath.evaluate(license, "license.type"), equalTo(type)); + } + + private void checkSecurityEnabled(boolean allowAllRealms) throws IOException { + Map usage = getAsMap("/_xpack/usage"); + assertThat(usage, notNullValue()); + assertThat(ObjectPath.evaluate(usage, "security.available"), equalTo(true)); + assertThat(ObjectPath.evaluate(usage, "security.enabled"), equalTo(true)); + for (String realm : Arrays.asList("file", "native")) { + assertThat(ObjectPath.evaluate(usage, "security.realms." + realm + ".available"), equalTo(true)); + assertThat(ObjectPath.evaluate(usage, "security.realms." + realm + ".enabled"), equalTo(true)); + } + for (String realm : InternalRealms.getConfigurableRealmsTypes()) { + if (realm.equals("file") == false && realm.equals("native") == false) { + assertThat(ObjectPath.evaluate(usage, "security.realms." + realm + ".available"), equalTo(allowAllRealms)); + assertThat(ObjectPath.evaluate(usage, "security.realms." 
+ realm + ".enabled"), equalTo(false)); + } + } + } + + private void checkAuthentication() throws IOException { + final Map auth = getAsMap("/_security/_authenticate"); + // From file realm, configured in build.gradle + assertThat(ObjectPath.evaluate(auth, "username"), equalTo("security_test_user")); + assertThat(ObjectPath.evaluate(auth, "roles"), contains("security_test_role")); + } + + private void checkHasPrivileges() throws IOException { + final Request request = new Request("GET", "/_security/user/_has_privileges"); + request.setJsonEntity("{" + + "\"cluster\": [ \"manage\", \"monitor\" ]," + + "\"index\": [{ \"names\": [ \"index_allowed\", \"index_denied\" ], \"privileges\": [ \"read\", \"all\" ] }]" + + "}"); + Response response = client().performRequest(request); + final Map auth = entityAsMap(response); + assertThat(ObjectPath.evaluate(auth, "username"), equalTo("security_test_user")); + assertThat(ObjectPath.evaluate(auth, "has_all_requested"), equalTo(false)); + assertThat(ObjectPath.evaluate(auth, "cluster.manage"), equalTo(false)); + assertThat(ObjectPath.evaluate(auth, "cluster.monitor"), equalTo(true)); + assertThat(ObjectPath.evaluate(auth, "index.index_allowed.read"), equalTo(true)); + assertThat(ObjectPath.evaluate(auth, "index.index_allowed.all"), equalTo(false)); + assertThat(ObjectPath.evaluate(auth, "index.index_denied.read"), equalTo(false)); + assertThat(ObjectPath.evaluate(auth, "index.index_denied.all"), equalTo(false)); + } + + private void checkIndexWrite() throws IOException { + final Request request1 = new Request("POST", "/index_allowed/_doc"); + request1.setJsonEntity("{ \"key\" : \"value\" }"); + Response response1 = client().performRequest(request1); + final Map result1 = entityAsMap(response1); + assertThat(ObjectPath.evaluate(result1, "_index"), equalTo("index_allowed")); + assertThat(ObjectPath.evaluate(result1, "result"), equalTo("created")); + + final Request request2 = new Request("POST", "/index_denied/_doc"); + request2.setJsonEntity("{ \"key\" : \"value\" }"); + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request2)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat(e.getMessage(), containsString("unauthorized for user [security_test_user]")); + } + + private Request buildGetTokenRequest() { + final Request getToken = new Request("POST", "/_security/oauth2/token"); + getToken.setJsonEntity("{\"grant_type\" : \"password\",\n" + + " \"username\" : \"security_test_user\",\n" + + " \"password\" : \"security-test-password\"\n" + + "}"); + return getToken; + } + + private Request buildGetApiKeyRequest() { + final Request getApiKey = new Request("POST", "/_security/api_key"); + getApiKey.setJsonEntity("{\"name\" : \"my-api-key\",\n" + + " \"expiration\" : \"2d\",\n" + + " \"role_descriptors\" : {} \n" + + "}"); + return getApiKey; + } + + private String getAccessToken() throws IOException { + Response getTokenResponse = adminClient().performRequest(buildGetTokenRequest()); + assertThat(getTokenResponse.getStatusLine().getStatusCode(), equalTo(200)); + final Map tokens = entityAsMap(getTokenResponse); + return ObjectPath.evaluate(tokens, "access_token").toString(); + } + + private Tuple getApiKeyAndId() throws IOException { + Response getApiKeyResponse = adminClient().performRequest(buildGetApiKeyRequest()); + assertThat(getApiKeyResponse.getStatusLine().getStatusCode(), equalTo(200)); + final Map apiKeyResponseMap = entityAsMap(getApiKeyResponse); + 
assertOK(getApiKeyResponse); + return new Tuple<>(ObjectPath.evaluate(apiKeyResponseMap, "api_key").toString(), + ObjectPath.evaluate(apiKeyResponseMap, "id").toString()); + } + + private void assertFailToGetToken() { + ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(buildGetTokenRequest())); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat(e.getMessage(), containsString("current license is non-compliant for [security tokens]")); + } + + private void assertFailToGetApiKey() { + ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(buildGetApiKeyRequest())); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat(e.getMessage(), containsString("current license is non-compliant for [api keys]")); + } + + private void assertAuthenticateWithToken(String accessToken, boolean shouldSucceed) throws IOException { + assertNotNull("access token cannot be null", accessToken); + Request request = new Request("GET", "/_security/_authenticate"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.addHeader(HttpHeaders.AUTHORIZATION, "Bearer " + accessToken); + request.setOptions(options); + if (shouldSucceed) { + Response authenticateResponse = client().performRequest(request); + assertOK(authenticateResponse); + assertEquals("security_test_user", entityAsMap(authenticateResponse).get("username")); + } else { + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat(e.getMessage(), containsString("missing authentication credentials for REST request")); + } + } + + private void assertAuthenticateWithApiKey(Tuple keyAndId, boolean shouldSucceed) throws IOException { + assertNotNull("API Key and Id cannot be null", keyAndId); + Request request = new Request("GET", "/_security/_authenticate"); + RequestOptions.Builder options = request.getOptions().toBuilder(); + String headerValue = Base64.getEncoder().encodeToString((keyAndId.v2() + ":" + keyAndId.v1()).getBytes(StandardCharsets.UTF_8)); + options.addHeader(HttpHeaders.AUTHORIZATION, "ApiKey " + headerValue); + request.setOptions(options); + if (shouldSucceed) { + Response authenticateResponse = client().performRequest(request); + assertOK(authenticateResponse); + assertEquals("admin_user", entityAsMap(authenticateResponse).get("username")); + } else { + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat(e.getMessage(), containsString("missing authentication credentials for REST request")); + } + } + + private void assertAddRoleWithDLS(boolean shouldSucceed) throws IOException { + final Request addRole = new Request("POST", "/_security/role/dlsrole"); + addRole.setJsonEntity("{\n" + + " \"cluster\": [\"all\"],\n" + + " \"indices\": [\n" + + " {\n" + + " \"names\": [ \"index1\", \"index2\" ],\n" + + " \"privileges\": [\"all\"],\n" + + " \"query\": \"{\\\"match\\\": {\\\"title\\\": \\\"foo\\\"}}\" \n" + + " }\n" + + " ],\n" + + " \"run_as\": [ \"other_user\" ],\n" + + " \"metadata\" : { // optional\n" + + " \"version\" : 1\n" + + " }\n" + + "}"); + if (shouldSucceed) { + Response addRoleResponse = adminClient().performRequest(addRole); + assertThat(addRoleResponse.getStatusLine().getStatusCode(), 
equalTo(200)); + } else { + ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(addRole)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat(e.getMessage(), containsString("current license is non-compliant for [field and document level security]")); + } + } + + private void assertAddRoleWithFLS(boolean shouldSucceed) throws IOException { + final Request addRole = new Request("POST", "/_security/role/dlsrole"); + addRole.setJsonEntity("{\n" + + " \"cluster\": [\"all\"],\n" + + " \"indices\": [\n" + + " {\n" + + " \"names\": [ \"index1\", \"index2\" ],\n" + + " \"privileges\": [\"all\"],\n" + + " \"field_security\" : { // optional\n" + + " \"grant\" : [ \"title\", \"body\" ]\n" + + " }\n" + + " }\n" + + " ],\n" + + " \"run_as\": [ \"other_user\" ],\n" + + " \"metadata\" : { // optional\n" + + " \"version\" : 1\n" + + " }\n" + + "}"); + if (shouldSucceed) { + Response addRoleResponse = adminClient().performRequest(addRole); + assertThat(addRoleResponse.getStatusLine().getStatusCode(), equalTo(200)); + } else { + ResponseException e = expectThrows(ResponseException.class, () -> adminClient().performRequest(addRole)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat(e.getMessage(), containsString("current license is non-compliant for [field and document level security]")); + } + } +} diff --git a/x-pack/plugin/security/qa/security-basic/src/test/resources/roles.yml b/x-pack/plugin/security/qa/security-basic/src/test/resources/roles.yml new file mode 100644 index 0000000000000..9b2171257fc61 --- /dev/null +++ b/x-pack/plugin/security/qa/security-basic/src/test/resources/roles.yml @@ -0,0 +1,8 @@ +# A basic role that is used to test security +security_test_role: + cluster: + - monitor + - "cluster:admin/xpack/license/*" + indices: + - names: [ "index_allowed" ] + privileges: [ "read", "write", "create_index" ] diff --git a/x-pack/plugin/security/qa/tls-basic/build.gradle b/x-pack/plugin/security/qa/tls-basic/build.gradle new file mode 100644 index 0000000000000..9f5ef26f6e6a6 --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/build.gradle @@ -0,0 +1,48 @@ +import org.elasticsearch.gradle.http.WaitForHttpResource + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +forbiddenPatterns { + exclude '**/*.key' + exclude '**/*.p12' +} + +File caFile = project.file('src/test/resources/ssl/ca.crt') + +integTestCluster { + numNodes=2 + + extraConfigFile 'http.key', project.projectDir.toPath().resolve('src/test/resources/ssl/http.key') + extraConfigFile 'http.crt', project.projectDir.toPath().resolve('src/test/resources/ssl/http.crt') + extraConfigFile 'transport.key', project.projectDir.toPath().resolve('src/test/resources/ssl/transport.key') + extraConfigFile 'transport.crt', project.projectDir.toPath().resolve('src/test/resources/ssl/transport.crt') + extraConfigFile 'ca.crt', caFile + + setting 'xpack.ilm.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + setting 'xpack.security.http.ssl.enabled', 
'true' + setting 'xpack.security.http.ssl.certificate', 'http.crt' + setting 'xpack.security.http.ssl.key', 'http.key' + setting 'xpack.security.http.ssl.key_passphrase', 'http-password' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.security.transport.ssl.certificate', 'transport.crt' + setting 'xpack.security.transport.ssl.key', 'transport.key' + setting 'xpack.security.transport.ssl.key_passphrase', 'transport-password' + setting 'xpack.security.transport.ssl.certificate_authorities', 'ca.crt' + + waitCondition = { node, ant -> + WaitForHttpResource http = new WaitForHttpResource("https", node.httpUri(), numNodes) + http.setCertificateAuthorities(caFile) + return http.wait(5000) + } +} + diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/java/org/elasticsearch/xpack/security/TlsWithBasicLicenseIT.java b/x-pack/plugin/security/qa/tls-basic/src/test/java/org/elasticsearch/xpack/security/TlsWithBasicLicenseIT.java new file mode 100644 index 0000000000000..c8d25ca0fc35b --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/java/org/elasticsearch/xpack/security/TlsWithBasicLicenseIT.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URL; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.iterableWithSize; +import static org.hamcrest.Matchers.notNullValue; + +public class TlsWithBasicLicenseIT extends ESRestTestCase { + private static Path httpTrustStore; + + @BeforeClass + public static void findTrustStore() throws Exception { + final URL resource = TlsWithBasicLicenseIT.class.getResource("/ssl/ca.p12"); + if (resource == null) { + throw new FileNotFoundException("Cannot find classpath resource /ssl/ca.p12"); + } + httpTrustStore = PathUtils.get(resource.toURI()); + } + + @AfterClass + public static void cleanupStatics() { + httpTrustStore = null; + } + + @Override + protected String getProtocol() { + return "https"; + } + + @Override + protected Settings restClientSettings() { + return Settings.builder() + .put(TRUSTSTORE_PATH, httpTrustStore) + .put(TRUSTSTORE_PASSWORD, "password") + .build(); + } + + public void testWithBasicLicense() throws Exception { + checkLicenseType("basic"); + checkCertificateAPI(); + } + + public void testWithTrialLicense() throws Exception { + startTrial(); + try { + checkLicenseType("trial"); + checkCertificateAPI(); + } finally { + revertTrial(); + } + } + + private void startTrial() throws IOException { + Response response = client().performRequest(new Request("POST", "/_license/start_trial?acknowledge=true")); + assertOK(response); + } + + private void 
revertTrial() throws IOException { + client().performRequest(new Request("POST", "/_license/start_basic?acknowledge=true")); + } + + private void checkLicenseType(String type) throws IOException { + Map license = getAsMap("/_license"); + assertThat(license, notNullValue()); + assertThat(ObjectPath.evaluate(license, "license.type"), equalTo(type)); + } + + private void checkCertificateAPI() throws IOException { + Response response = client().performRequest(new Request("GET", "/_ssl/certificates")); + ObjectPath path = ObjectPath.createFromResponse(response); + final Object body = path.evaluate(""); + assertThat(body, instanceOf(List.class)); + final List certs = (List) body; + assertThat(certs, iterableWithSize(3)); + final List> certInfo = new ArrayList<>(); + for (int i = 0; i < certs.size(); i++) { + final Object element = certs.get(i); + assertThat(element, instanceOf(Map.class)); + final Map map = (Map) element; + certInfo.add(map); + assertThat(map.get("format"), equalTo("PEM")); + } + List paths = certInfo.stream().map(m -> String.valueOf(m.get("path"))).collect(Collectors.toList()); + assertThat(paths, containsInAnyOrder("http.crt", "transport.crt", "ca.crt")); + } + + +} + diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/README.asciidoc b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/README.asciidoc new file mode 100644 index 0000000000000..9ff94bf07869d --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/README.asciidoc @@ -0,0 +1,48 @@ += Keystore Details +This document details the steps used to create the certificate and keystore files in this directory. + +== Instructions on generating certificates +The certificates in this directory have been generated using elasticsearch-certutil (7.0.0 SNAPSHOT) + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil ca --pem --out=ca.zip --pass="ca-password" --days=3500 +unzip ca.zip +mv ca/ca.* ./ + +rm ca.zip +rmdir ca +----------------------------------------------------------------------------------------------------------- + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil cert --pem --name=http --out=http.zip --pass="http-password" --days=3500 \ + --ca-cert=ca.crt --ca-key=ca.key --ca-pass="ca-password" \ + --dns=localhost --dns=localhost.localdomain --dns=localhost4 --dns=localhost4.localdomain4 --dns=localhost6 --dns=localhost6.localdomain6 \ + --ip=127.0.0.1 --ip=0:0:0:0:0:0:0:1 + +unzip http.zip +mv http/http.* ./ + +rm http.zip +rmdir http +----------------------------------------------------------------------------------------------------------- + +[source,shell] +----------------------------------------------------------------------------------------------------------- +elasticsearch-certutil cert --pem --name=transport --out=transport.zip --pass="transport-password" --days=3500 \ + --ca-cert=ca.crt --ca-key=ca.key --ca-pass="ca-password" \ + --dns=localhost --dns=localhost.localdomain --dns=localhost4 --dns=localhost4.localdomain4 --dns=localhost6 --dns=localhost6.localdomain6 \ + --ip=127.0.0.1 --ip=0:0:0:0:0:0:0:1 + +unzip transport.zip +mv transport/transport.* ./ + +rm transport.zip +rmdir transport +----------------------------------------------------------------------------------------------------------- + +[source,shell] 
+----------------------------------------------------------------------------------------------------------- +keytool -importcert -file ca.crt -keystore ca.p12 -storetype PKCS12 -storepass "password" -alias ca +----------------------------------------------------------------------------------------------------------- diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.crt b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.crt new file mode 100644 index 0000000000000..5bcb6f77bc21b --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSTCCAjGgAwIBAgIUNsCMQBpQB3zJAC1iERdc7yADVw0wDQYJKoZIhvcNAQEL +BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l +cmF0ZWQgQ0EwHhcNMTkwMzI5MDUxMjEyWhcNMjgxMDI3MDUxMjEyWjA0MTIwMAYD +VQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMJL4SrJJsQpKFuHsNnWwzM9 +2Cnmsc7WzGEskV0ncSUloMxUZaZ8CJ2iuubN6KPe75ke8SS9vlNG3MEWRBVSPY4H +EJNcyiiI1w9c/yom6Kfvep1RvvRHlp+k/bDPzzuj4B8Dyg66TVYKRm+9uRWAUvZr +djhFB3cawbM1jD9ZaBLM4Qbdg0AlMqXWpkLPVtkD8lREPkAIhYxKx7TYqB1SbMg5 +ejfoRGF5qfl4luegWRlQKkOBCcJPZamcccNjDq9eXQm3vrp0/QEp0ODG14wU3B9R +G+2/yhh5KP3WWK/uksAmEv8YzG7UaCLNJRk/FuPz8uoSGLPM1e+2HWXsR9OnlF8C +AwEAAaNTMFEwHQYDVR0OBBYEFL+GbWzP3nPfx+OqvW5CYCqHN8ZlMB8GA1UdIwQY +MBaAFL+GbWzP3nPfx+OqvW5CYCqHN8ZlMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggEBAHZeLZ7yCvqQOJbQ3yoixYLVR33dSx/T/W5WQQGYcQ7TUZ4N +gXkV9kGD+9I/8NWgkttx4TTieWctyNPrhAqqWGuGvhCQ+WL8m67EPRiVdw7EY+61 +qlUbAdK39adDqbDeUI07dzd+wKlhwnHtd2dTcJEGluwLaU4ftuLA8DQNwzWxZVAW +EWzfTUgdc1SYTysE5C0d1Q9CbI+o0Na+CaW4DRqGh1OGyH7Fyck9WQp1nOAEQhD9 +sn4FOC4w+T92t/Ekpfcm5HHkYjGWK1EsCkRCj1m8QtyqBgByeXHCidH2pfKIuVdl +ZnaOfIkCQx49gLARjzzGp/OC/UfKVCWzpLHn7dY= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.key b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.key new file mode 100644 index 0000000000000..418d3ed062185 --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,67376A5606FB27E9 + +v4OAjurrB7Tc2mVswSeaaYAiFomvSQmre8DlC5VNvavzT6Hlx5hIyEVIttcNeTeD +Hj4d+JOp5OO5Ew5cWgo0jtR2QIjGbrQe8t8oedJwhEiYC0IfX0rItJv1iaz4WO+8 +hz4J1lwAI9wFabmXIeHx0q3ZqqIfSOoAepO8W2SqIj0KSz3tKRoYaX7AzZ27muLN +K2Mej1EX/ftgZZNgfU62gJzGGsdQecLc+UZBDVTPZL3PLZmQV0r1sBXaq56Qk78t +DsUyYwA4zvPBIPkfydTxobylt1pSeZ7Yyni+iQk4X7T4jj3Q6wKrwjPNJ6p8Xcwn +4BN37DIYPPBEp56EUCbxl+iMkfRoCjZdaqhycw4LjKB0wloY2Zko6FaYTd0qPZ/m +2GM8MvIQ9bc4t9Bef2VAXhb8IUXJ+ro+sB7vlQRSLQ1JwHPAPiIFyRmilezAaupA +2DNLBIlmgMzh5Lh6vIcyHQVxsCoJesmVQCyyBy4lFPU9afcYLWjzgnBhW2SikTpW +/lC3VDloUjIYfC3qYhbHIomsUMCGk3xHIwLw1cNFnf7c/RX1q5bBZrJ8q6GVh/Rb +ulHcuCm5g/Jvt8TM8c2WIE5mzwkoFIe/XVY33Lyk237qCsPlVWwFpxa0UtWVpDnk +uuubgI0cb+zehN2f5sgHtdbphNNTflZyW+Uk0lCbYGNakXBILePFmURsThW3gQ44 +g+zPaiGkbB1qwE/TS3Vz17j8DkgWRsEJP7IBsZ/ljaUcs3zujH6EKN9YtwyIeoHo +VHBuF4RGew2Ps0NoLGYanpvu01ZUUr2C0ZbDjXLBy8ajOc5zgyMCBead19T+piFw +iGvA8D7eILz1xzbAcX7dry06Mc9o/CbFcRMIis3LVvdSuZDoRk/cv0mKo6rq/1MS +VeYgPjJ8QWuhulIYkmNipTRdzMsXEafEdsp+GruKnNri0u/lirfhYAXDGp2GAttJ +zKnbPkHSJRt1xWgtimU+CnnpEOp+qd2yFNgT/Nn2yjrsPqLqTkEdzbh2DoCYGPHe +HoAcs+MePKfqBh+W2MEJ/ZdDVz93lKoDTuk2cjaVVe+7YBdHW0gQzfW5ArscadUV ++mSzhUm9AIhM/Gk6t7rgVoWyO6PvkTgENKFmUUQkHnJWaaDIzji2xFR114Huw5rN +gHPn8HOKPIhVu1UV2N/MFLrjjvn8bft/vLkSxZ3c7AgYkPr8Mmd0b8ufTOlk5a+W 
+hkR4D7WZ7Hgkj1NIvRbjxCXTHFbHZqKJHeTTNCpCUygIH5g8h7RGVPS0XKylpbr1 +2kZU/AwlPcAPba+UcTKXOvy02NmiV5Bg6qYc8rcxv6aXKPOrxeW3Iop/ZesF7Nnu +ccR+rI78cQIGD1gAo3xLJ10/p0Rb9R/pWfHUY499Oymc926qWaj3mEl+xOJXxWOr +3Uf4yMg8mrfcm3JW7clWy3l+/++CSWBS/zqUpXKy5CbVdR8XQNS5Pg0fDgwkrcbv +7TviQ+vYD7aEI0w6mviljPkYVTXNpnRHyF7VfaEYff8032GxW99D3zeK7dd6yP4k +W/oN5IwXCvnfrteNtqSOIPOWw9gAp4x4EzmCin77s8SgMHOGsPcEhA== +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.p12 b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.p12 new file mode 100644 index 0000000000000..e2db32e6ddc6c Binary files /dev/null and b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/ca.p12 differ diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.crt b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.crt new file mode 100644 index 0000000000000..cd0dcb680c2ae --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDszCCApugAwIBAgIVAJX8GTm+AWIicokE5npzZ2B3qad3MA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTE5MDMyOTA1MTIyNVoXDTI4MTAyNzA1MTIyNVowDzENMAsG +A1UEAxMEaHR0cDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMGvsPmg +4lKfd1ie6TZQLdCxfXy6MooLHac1wUxyvHcUxlbuSchj+A2gVPBk6VaCV8OO4X7T +MslTJKw5877m28Xzw+CmUgDsXAJJy2IvM8X0IP/xktkJQ3uSUReSW2650TFj9Zcm +Z3AtMblo+cNnZMNWJBW1G1QMHHKMY5kukaB7Ia6CBec60k2HrkS6xmsMgwQPBa/k +VlbHkI7RzbmxohVJFHL34EFhifEL0qkYU5MnZ8PjH8U749VoZOYcY1MKb2sw9iYn +JTOv1gIFhd4Sw37occxDVaqZU/1X90ijZyvB/AugxRfmpLb83ZRMdVeQTiiXqMkg +1g94h7hgPpLA9AkCAwEAAaOB4DCB3TAdBgNVHQ4EFgQUc/bPDUIvgLwg9xwf9CxP +ec84o1YwHwYDVR0jBBgwFoAUv4ZtbM/ec9/H46q9bkJgKoc3xmUwgY8GA1UdEQSB +hzCBhIIJbG9jYWxob3N0ghdsb2NhbGhvc3Q2LmxvY2FsZG9tYWluNocEfwAAAYcQ +AAAAAAAAAAAAAAAAAAAAAYIKbG9jYWxob3N0NIIKbG9jYWxob3N0NoIVbG9jYWxo +b3N0LmxvY2FsZG9tYWlughdsb2NhbGhvc3Q0LmxvY2FsZG9tYWluNDAJBgNVHRME +AjAAMA0GCSqGSIb3DQEBCwUAA4IBAQAJW7WWQkuNjDlQQ5H6bhMr2LhbC9TZWgFK +zWsIWuhd1QxiWbTp/Yegcbqs3hZ9MQtxU4egml/sMAdZSF3Kg3NeYtrHDj//oKYo +VSfTPNjQLG1/ckCM0RDfFYOV+Sb3ktau5QZGL+5ZDfcfPLSHCSHeP0tft2R03Hp4 +pOX8/xAVmv0hxE74X5qodQyNFdDa6rtRZESLzY1b+oaEhKM49MZCNZL9TvvNUkWC +hXdaVehqBVJkrlsnli6oqPBjpKNP2YkRG3eqy/Qd/sg6rwJqu/B0KBI8QBDkokSY +YORRviEmSe0+hmcBCTYZWN8WX3BrEPuGdBJXWi5G8GPGFg4rrOUE +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.key b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.key new file mode 100644 index 0000000000000..3b7571db54319 --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/http.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,A46C453D20DC86A7 + +eFBKmjJUmailcnfc1+a6lwR8G7sk4ff1De5hIYY8iNkpP6XVxZ/LrXttVF1x1SWy +YaUJL35Optzy4W+LglJgAdNo9XGaCsHuSi3z7aqYNdihSldKxDw3iIJEEuB63Lv7 +eu4pEYdOlRElEs71cmjMCSmg1pfeDRruShB9RUKy3Iw8tM6tV+t+vIaiVftb3i9O +AaTEUgAJqQjcISWy5JAxRwEwVDAhHe23vbVomxXlJKuTroezPFt5SxXQmdfNmP7B +D8iZR/Uf+7XdCFKC/7n6enYZfg5/IoaOO9sPG4bueFKmLAdXpmN1hKvJwIG1qKQT +Fz7x8FGi0S11BHDZMs5kJHBaiuXmq02mozb5XOFllQYl8+fsa4lscIFeQ/YbAjVo +g5nEVbqRUCSLy6F6JSX6SJB4ng/JMHzKLfhAUSpvotBxZbJ4IpNu06oCKjggiIoR +9z2YE6gR1pBJSyCDS8fJXtyLWN/WBdbvf1fw3t7utPFT606TYFOvt2KrSndcrTwb +EByWHJufxv8J+anrnnNM11RMTqhpi4MeXsaaA7jUCzh5QzxnT8imOyNDF8OVxEKk +Y9W9ToUchHojIJZGJhB2I1ndCUQaJF+OhLrjy2Zk/Imx3wBf3huyWAA8GNVQ04DD +mhDxWdZ30lJgxJH4xgk4l3nWBNAQ+X04lIyRi83tD/E9plX3EX2sWzBBHCSybh0C 
+bNHAQVMVaxEMTcCumk/USiuRcm4BL0495o4/debn9EExs95dw6pAhJoHZ8kc71GP +YOYNuQvz0Ljbu4ZO1/OgmNDtFuNV83GlDa6yUme/Di0SqmLzxUwPJIZ9I2dNtgLf +2emoUA9PSUl02Hcm5WN7AtmL/Pxz1joR/gKeNAII97PS9WFdqRS0ypwiiwp15mBU +LilEGB4V3laVJFw6sLFwPjWUYZCEhzSdAMnHfxrIZuhpfSi2W39w8Frqwx0JOUoX +HmogsyM/xqn9VelVNbWUP06IwJkcocWM1rzv3nkZOsKb5EhGOk1qrA/BKyajcazX +49x4wpIpJoz4tgStrlgxGZ0DeMT8PIrZGbZDhQ78MxnQe376CiXIOKtrZVOp6uoo +uDtYg9OiZZ2GDoSIgjAStpYbF4rkJI+3kyhR4oD8KfsC/rTG16hNCRnTIIiUECyU +1jWBLmqYWuMTiekb4asB6cWlQYwUUtSBt6ySB+zU+Cl0Wi3u+kXrsMthFnJE0GWB +EOCmHsvMqD+u0uArpJHpE0o9L3ePEkiDssU2MJdOLpb0AKW/uqAA/14a4JAr/y9Z +v+pUPDbjeoIXRNqzXkWEdHKZOnEGAE5QBLzScJqWU0YY7WP1+xpyoYapM37v9V/J +viNJW+gxvW9yZdxKzGm9P/UIjtndx2QnAa7mPgXOej/AMqpl+IkIJmvi13IEQTH2 +NuBghACrRp7YuffEroEs3P7fgCoiMHvabCiXkLhWoZqgVuiy72GuSwKEPK8bF30U +8u7lencUvnIRU9jL0kDaQL0kESw0f3dgE+ltQbgew5/rmqMgKpmDDoouLJf95wi2 +rvPGRb4QXpBO8V4/8VMPPJKT55ZDygjN45z1gwCZ2tbYtnKUOH82drx1TB2bvrso +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.crt b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.crt new file mode 100644 index 0000000000000..93121ed8b15ca --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIUe2Oa37SVQ5G1SpWiRS+abpjuNPMwDQYJKoZIhvcNAQEL +BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l +cmF0ZWQgQ0EwHhcNMTkwMzI5MDUxMjM1WhcNMjgxMDI3MDUxMjM1WjAUMRIwEAYD +VQQDEwl0cmFuc3BvcnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCN +v6vW4Bwj0eku+Ivm6d+HQwzfLqAdnM8tHAgC4qMDk7a/X5ckTesTk2VOmX775zkT +SJex5uGuEuyTgZVEXQhkpZUXURGhnQ8/exxg2m3cwTin+o1XN5xCo6FUfU2IqQrf +1Xd7RKfXv/YCUlS2xzQVnFRYAYpMMzTtUloc37PWz7TYA/ei7p06BCKLGR785ipF +MWq0S+QVmldOlp1vhZrD+KpgxFdo0Gd+e0loLO6321sXBEksy4K/5FaknDT9Fc/f +NUVmLaiRPi2nW6nIBjYyoVhIPztkVdxfj7jNdJCvshnEY29Hhd7ra9njLbyxzK2d +ACpyf54TCNU0j5qRcqe7AgMBAAGjgeAwgd0wHQYDVR0OBBYEFDSaYLY3KEm7L3jF +iW7CwCdoqcZjMB8GA1UdIwQYMBaAFL+GbWzP3nPfx+OqvW5CYCqHN8ZlMIGPBgNV +HREEgYcwgYSCCWxvY2FsaG9zdIIXbG9jYWxob3N0Ni5sb2NhbGRvbWFpbjaHBH8A +AAGHEAAAAAAAAAAAAAAAAAAAAAGCCmxvY2FsaG9zdDSCCmxvY2FsaG9zdDaCFWxv +Y2FsaG9zdC5sb2NhbGRvbWFpboIXbG9jYWxob3N0NC5sb2NhbGRvbWFpbjQwCQYD +VR0TBAIwADANBgkqhkiG9w0BAQsFAAOCAQEAa3T5oaPucZRx5JFxqkSTaIpcptvw +iiZLpaEooX0QVMy+PkmnzNh/xaN5qWWzKFV4ihSURtgH7gbPjBF7/pTqqO8Ekshp +36I6WTuhvps4nR4iCKaMFfyCBDKBvtTIySxE2kZJlyvgAqdB3bww79FfZt+ftxEt +E1m5nFDWCxaATY0foYpRUAJTPfmnFWDZfP4ZglSWmNSfQAdsQfwMlu09jXWXw7Yx +Cd39f9KW1aQT4RstHNWuQwgskv0vuTo2r0r+1YWTNCFQVuA8OD620CmJs85zGOnj +5L0YyLK1KvvuARfjr/skpze7F1Leir9+NiaJjXA+xfnkoGniJ2AUvPC8xg== +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.key b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.key new file mode 100644 index 0000000000000..eace4a2085989 --- /dev/null +++ b/x-pack/plugin/security/qa/tls-basic/src/test/resources/ssl/transport.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,EAC448D0A9AC0BE9 + +OnQAA8FLp6KDtp+AivEZB+TmTgAZ7oExMFLPL4o64i5onxLlJ15jG4MJ/YEyRZRa +T+KJLfO5BSW7EhgPQrR6UQ2yQtKGEwqq0nboIqEnEnpJQzzas2/d9rGIQFd4+519 +GNzstFVz8bn2+Z6xN0YS8/lVPUF6sxbt1DGy/BlSpUze63WNw5vxO4zmOu+8lzvT +ZkK8VGbeqv9j0YF/57NeTQ+b473f1cyGexpv7wsJD+62cmie9Z0iNoqkrV3yjNBk +gqoxWe8I42rKsNJiL/H0tyLMfICaVJs2urQjs9GHJGS/uy+MlzJCaEG5LlcTCXq3 +0d0e+OCWzHzzcQiLlzg0W/iCbMEpMvZcWlTaATRLcY96QKHMku9xaPLuO5BvL/XF +HLP0xHsc3no0HqX9/BRZFNdtc+7u3An46UEDmyjNZRkDSmhC/vVa6/+5pnp2eU2N 
+b88/cTmGYDdGoImcp9nIhBnyMqNmSeuho3g+w5oa03HyjlEQ5MS5VXHOnzzbH8lr +fTxVx/PPb3Ui8bs2X93JNm6atL8Yn75QkyX7iYypuzzhgq3wKETHpV4VJ2XtfbK8 +HAvMIc+IOWDA3ZYqIgkA8yn3RzVB+mTf/px0aWR53Ie90uLXsF8y2F7nuScnVDqG +9ul03RSPfeO+bUnyl5JsPnRN/0i/Ge1/SvX+j4L3ir65NEvrC2BPEfzTFXh6KMs4 +VF1USmWPAgfg7FjJjUvi/7/2+YOswFTuMdun9plV3heJ9AyCyYrPJuP7iXeF+L1Z +nsGfD4ZaZJ81zXW1VqTTSBdyreK+t9YjGVL7hEUhv6k/SOlyhcvaYubB7f3aTegU +IN+2T3hFCnBNgvqHKtAJ1FBgzatavJOk4Oo0aDKThwCrxp9MdxPRBOMrBnRHsdtN +6/u7hHObFNIIBoxdSMMdF4NZXkYSMYCM2dq+FvEzDCJ6krHxq1W71j109F+Ow0B7 +Je4jXboH3rrvnh9HtowYWFufB6GPTCmV822iC1u6DGwNTLPunMMLhASSENNR3Lk0 +xtfVAjcKA8/Xo7Is62OOa2ud2Z4Zjl2OdANZ7lgZScprfiHI6LrHAw9tPGcn9xJ2 +8dtQILCSkoHKRWlR41e9Xx+jRhOXl3GKWqFKAtH3jGQu5kH+IgN5IeUIerbKe0Yn +vk+2QqLQssnQPkQDGketuSMx/+vCbvQscmfA+bfNB+UIbwsjmyQk5W+mxz3pncvG +KCat5pCspfdj0oVHl+WEoAR3raXFwcZAWm57HtCm3kfPTSImDLT6c+sCriY48vDN +YhC3DtzKwVfmw44q/hs0QzgWmt6p22ZwNMTnVxvQSJeFfLV/nEwxtmM/WFXoyDqF +UoR/T2p+ngRyysCtmYhf6Qnq2J6CZum7MUvIVtSL7c+eazXbVTTHbLFNrcX/Zitl +Bf03Rz7ZJGSlqczdhi5gTSIC4dD9hLWbQlw3OcH45UiGw5tcBaAd86FxarPqE2/Z +NQSp88Q9peJfTxcY9QyQhDDUqyfMDoNMRRVfEMP5qNicH3Y5jkKiCJwGbqIC238/ +38wcJnIrkwMk2tttgq1Lr1QfWplOHxe51zJ7zXjnigMkt/AodqjjNQ== +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java index a8cb32d545e8d..82a58b94a83fe 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140SecureSettingsBootstrapCheck.java @@ -36,7 +36,7 @@ public BootstrapCheckResult check(BootstrapContext context) { if (fipsModeEnabled) { try (KeyStoreWrapper secureSettings = KeyStoreWrapper.load(environment.configFile())) { if (secureSettings != null && secureSettings.getFormatVersion() < 3) { - return BootstrapCheckResult.failure("Secure settings store is not of the latest version. Please use " + + return BootstrapCheckResult.failure("Secure settings store is not of the appropriate version. 
Please use " + "bin/elasticsearch-keystore create to generate a new secure settings store and migrate the secure settings there."); } } catch (IOException e) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index b5b10b06622fe..b56296b8c5768 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -188,9 +188,9 @@ import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor; import org.elasticsearch.xpack.security.rest.SecurityRestFilter; import org.elasticsearch.xpack.security.rest.action.RestAuthenticateAction; -import org.elasticsearch.xpack.security.rest.action.RestCreateApiKeyAction; -import org.elasticsearch.xpack.security.rest.action.RestGetApiKeyAction; -import org.elasticsearch.xpack.security.rest.action.RestInvalidateApiKeyAction; +import org.elasticsearch.xpack.security.rest.action.apikey.RestCreateApiKeyAction; +import org.elasticsearch.xpack.security.rest.action.apikey.RestGetApiKeyAction; +import org.elasticsearch.xpack.security.rest.action.apikey.RestInvalidateApiKeyAction; import org.elasticsearch.xpack.security.rest.action.oauth2.RestGetTokenAction; import org.elasticsearch.xpack.security.rest.action.oauth2.RestInvalidateTokenAction; import org.elasticsearch.xpack.security.rest.action.privilege.RestDeletePrivilegesAction; @@ -398,7 +398,8 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste securityIndex.set(new SecurityIndexManager(client, SecurityIndexManager.SECURITY_INDEX_NAME, clusterService)); - final TokenService tokenService = new TokenService(settings, Clock.systemUTC(), client, securityIndex.get(), clusterService); + final TokenService tokenService = new TokenService(settings, Clock.systemUTC(), client, getLicenseState(), + securityIndex.get(), clusterService); this.tokenService.set(tokenService); components.add(tokenService); @@ -438,8 +439,8 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste rolesProviders.addAll(extension.getRolesProviders(settings, resourceWatcherService)); } - final ApiKeyService apiKeyService = new ApiKeyService(settings, Clock.systemUTC(), client, securityIndex.get(), clusterService, - threadPool); + final ApiKeyService apiKeyService = new ApiKeyService(settings, Clock.systemUTC(), client, getLicenseState(), securityIndex.get(), + clusterService, threadPool); components.add(apiKeyService); final CompositeRolesStore allRolesStore = new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore, reservedRolesStore, privilegeStore, rolesProviders, threadPool.getThreadContext(), getLicenseState(), fieldPermissionsCache, apiKeyService); @@ -690,12 +691,18 @@ public void onIndexModule(IndexModule module) { indexService.cache().bitsetFilterCache(), indexService.getThreadPool().getThreadContext(), getLicenseState(), indexService.getScriptService())); - /* We need to forcefully overwrite the query cache implementation to use security's opt out query cache implementation. - * This impl. disabled the query cache if field level security is used for a particular request. If we wouldn't do - * forcefully overwrite the query cache implementation then we leave the system vulnerable to leakages of data to - * unauthorized users. 
*/ + /* + * We need to forcefully overwrite the query cache implementation to use security's opt-out query cache implementation. This + * implementation disables the query cache if field level security is used for a particular request. We have to forcefully + * overwrite the query cache implementation to prevent data leakage to unauthorized users. + */ module.forceQueryCacheProvider( - (settings, cache) -> new OptOutQueryCache(settings, cache, threadContext.get(), getLicenseState())); + (settings, cache) -> { + final OptOutQueryCache queryCache = + new OptOutQueryCache(settings, cache, threadContext.get(), getLicenseState()); + queryCache.listenForLicenseStateChanges(); + return queryCache; + }); } // in order to prevent scroll ids from being maliciously crafted and/or guessed, a listener is added that @@ -958,7 +965,7 @@ public Function> getFieldFilter() { public BiConsumer getJoinValidator() { if (enabled) { return new ValidateTLSOnJoin(XPackSettings.TRANSPORT_SSL_ENABLED.get(settings), - DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)) + DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings), settings) .andThen(new ValidateUpgradedSecurityIndex()) .andThen(new ValidateLicenseCanBeDeserialized()) .andThen(new ValidateLicenseForFIPS(XPackSettings.FIPS_MODE_ENABLED.get(settings))); @@ -969,18 +976,21 @@ public BiConsumer getJoinValidator() { static final class ValidateTLSOnJoin implements BiConsumer { private final boolean isTLSEnabled; private final String discoveryType; + private final Settings settings; - ValidateTLSOnJoin(boolean isTLSEnabled, String discoveryType) { + ValidateTLSOnJoin(boolean isTLSEnabled, String discoveryType, Settings settings) { this.isTLSEnabled = isTLSEnabled; this.discoveryType = discoveryType; + this.settings = settings; } @Override public void accept(DiscoveryNode node, ClusterState state) { License license = LicenseService.getLicense(state.metaData()); - if (license != null && license.isProductionLicense() && - isTLSEnabled == false && "single-node".equals(discoveryType) == false) { - throw new IllegalStateException("TLS setup is required for license type [" + license.operationMode().name() + "]"); + if (isTLSEnabled == false && "single-node".equals(discoveryType) == false + && XPackLicenseState.isTransportTlsRequired(license, settings)) { + throw new IllegalStateException("Transport TLS ([" + XPackSettings.TRANSPORT_SSL_ENABLED.getKey() + + "]) is required for license type [" + license.operationMode().description() + "] when security is enabled"); } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java index bc79fab0043aa..482f2d1e7cb23 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java @@ -80,7 +80,7 @@ public boolean available() { public boolean enabled() { if (licenseState != null) { return XPackSettings.SECURITY_ENABLED.get(settings) && - licenseState.isSecurityDisabledByTrialLicense() == false; + licenseState.isSecurityDisabledByLicenseDefaults() == false; } return false; } @@ -146,10 +146,18 @@ public void usage(ActionListener listener) { } static Map sslUsage(Settings settings) { - Map map = new HashMap<>(2); - map.put("http", singletonMap("enabled", HTTP_SSL_ENABLED.get(settings))); - map.put("transport", singletonMap("enabled", 
TRANSPORT_SSL_ENABLED.get(settings))); - return map; + // If security has been explicitly disabled in the settings, then SSL is also explicitly disabled, and we don't want to report + // these http/transport settings as they would be misleading (they could report `true` even though they were ignored) + // But, if security has not been explicitly configured, but has defaulted to off due to the current license type, + // then these SSL settings are still respected (that is SSL might be enabled, while the rest of security is disabled). + if (XPackSettings.SECURITY_ENABLED.get(settings)) { + Map map = new HashMap<>(2); + map.put("http", singletonMap("enabled", HTTP_SSL_ENABLED.get(settings))); + map.put("transport", singletonMap("enabled", TRANSPORT_SSL_ENABLED.get(settings))); + return map; + } else { + return Collections.emptyMap(); + } } static Map auditUsage(Settings settings) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java index 06d6446057bf3..b9c298023e04b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java @@ -111,8 +111,9 @@ public void app listener.onFailure(e); } } else if (SECURITY_ACTION_MATCHER.test(action)) { - if (licenseState.isSecurityDisabledByTrialLicense()) { - listener.onFailure(new ElasticsearchException("Security must be explicitly enabled when using a trial license. " + + if (licenseState.isSecurityDisabledByLicenseDefaults()) { + listener.onFailure(new ElasticsearchException("Security must be explicitly enabled when using a [" + + licenseState.getOperationMode().description() + "] license. 
" + "Enable security by setting [xpack.security.enabled] to [true] in the elasticsearch.yml file " + "and restart the node.")); } else { @@ -152,7 +153,15 @@ it to the action without an associated user (not via REST or transport - this is */ final String securityAction = actionMapper.action(action, request); authcService.authenticate(securityAction, request, SystemUser.INSTANCE, - ActionListener.wrap((authc) -> authorizeRequest(authc, securityAction, request, listener), listener::onFailure)); + ActionListener.wrap((authc) -> { + if (authc != null) { + authorizeRequest(authc, securityAction, request, listener); + } else if (licenseState.isAuthAllowed() == false) { + listener.onResponse(null); + } else { + listener.onFailure(new IllegalStateException("no authentication present but auth is allowed")); + } + }, listener::onFailure)); } private void authorizeRequest(Authentication authentication, String securityAction, Request request, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java index a2e870febbdf6..dee12f4a6bd7f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java @@ -59,6 +59,7 @@ protected void doExecute(Task task, SamlAuthenticateRequest request, ActionListe listener.onFailure(new IllegalStateException("Cannot find AuthenticationResult on thread context")); return; } + assert authentication != null : "authentication should never be null at this point"; final Map tokenMeta = (Map) result.getMetadata().get(SamlRealm.CONTEXT_TOKEN_DATA); tokenService.createUserToken(authentication, originatingAuthentication, ActionListener.wrap(tuple -> { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java index e0d304f77a071..5d5442803e3af 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java @@ -72,7 +72,11 @@ private void authenticateAndCreateToken(CreateTokenRequest request, ActionListen authenticationService.authenticate(CreateTokenAction.NAME, request, authToken, ActionListener.wrap(authentication -> { request.getPassword().close(); - createToken(request, authentication, originatingAuthentication, true, listener); + if (authentication != null) { + createToken(request, authentication, originatingAuthentication, true, listener); + } else { + listener.onFailure(new UnsupportedOperationException("cannot create token if authentication is not allowed")); + } }, e -> { // clear the request password request.getPassword().close(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java index e99b822e1dca1..569e8496aef83 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.audit; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.transport.TransportMessage; import org.elasticsearch.xpack.core.security.authc.Authentication; @@ -72,4 +73,13 @@ void runAsDenied(String requestId, Authentication authentication, String action, void runAsDenied(String requestId, Authentication authentication, RestRequest request, AuthorizationInfo authorizationInfo); + /** + * This is a "workaround" method to log index "access_granted" and "access_denied" events for actions not tied to a + * {@code TransportMessage}, or when the connection is not 1:1, i.e. several audit events for an action associated with the same + * message. It is currently only used to audit the resolved index (alias) name for each {@code BulkItemRequest} comprised by a + * {@code BulkShardRequest}. We should strive to not use this and TODO refactor it out! + */ + void explicitIndexAccessEvent(String requestId, AuditLevel eventType, Authentication authentication, String action, String indices, + String requestName, TransportAddress remoteAddress, AuthorizationInfo authorizationInfo); + } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java index 38bb93d8bcf50..ddf6d49a632ae 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.audit; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.transport.TransportMessage; @@ -222,4 +223,16 @@ public void runAsDenied(String requestId, Authentication authentication, RestReq } } } + + @Override + public void explicitIndexAccessEvent(String requestId, AuditLevel eventType, Authentication authentication, String action, + String indices, String requestName, TransportAddress remoteAddress, + AuthorizationInfo authorizationInfo) { + if (licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.explicitIndexAccessEvent(requestId, eventType, authentication, action, indices, requestName, remoteAddress, + authorizationInfo); + } + } + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index 03d1d5045262f..f4e59f8c4dbf7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -443,6 +443,46 @@ public void accessGranted(String requestId, Authentication authentication, Strin } } + @Override + public void explicitIndexAccessEvent(String requestId, AuditLevel eventType, Authentication authentication, String action, String index, + String requestName, TransportAddress remoteAddress, AuthorizationInfo authorizationInfo) { + assert eventType == ACCESS_DENIED || eventType 
== AuditLevel.ACCESS_GRANTED || eventType == SYSTEM_ACCESS_GRANTED; + final String[] indices = index == null ? null : new String[] { index }; + final User user = authentication.getUser(); + final boolean isSystem = SystemUser.is(user) || XPackUser.is(user); + if (isSystem && eventType == ACCESS_GRANTED) { + eventType = SYSTEM_ACCESS_GRANTED; + } + if (events.contains(eventType)) { + if (eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(user), Optional.of(effectiveRealmName(authentication)), + Optional.of(authorizationInfo), Optional.ofNullable(indices))) == false) { + final LogEntryBuilder logEntryBuilder = new LogEntryBuilder() + .with(EVENT_TYPE_FIELD_NAME, TRANSPORT_ORIGIN_FIELD_VALUE) + .with(EVENT_ACTION_FIELD_NAME, eventType == ACCESS_DENIED ? "access_denied" : "access_granted") + .with(ACTION_FIELD_NAME, action) + .with(REQUEST_NAME_FIELD_NAME, requestName) + .withRequestId(requestId) + .withSubject(authentication) + .with(INDICES_FIELD_NAME, indices) + .withOpaqueId(threadContext) + .withXForwardedFor(threadContext) + .with(authorizationInfo.asMap()); + final InetSocketAddress restAddress = RemoteHostHeader.restRemoteAddress(threadContext); + if (restAddress != null) { + logEntryBuilder + .with(ORIGIN_TYPE_FIELD_NAME, REST_ORIGIN_FIELD_VALUE) + .with(ORIGIN_ADDRESS_FIELD_NAME, NetworkAddress.format(restAddress)); + } else if (remoteAddress != null) { + logEntryBuilder + .with(ORIGIN_TYPE_FIELD_NAME, TRANSPORT_ORIGIN_FIELD_VALUE) + .with(ORIGIN_ADDRESS_FIELD_NAME, NetworkAddress.format(remoteAddress.address())); + } + logger.info(logEntryBuilder.build()); + } + } + } + @Override public void accessDenied(String requestId, Authentication authentication, String action, TransportMessage message, AuthorizationInfo authorizationInfo) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 212626ab41883..ac807c4200252 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; @@ -52,6 +53,8 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.search.SearchHit; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.XPackSettings; @@ -92,15 +95,16 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static 
org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; public class ApiKeyService { private static final Logger logger = LogManager.getLogger(ApiKeyService.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); - private static final String TYPE = "doc"; static final String API_KEY_ID_KEY = "_security_api_key_id"; static final String API_KEY_ROLE_DESCRIPTORS_KEY = "_security_api_key_role_descriptors"; static final String API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY = "_security_api_key_limited_by_role_descriptors"; @@ -133,6 +137,7 @@ public class ApiKeyService { private final Clock clock; private final Client client; + private final XPackLicenseState licenseState; private final SecurityIndexManager securityIndex; private final ClusterService clusterService; private final Hasher hasher; @@ -146,10 +151,11 @@ public class ApiKeyService { private volatile long lastExpirationRunMs; - public ApiKeyService(Settings settings, Clock clock, Client client, SecurityIndexManager securityIndex, ClusterService clusterService, - ThreadPool threadPool) { + public ApiKeyService(Settings settings, Clock clock, Client client, XPackLicenseState licenseState, SecurityIndexManager securityIndex, + ClusterService clusterService, ThreadPool threadPool) { this.clock = clock; this.client = client; + this.licenseState = licenseState; this.securityIndex = securityIndex; this.clusterService = clusterService; this.enabled = XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.get(settings); @@ -174,10 +180,10 @@ public ApiKeyService(Settings settings, Clock clock, Client client, SecurityInde * Asynchronously creates a new API key based off of the request and authentication * @param authentication the authentication that this api key should be based off of * @param request the request to create the api key included any permission restrictions - * @param roleDescriptorSet the user's actual roles that we always enforce + * @param userRoles the user's actual roles that we always enforce * @param listener the listener that will be used to notify of completion */ - public void createApiKey(Authentication authentication, CreateApiKeyRequest request, Set roleDescriptorSet, + public void createApiKey(Authentication authentication, CreateApiKeyRequest request, Set userRoles, ActionListener listener) { ensureEnabled(); if (authentication == null) { @@ -188,86 +194,128 @@ public void createApiKey(Authentication authentication, CreateApiKeyRequest requ * this check is best effort as there could be two nodes executing search and * then index concurrently allowing a duplicate name. 
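The helper that replaces the removed block below (checkDuplicateApiKeyNameAndCreateApiKey) reduces this best-effort uniqueness check to one bounded search against the security index before the new key is indexed. A minimal sketch of that query shape, assuming only the field names already used in this change (doc_type, name, api_key_invalidated, expiration_time); it is illustrative, not the exact production wiring:

import java.time.Instant;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

final class DuplicateApiKeyNameQuerySketch {
    // Shape of the search used to detect an existing, non-invalidated api_key
    // document with the requested name before a new one is indexed.
    static BoolQueryBuilder duplicateNameQuery(String requestedName) {
        final BoolQueryBuilder query = QueryBuilders.boolQuery()
                .filter(QueryBuilders.termQuery("doc_type", "api_key"))
                .filter(QueryBuilders.termQuery("name", requestedName))
                .filter(QueryBuilders.termQuery("api_key_invalidated", false));
        // expiration clause mirroring the change: a bound on expiration_time,
        // or no expiration_time at all (keys that never expire)
        final BoolQueryBuilder expiration = QueryBuilders.boolQuery()
                .should(QueryBuilders.rangeQuery("expiration_time").lte(Instant.now().toEpochMilli()))
                .should(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("expiration_time")));
        return query.filter(expiration);
    }
}

Because the search and the subsequent index are not atomic, two nodes can still race and create two keys with the same name, which is why the comment above calls the check best effort.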
*/ - findApiKeyForApiKeyName(request.getName(), true, true, ActionListener.wrap(apiKeyIds -> { - if (apiKeyIds.isEmpty()) { - final Instant created = clock.instant(); - final Instant expiration = getApiKeyExpiration(created, request); - final SecureString apiKey = UUIDs.randomBase64UUIDSecureString(); - final Version version = clusterService.state().nodes().getMinNodeVersion(); - if (version.before(Version.V_6_7_0)) { - logger.warn( - "nodes prior to the minimum supported version for api keys {} exist in the cluster;" - + " these nodes will not be able to use api keys", - Version.V_6_7_0); - } + checkDuplicateApiKeyNameAndCreateApiKey(authentication, request, userRoles, listener); + } + } - final char[] keyHash = hasher.hash(apiKey); - try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - builder.startObject() - .field("doc_type", "api_key") - .field("creation_time", created.toEpochMilli()) - .field("expiration_time", expiration == null ? null : expiration.toEpochMilli()) - .field("api_key_invalidated", false); - - byte[] utf8Bytes = null; - try { - utf8Bytes = CharArrays.toUtf8Bytes(keyHash); - builder.field("api_key_hash").utf8Value(utf8Bytes, 0, utf8Bytes.length); - } finally { - if (utf8Bytes != null) { - Arrays.fill(utf8Bytes, (byte) 0); + private void checkDuplicateApiKeyNameAndCreateApiKey(Authentication authentication, CreateApiKeyRequest request, + Set userRoles, + ActionListener listener) { + final BoolQueryBuilder boolQuery = QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("doc_type", "api_key")) + .filter(QueryBuilders.termQuery("name", request.getName())) + .filter(QueryBuilders.termQuery("api_key_invalidated", false)); + final BoolQueryBuilder expiredQuery = QueryBuilders.boolQuery() + .should(QueryBuilders.rangeQuery("expiration_time").lte(Instant.now().toEpochMilli())) + .should(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("expiration_time"))); + boolQuery.filter(expiredQuery); + + final SearchRequest searchRequest = client.prepareSearch(SECURITY_INDEX_NAME) + .setScroll(DEFAULT_KEEPALIVE_SETTING.get(settings)) + .setQuery(boolQuery) + .setVersion(false) + .setSize(1) + .request(); + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client, SECURITY_ORIGIN, SearchAction.INSTANCE, searchRequest, + ActionListener.wrap( + indexResponse -> { + if (indexResponse.getHits().getTotalHits().value > 0) { + listener.onFailure(traceLog("create api key", new ElasticsearchSecurityException( + "Error creating api key as api key with name [{}] already exists", request.getName()))); + } else { + createApiKeyAndIndexIt(authentication, request, userRoles, listener); } - } + }, + listener::onFailure))); + } + + private void createApiKeyAndIndexIt(Authentication authentication, CreateApiKeyRequest request, Set roleDescriptorSet, + ActionListener listener) { + final Instant created = clock.instant(); + final Instant expiration = getApiKeyExpiration(created, request); + final SecureString apiKey = UUIDs.randomBase64UUIDSecureString(); + final Version version = clusterService.state().nodes().getMinNodeVersion(); + if (version.before(Version.V_6_7_0)) { + logger.warn( + "nodes prior to the minimum supported version for api keys {} exist in the cluster;" + + " these nodes will not be able to use api keys", + Version.V_6_7_0); + } + + try (XContentBuilder builder = newDocument(apiKey, request.getName(), authentication, roleDescriptorSet, created, expiration, + request.getRoleDescriptors(), version)) { + final 
IndexRequest indexRequest = + client.prepareIndex(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME) + .setSource(builder) + .setRefreshPolicy(request.getRefreshPolicy()) + .request(); + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client, SECURITY_ORIGIN, IndexAction.INSTANCE, indexRequest, + ActionListener.wrap( + indexResponse -> listener.onResponse( + new CreateApiKeyResponse(request.getName(), indexResponse.getId(), apiKey, expiration)), + listener::onFailure))); + } catch (IOException e) { + listener.onFailure(e); + } + } - // Save role_descriptors - builder.startObject("role_descriptors"); - if (request.getRoleDescriptors() != null && request.getRoleDescriptors().isEmpty() == false) { - for (RoleDescriptor descriptor : request.getRoleDescriptors()) { - builder.field(descriptor.getName(), - (contentBuilder, params) -> descriptor.toXContent(contentBuilder, params, true)); - } - } - builder.endObject(); + /** + * package protected for testing + */ + XContentBuilder newDocument(SecureString apiKey, String name, Authentication authentication, Set userRoles, + Instant created, Instant expiration, List keyRoles, + Version version) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject() + .field("doc_type", "api_key") + .field("creation_time", created.toEpochMilli()) + .field("expiration_time", expiration == null ? null : expiration.toEpochMilli()) + .field("api_key_invalidated", false); + + byte[] utf8Bytes = null; + final char[] keyHash = hasher.hash(apiKey); + try { + utf8Bytes = CharArrays.toUtf8Bytes(keyHash); + builder.field("api_key_hash").utf8Value(utf8Bytes, 0, utf8Bytes.length); + } finally { + if (utf8Bytes != null) { + Arrays.fill(utf8Bytes, (byte) 0); + } + Arrays.fill(keyHash, (char) 0); + } - // Save limited_by_role_descriptors - builder.startObject("limited_by_role_descriptors"); - for (RoleDescriptor descriptor : roleDescriptorSet) { - builder.field(descriptor.getName(), - (contentBuilder, params) -> descriptor.toXContent(contentBuilder, params, true)); - } - builder.endObject(); - - builder.field("name", request.getName()) - .field("version", version.id) - .startObject("creator") - .field("principal", authentication.getUser().principal()) - .field("metadata", authentication.getUser().metadata()) - .field("realm", authentication.getLookedUpBy() == null ? 
- authentication.getAuthenticatedBy().getName() : authentication.getLookedUpBy().getName()) - .endObject() - .endObject(); - final IndexRequest indexRequest = - client.prepareIndex(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE) - .setSource(builder) - .setRefreshPolicy(request.getRefreshPolicy()) - .request(); - securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> - executeAsyncWithOrigin(client, SECURITY_ORIGIN, IndexAction.INSTANCE, indexRequest, - ActionListener.wrap( - indexResponse -> listener.onResponse( - new CreateApiKeyResponse(request.getName(), indexResponse.getId(), apiKey, expiration)), - listener::onFailure))); - } catch (IOException e) { - listener.onFailure(e); - } finally { - Arrays.fill(keyHash, (char) 0); - } - } else { - listener.onFailure(traceLog("create api key", new ElasticsearchSecurityException( - "Error creating api key as api key with name [{}] already exists", request.getName()))); - } - }, listener::onFailure)); + + // Save role_descriptors + builder.startObject("role_descriptors"); + if (keyRoles != null && keyRoles.isEmpty() == false) { + for (RoleDescriptor descriptor : keyRoles) { + builder.field(descriptor.getName(), + (contentBuilder, params) -> descriptor.toXContent(contentBuilder, params, true)); + } + } + builder.endObject(); + + // Save limited_by_role_descriptors + builder.startObject("limited_by_role_descriptors"); + for (RoleDescriptor descriptor : userRoles) { + builder.field(descriptor.getName(), + (contentBuilder, params) -> descriptor.toXContent(contentBuilder, params, true)); } + builder.endObject(); + + builder.field("name", name) + .field("version", version.id) + .startObject("creator") + .field("principal", authentication.getUser().principal()) + .field("metadata", authentication.getUser().metadata()) + .field("realm", authentication.getLookedUpBy() == null ? + authentication.getAuthenticatedBy().getName() : authentication.getLookedUpBy().getName()) + .endObject() + .endObject(); + + return builder; } /** @@ -275,7 +323,7 @@ public void createApiKey(Authentication authentication, CreateApiKeyRequest requ * {@code ApiKey }. If found this will attempt to authenticate the key. 
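For readers following the header handling, the credential that authenticateWithApiKeyIfPresent looks for is the API key id and secret joined by a colon, base64-encoded, and sent under the ApiKey scheme. A small, self-contained sketch of that encoding; the names here are illustrative and not taken from the change:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

final class ApiKeyHeaderSketch {
    // Builds the Authorization header value the credential extraction expects:
    // "ApiKey " followed by base64("<id>:<secret>").
    static String authorizationHeader(String id, String secret) {
        final String joined = id + ":" + secret;
        return "ApiKey " + Base64.getEncoder().encodeToString(joined.getBytes(StandardCharsets.UTF_8));
    }

    // Reverses the encoding; returns null when the header is not an ApiKey credential.
    static String[] parse(String headerValue) {
        if (headerValue == null || headerValue.startsWith("ApiKey ") == false) {
            return null;
        }
        final byte[] decoded = Base64.getDecoder().decode(headerValue.substring("ApiKey ".length()));
        final String text = new String(decoded, StandardCharsets.UTF_8);
        final int colon = text.indexOf(':');
        return new String[] { text.substring(0, colon), text.substring(colon + 1) };
    }
}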
*/ void authenticateWithApiKeyIfPresent(ThreadContext ctx, ActionListener listener) { - if (enabled) { + if (isEnabled()) { final ApiKeyCredentials credentials; try { credentials = getCredentialsFromHeader(ctx); @@ -285,8 +333,10 @@ void authenticateWithApiKeyIfPresent(ThreadContext ctx, ActionListenerwrap(response -> { if (response.isExists()) { try (ApiKeyCredentials ignore = credentials) { @@ -535,7 +585,14 @@ private Instant getApiKeyExpiration(Instant now, CreateApiKeyRequest request) { } } + private boolean isEnabled() { + return enabled && licenseState.isApiKeyServiceAllowed(); + } + private void ensureEnabled() { + if (licenseState.isApiKeyServiceAllowed() == false) { + throw LicenseUtils.newComplianceException("api keys"); + } if (enabled == false) { throw new IllegalStateException("api keys are not enabled"); } @@ -692,8 +749,7 @@ private void findApiKeys(final BoolQueryBuilder boolQuery, boolean filterOutInva expiredQuery.should(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("expiration_time"))); boolQuery.filter(expiredQuery); } - - final SearchRequest request = client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) + final SearchRequest request = client.prepareSearch(SECURITY_INDEX_NAME) .setScroll(DEFAULT_KEEPALIVE_SETTING.get(settings)) .setQuery(boolQuery) .setVersion(false) @@ -766,9 +822,10 @@ private void indexInvalidation(Collection apiKeyIds, ActionListener E traceLog(String action, E exception) { return exception; } + // pkg scoped for testing boolean isExpirationInProgress() { return expiredApiKeysRemover.isExpirationInProgress(); } + // pkg scoped for testing + long lastTimeWhenApiKeysRemoverWasTriggered() { + return lastExpirationRunMs; + } + private void maybeStartApiKeyRemover() { if (securityIndex.isAvailable()) { if (client.threadPool().relativeTimeInMillis() - lastExpirationRunMs > deleteInterval.getMillis()) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java index 8fb5abda10c54..1fe3ed67f7337 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.collect.Tuple; @@ -35,6 +36,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine.EmptyAuthorizationInfo; import org.elasticsearch.xpack.core.security.support.Exceptions; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; @@ -43,7 +45,6 @@ import org.elasticsearch.xpack.security.audit.AuditTrailService; import org.elasticsearch.xpack.security.audit.AuditUtil; import org.elasticsearch.xpack.security.authc.support.RealmUserLookup; -import 
org.elasticsearch.xpack.core.security.authz.AuthorizationEngine.EmptyAuthorizationInfo; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.ArrayList; @@ -196,8 +197,9 @@ class Authenticator { private final AuditableRequest request; private final User fallbackUser; - + private final List defaultOrderedRealmList; private final ActionListener listener; + private RealmRef authenticatedBy = null; private RealmRef lookedupBy = null; private AuthenticationToken authenticationToken = null; @@ -215,6 +217,7 @@ class Authenticator { private Authenticator(AuditableRequest auditableRequest, User fallbackUser, ActionListener listener) { this.request = auditableRequest; this.fallbackUser = fallbackUser; + this.defaultOrderedRealmList = realms.asList(); this.listener = listener; } @@ -233,27 +236,33 @@ private Authenticator(AuditableRequest auditableRequest, User fallbackUser, Acti * */ private void authenticateAsync() { - lookForExistingAuthentication((authentication) -> { - if (authentication != null) { - listener.onResponse(authentication); - } else { - tokenService.getAndValidateToken(threadContext, ActionListener.wrap(userToken -> { - if (userToken != null) { - writeAuthToContext(userToken.getAuthentication()); - } else { - checkForApiKey(); - } - }, e -> { - if (e instanceof ElasticsearchSecurityException && + if (defaultOrderedRealmList.isEmpty()) { + // this happens when the license state changes between the call to authenticate and the actual invocation + // to get the realm list + listener.onResponse(null); + } else { + lookForExistingAuthentication((authentication) -> { + if (authentication != null) { + listener.onResponse(authentication); + } else { + tokenService.getAndValidateToken(threadContext, ActionListener.wrap(userToken -> { + if (userToken != null) { + writeAuthToContext(userToken.getAuthentication()); + } else { + checkForApiKey(); + } + }, e -> { + if (e instanceof ElasticsearchSecurityException && tokenService.isExpiredTokenException((ElasticsearchSecurityException) e) == false) { - // intentionally ignore the returned exception; we call this primarily - // for the auditing as we already have a purpose built exception - request.tamperedRequest(); - } - listener.onFailure(e); - })); - } - }); + // intentionally ignore the returned exception; we call this primarily + // for the auditing as we already have a purpose built exception + request.tamperedRequest(); + } + listener.onFailure(e); + })); + } + }); + } } private void checkForApiKey() { @@ -320,7 +329,7 @@ void extractToken(Consumer consumer) { if (authenticationToken != null) { action = () -> consumer.accept(authenticationToken); } else { - for (Realm realm : realms) { + for (Realm realm : defaultOrderedRealmList) { final AuthenticationToken token = realm.token(threadContext); if (token != null) { action = () -> consumer.accept(token); @@ -388,6 +397,7 @@ private void consumeToken(AuthenticationToken token) { userListener.onResponse(null); } }; + final IteratingActionListener authenticatingListener = new IteratingActionListener<>(ContextPreservingActionListener.wrapPreservingContext(ActionListener.wrap( (user) -> consumeUser(user, messages), @@ -402,24 +412,24 @@ private void consumeToken(AuthenticationToken token) { } private List getRealmList(String principal) { - final List defaultOrderedRealms = realms.asList(); + final List orderedRealmList = this.defaultOrderedRealmList; if (lastSuccessfulAuthCache != null) { final Realm lastSuccess = lastSuccessfulAuthCache.get(principal); if 
(lastSuccess != null) { - final int index = defaultOrderedRealms.indexOf(lastSuccess); + final int index = orderedRealmList.indexOf(lastSuccess); if (index > 0) { - final List smartOrder = new ArrayList<>(defaultOrderedRealms.size()); + final List smartOrder = new ArrayList<>(orderedRealmList.size()); smartOrder.add(lastSuccess); - for (int i = 1; i < defaultOrderedRealms.size(); i++) { + for (int i = 1; i < orderedRealmList.size(); i++) { if (i != index) { - smartOrder.add(defaultOrderedRealms.get(i)); + smartOrder.add(orderedRealmList.get(i)); } } return Collections.unmodifiableList(smartOrder); } } } - return defaultOrderedRealms; + return orderedRealmList; } /** @@ -475,6 +485,13 @@ private void consumeUser(User user, Map> message final String cause = tuple.v2() == null ? "" : " (Caused by " + tuple.v2() + ")"; logger.warn("Authentication to realm {} failed - {}{}", realm.name(), message, cause); }); + List unlicensedRealms = realms.getUnlicensedRealms(); + if (unlicensedRealms.isEmpty() == false) { + logger.warn("Authentication failed using realms [{}]." + + " Realms [{}] were skipped because they are not permitted on the current license", + Strings.collectionToCommaDelimitedString(defaultOrderedRealmList), + Strings.collectionToCommaDelimitedString(unlicensedRealms)); + } listener.onFailure(request.authenticationFailed(authenticationToken)); } else { threadContext.putTransient(AuthenticationResult.THREAD_CONTEXT_KEY, authenticationResult); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredApiKeysRemover.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredApiKeysRemover.java index b9430681d7f43..2cb9969337f0a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredApiKeysRemover.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredApiKeysRemover.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -25,8 +24,8 @@ import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.xpack.security.support.SecurityIndexManager; +import java.time.Duration; import java.time.Instant; -import java.time.temporal.ChronoUnit; import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException; @@ -37,6 +36,8 @@ * Responsible for cleaning the invalidated and expired API keys from the security index. 
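The remover below replaces the inline 7-day value with the EXPIRED_API_KEYS_RETENTION_PERIOD constant; the cutoff it feeds into the delete-by-query is simply the current time minus that retention window. A tiny sketch of the computation, under that assumption:

import java.time.Duration;
import java.time.Instant;

final class RetentionCutoffSketch {
    // Same value as the EXPIRED_API_KEYS_RETENTION_PERIOD constant introduced here.
    static final Duration RETENTION = Duration.ofDays(7);

    // Documents whose expiration_time is older than this cutoff (or that have been
    // invalidated) become eligible for deletion from the security index.
    static long deletionCutoffMillis(Instant now) {
        return now.minus(RETENTION).toEpochMilli();
    }

    public static void main(String[] args) {
        System.out.println(deletionCutoffMillis(Instant.now()));
    }
}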
*/ public final class ExpiredApiKeysRemover extends AbstractRunnable { + public static final Duration EXPIRED_API_KEYS_RETENTION_PERIOD = Duration.ofDays(7L); + private static final Logger logger = LogManager.getLogger(ExpiredApiKeysRemover.class); private final Client client; @@ -60,11 +61,10 @@ public void doRun() { .setQuery(QueryBuilders.boolQuery() .filter(QueryBuilders.termsQuery("doc_type", "api_key")) .should(QueryBuilders.termsQuery("api_key_invalidated", true)) - .should(QueryBuilders.rangeQuery("expiration_time").lte(now.minus(7L, ChronoUnit.DAYS).toEpochMilli())) + .should(QueryBuilders.rangeQuery("expiration_time").lte(now.minus(EXPIRED_API_KEYS_RETENTION_PERIOD).toEpochMilli())) .minimumShouldMatch(1) ); - logger.trace(() -> new ParameterizedMessage("Removing old api keys: [{}]", Strings.toString(expiredDbq))); executeAsyncWithOrigin(client, SECURITY_ORIGIN, DeleteByQueryAction.INSTANCE, expiredDbq, ActionListener.wrap(r -> { debugDbqResponse(r); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java index 54bffd8a21566..089b832d74d58 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java @@ -74,7 +74,7 @@ static boolean isXPackRealm(String type) { return ReservedRealm.TYPE.equals(type); } - static Collection getConfigurableRealmsTypes() { + public static Collection getConfigurableRealmsTypes() { return Collections.unmodifiableSet(XPACK_TYPES); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java index 925654fae8bbf..39b981b42e310 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java @@ -5,24 +5,8 @@ */ package org.elasticsearch.xpack.security.authc; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; - import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; @@ -39,6 +23,21 @@ import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + /** * Serves as a realms registry (also responsible for ordering the realms appropriately) */ @@ -119,6 
+118,32 @@ public Iterator iterator() { } } + /** + * Returns a list of realms that are configured, but are not permitted under the current license. + */ + public List getUnlicensedRealms() { + // If auth is not allowed, then everything is unlicensed + if (licenseState.isAuthAllowed() == false) { + return Collections.unmodifiableList(realms); + } + + AllowedRealmType allowedRealmType = licenseState.allowedRealmType(); + // If all realms are allowed, then nothing is unlicensed + if (allowedRealmType == AllowedRealmType.ALL) { + return Collections.emptyList(); + } + + final List allowedRealms = this.asList(); + // Shortcut for the typical case, all the configured realms are allowed + if (allowedRealms.equals(this.realms)) { + return Collections.emptyList(); + } + + // Otherwise, we return anything in "all realms" that is not in the allowed realm list + List unlicensed = realms.stream().filter(r -> allowedRealms.contains(r) == false).collect(Collectors.toList()); + return Collections.unmodifiableList(unlicensed); + } + public Stream stream() { return StreamSupport.stream(this.spliterator(), false); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 8b9fda5b9c3f3..35a6d14e0d9b6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -67,6 +67,8 @@ import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.core.XPackField; @@ -126,9 +128,11 @@ import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; /** * Service responsible for the creation, validation, and other management of {@link UserToken} @@ -154,7 +158,6 @@ public final class TokenService { "\", error=\"invalid_token\", error_description=\"The access token expired\""; private static final String MALFORMED_TOKEN_WWW_AUTH_VALUE = "Bearer realm=\"" + XPackField.SECURITY + "\", error=\"invalid_token\", error_description=\"The access token is malformed\""; - private static final String TYPE = "doc"; public static final String THREAD_POOL_NAME = XPackField.SECURITY + "-token-key"; public static final Setting TOKEN_EXPIRATION = Setting.timeSetting("xpack.security.authc.token.timeout", @@ -181,6 +184,7 @@ public final class TokenService { private final SecurityIndexManager securityIndex; private final ExpiredTokenRemover expiredTokenRemover; private final boolean enabled; + private final XPackLicenseState licenseState; private volatile TokenKeys keyCache; private volatile long
lastExpirationRunMs; private final AtomicLong createdTimeStamps = new AtomicLong(-1); @@ -192,7 +196,7 @@ public final class TokenService { * @param clock the clock that will be used for comparing timestamps * @param client the client to use when checking for revocations */ - public TokenService(Settings settings, Clock clock, Client client, + public TokenService(Settings settings, Clock clock, Client client, XPackLicenseState licenseState, SecurityIndexManager securityIndex, ClusterService clusterService) throws GeneralSecurityException { byte[] saltArr = new byte[SALT_BYTES]; secureRandom.nextBytes(saltArr); @@ -202,6 +206,7 @@ public TokenService(Settings settings, Clock clock, Client client, this.clock = clock.withZone(ZoneOffset.UTC); this.expirationDelay = TOKEN_EXPIRATION.get(settings); this.client = client; + this.licenseState = licenseState; this.securityIndex = securityIndex; this.lastExpirationRunMs = client.threadPool().relativeTimeInMillis(); this.deleteInterval = DELETE_INTERVAL.get(settings); @@ -266,7 +271,7 @@ public void createUserToken(Authentication authentication, Authentication origin builder.endObject(); final String documentId = getTokenDocumentId(userToken); IndexRequest request = - client.prepareIndex(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, documentId) + client.prepareIndex(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, documentId) .setOpType(OpType.CREATE) .setSource(builder) .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) @@ -286,7 +291,7 @@ public void createUserToken(Authentication authentication, Authentication origin * has not been revoked or is expired. */ void getAndValidateToken(ThreadContext ctx, ActionListener listener) { - if (enabled) { + if (isEnabled()) { final String token = getFromHeader(ctx); if (token == null) { listener.onResponse(null); @@ -362,7 +367,7 @@ void decodeToken(String token, ActionListener listener) throws IOExce securityIndex.checkIndexVersionThenExecute( ex -> listener.onFailure(traceLog("prepare security index", tokenId, ex)), () -> { - final GetRequest getRequest = client.prepareGet(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, + final GetRequest getRequest = client.prepareGet(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getTokenDocumentId(tokenId)).request(); Consumer onFailure = ex -> listener.onFailure(traceLog("decode token", tokenId, ex)); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, getRequest, @@ -586,10 +591,11 @@ private void indexInvalidation(Collection tokenIds, ActionListener onFailure = ex -> listener.onFailure(traceLog("refresh token", tokenDocId, ex)); - GetRequest getRequest = client.prepareGet(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, tokenDocId).request(); + GetRequest getRequest = client.prepareGet(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, tokenDocId).request(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, getRequest, ActionListener.wrap(response -> { if (response.isExists()) { @@ -747,7 +753,7 @@ private void innerRefresh(String tokenDocId, Authentication userAuth, ActionList in.setVersion(authVersion); Authentication authentication = new Authentication(in); UpdateRequestBuilder updateRequest = - client.prepareUpdate(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, tokenDocId) + client.prepareUpdate(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, tokenDocId) .setDoc("refresh_token", Collections.singletonMap("refreshed", true)) .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL); updateRequest.setIfSeqNo(response.getSeqNo()); @@ -872,7 +878,7 @@ 
public void findActiveTokensForRealm(String realmName, ActionListener listener.onResponse(null); } else { securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> { - final GetRequest getRequest = client.prepareGet(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, - getTokenDocumentId(userToken)).request(); + final GetRequest getRequest = client.prepareGet(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getTokenDocumentId(userToken)) + .request(); Consumer onFailure = ex -> listener.onFailure(traceLog("check token state", userToken.getId(), ex)); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, getRequest, ActionListener.wrap(response -> { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java index 3a6fb0ea3b341..a0c579dd881ce 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -62,10 +62,10 @@ import java.util.function.Consumer; import java.util.function.Supplier; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; /** @@ -77,7 +77,6 @@ */ public class NativeUsersStore { - public static final String INDEX_TYPE = "doc"; static final String USER_DOC_TYPE = "user"; public static final String RESERVED_USER_TYPE = "reserved-user"; private static final Logger logger = LogManager.getLogger(NativeUsersStore.class); @@ -143,10 +142,10 @@ public void getUsers(String[] userNames, final ActionListener> query = QueryBuilders.termQuery(Fields.TYPE.getPreferredName(), USER_DOC_TYPE); } else { final String[] users = Arrays.stream(userNames).map(s -> getIdForUser(USER_DOC_TYPE, s)).toArray(String[]::new); - query = QueryBuilders.boolQuery().filter(QueryBuilders.idsQuery(INDEX_TYPE).addIds(users)); + query = QueryBuilders.boolQuery().filter(QueryBuilders.idsQuery().addIds(users)); } final Supplier supplier = client.threadPool().getThreadContext().newRestorableContext(false); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(SECURITY_ORIGIN)) { SearchRequest request = client.prepareSearch(SECURITY_INDEX_NAME) .setScroll(DEFAULT_KEEPALIVE_SETTING.get(settings)) .setQuery(query) @@ -206,8 +205,7 @@ private void getUserAndPassword(final String user, final ActionListener executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareGet(SECURITY_INDEX_NAME, - INDEX_TYPE, getIdForUser(USER_DOC_TYPE, user)).request(), + client.prepareGet(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForUser(USER_DOC_TYPE, user)).request(), new ActionListener() { @Override public void onResponse(GetResponse response) { @@ -247,7 +245,7 @@ public void changePassword(final ChangePasswordRequest request, final ActionList 
securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareUpdate(SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(docType, username)) + client.prepareUpdate(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForUser(docType, username)) .setDoc(Requests.INDEX_CONTENT_TYPE, Fields.PASSWORD.getPreferredName(), String.valueOf(request.passwordHash())) .setRefreshPolicy(request.getRefreshPolicy()).request(), @@ -285,11 +283,9 @@ public void onFailure(Exception e) { private void createReservedUser(String username, char[] passwordHash, RefreshPolicy refresh, ActionListener listener) { securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareIndex(SECURITY_INDEX_NAME, INDEX_TYPE, - getIdForUser(RESERVED_USER_TYPE, username)) - .setSource(Fields.PASSWORD.getPreferredName(), String.valueOf(passwordHash), - Fields.ENABLED.getPreferredName(), true, - Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE) + client.prepareIndex(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForUser(RESERVED_USER_TYPE, username)) + .setSource(Fields.PASSWORD.getPreferredName(), String.valueOf(passwordHash), Fields.ENABLED.getPreferredName(), + true, Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE) .setRefreshPolicy(refresh).request(), new ActionListener() { @Override @@ -327,8 +323,7 @@ private void updateUserWithoutPassword(final PutUserRequest putUserRequest, fina // We must have an existing document securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareUpdate(SECURITY_INDEX_NAME, INDEX_TYPE, - getIdForUser(USER_DOC_TYPE, putUserRequest.username())) + client.prepareUpdate(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForUser(USER_DOC_TYPE, putUserRequest.username())) .setDoc(Requests.INDEX_CONTENT_TYPE, Fields.USERNAME.getPreferredName(), putUserRequest.username(), Fields.ROLES.getPreferredName(), putUserRequest.roles(), @@ -372,8 +367,7 @@ private void indexUser(final PutUserRequest putUserRequest, final ActionListener assert putUserRequest.passwordHash() != null; securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareIndex(SECURITY_INDEX_NAME, INDEX_TYPE, - getIdForUser(USER_DOC_TYPE, putUserRequest.username())) + client.prepareIndex(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForUser(USER_DOC_TYPE, putUserRequest.username())) .setSource(Fields.USERNAME.getPreferredName(), putUserRequest.username(), Fields.PASSWORD.getPreferredName(), String.valueOf(putUserRequest.passwordHash()), Fields.ROLES.getPreferredName(), putUserRequest.roles(), @@ -416,8 +410,7 @@ private void setRegularUserEnabled(final String username, final boolean enabled, final ActionListener listener) { securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareUpdate(SECURITY_INDEX_NAME, INDEX_TYPE, - getIdForUser(USER_DOC_TYPE, username)) + client.prepareUpdate(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForUser(USER_DOC_TYPE, username)) .setDoc(Requests.INDEX_CONTENT_TYPE, Fields.ENABLED.getPreferredName(), enabled) .setRefreshPolicy(refreshPolicy) .request(), @@ -451,8 +444,7 @@ private 
void setReservedUserEnabled(final String username, final boolean enabled boolean clearCache, final ActionListener listener) { securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareUpdate(SECURITY_INDEX_NAME, INDEX_TYPE, - getIdForUser(RESERVED_USER_TYPE, username)) + client.prepareUpdate(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForUser(RESERVED_USER_TYPE, username)) .setDoc(Requests.INDEX_CONTENT_TYPE, Fields.ENABLED.getPreferredName(), enabled) .setUpsert(XContentType.JSON, Fields.PASSWORD.getPreferredName(), "", @@ -486,8 +478,9 @@ public void deleteUser(final DeleteUserRequest deleteUserRequest, final ActionLi listener.onFailure(frozenSecurityIndex.getUnavailableReason()); } else { securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> { - DeleteRequest request = client.prepareDelete(SECURITY_INDEX_NAME, - INDEX_TYPE, getIdForUser(USER_DOC_TYPE, deleteUserRequest.username())).request(); + DeleteRequest request = client + .prepareDelete(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForUser(USER_DOC_TYPE, deleteUserRequest.username())) + .request(); request.setRefreshPolicy(deleteUserRequest.getRefreshPolicy()); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, new ActionListener() { @@ -533,8 +526,8 @@ void getReservedUserInfo(String username, ActionListener liste } else { securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareGet(SECURITY_INDEX_NAME, INDEX_TYPE, - getIdForUser(RESERVED_USER_TYPE, username)).request(), + client.prepareGet(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForUser(RESERVED_USER_TYPE, username)) + .request(), new ActionListener() { @Override public void onResponse(GetResponse getResponse) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClient.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClient.java index cb0bef382071b..7e9d5a8a32657 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClient.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClient.java @@ -27,7 +27,6 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.io.UncheckedIOException; import java.net.HttpURLConnection; import java.net.InetAddress; import java.net.URL; @@ -154,13 +153,13 @@ String getDefaultURL() { // this sucks but a port can be specified with a value of 0, we'll never be able to connect to it so just default to // what we know if (port <= 0) { - throw new IllegalStateException("unable to determine http port from settings, please use the -u option to provide the" + - " url"); + throw new IllegalStateException("unable to determine http port from settings"); } } return scheme + "://" + InetAddresses.toUriString(publishAddress) + ":" + port; - } catch (IOException e) { - throw new UncheckedIOException("failed to resolve default URL", e); + } catch (Exception e) { + throw new IllegalStateException("unable to determine default URL from settings, please use the -u option to explicitly " + + "provide the url", e); } } diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticator.java index 015cb1f8b186d..6dd8a971d00b6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticator.java @@ -106,8 +106,7 @@ private SamlAttributes authenticateResponse(Element element, Collection throw samlException("SAML Response has no status code"); } if (isSuccess(status) == false) { - throw samlException("SAML Response is not a 'success' response: Code={} Message={} Detail={}", - status.getStatusCode().getValue(), getMessage(status), getDetail(status)); + throw samlException("SAML Response is not a 'success' response: {}", getStatusCodeMessage(status)); } checkIssuer(response.getIssuer(), response); checkResponseDestination(response); @@ -137,6 +136,32 @@ private SamlAttributes authenticateResponse(Element element, Collection return new SamlAttributes(nameId, session, attributes); } + private String getStatusCodeMessage(Status status) { + StatusCode firstLevel = status.getStatusCode(); + StatusCode subLevel = firstLevel.getStatusCode(); + StringBuilder sb = new StringBuilder(); + if (StatusCode.REQUESTER.equals(firstLevel.getValue())) { + sb.append("The SAML IdP did not grant the request. It indicated that the Elastic Stack side sent something invalid ("); + } else if (StatusCode.RESPONDER.equals(firstLevel.getValue())) { + sb.append("The request could not be granted due to an error in the SAML IDP side ("); + } else if (StatusCode.VERSION_MISMATCH.equals(firstLevel.getValue())) { + sb.append("The request could not be granted because the SAML IDP doesn't support SAML 2.0 ("); + } else { + sb.append("The request could not be granted, the SAML IDP responded with a non-standard Status code ("); + } + sb.append(firstLevel.getValue()).append(")."); + if (getMessage(status) != null) { + sb.append(" Message: [").append(getMessage(status)).append("]"); + } + if (getDetail(status) != null) { + sb.append(" Detail: [").append(getDetail(status)).append("]"); + } + if (null != subLevel) { + sb.append(" Specific status code which might indicate what the issue is: [").append(subLevel.getValue()).append("]"); + } + return sb.toString(); + } + private String getMessage(Status status) { final StatusMessage sm = status.getStatusMessage(); return sm == null ? 
null : sm.getMessage(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java index e55530bb5def0..dbc323810610f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java @@ -79,8 +79,12 @@ public UserData(String username, @Nullable String dn, Collection groups, public ExpressionModel asModel() { final ExpressionModel model = new ExpressionModel(); model.defineField("username", username); - model.defineField("dn", dn, new DistinguishedNamePredicate(dn)); + if (dn != null) { + // null dn fields get the default NULL_PREDICATE + model.defineField("dn", dn, new DistinguishedNamePredicate(dn)); + } model.defineField("groups", groups, groups.stream() + .filter(group -> group != null) .>map(DistinguishedNamePredicate::new) .reduce(Predicate::or) .orElse(fieldValue -> false) @@ -165,22 +169,19 @@ class DistinguishedNamePredicate implements Predicate NO_OP_ACTION_LISTENER = new ActionListener() { @Override public void onResponse(Object o) { @@ -130,10 +128,9 @@ void loadMappings(ActionListener> listener) { } final QueryBuilder query = QueryBuilders.termQuery(DOC_TYPE_FIELD, DOC_TYPE_ROLE_MAPPING); final Supplier supplier = client.threadPool().getThreadContext().newRestorableContext(false); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(SECURITY_ORIGIN)) { SearchRequest request = client.prepareSearch(SECURITY_INDEX_NAME) .setScroll(DEFAULT_KEEPALIVE_SETTING.get(settings)) - .setTypes(SECURITY_GENERIC_TYPE) .setQuery(query) .setSize(1000) .setFetchSource(true) @@ -203,7 +200,7 @@ private void innerPutMapping(PutRoleMappingRequest request, ActionListener { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareDelete(SECURITY_INDEX_NAME, SECURITY_GENERIC_TYPE, getIdForName(request.getName())) + client.prepareDelete(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForName(request.getName())) .setRefreshPolicy(request.getRefreshPolicy()) .request(), new ActionListener() { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index acaf152628e8b..c05d35e8408c6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -61,6 +61,7 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.audit.AuditLevel; import org.elasticsearch.xpack.security.audit.AuditTrailService; import org.elasticsearch.xpack.security.audit.AuditUtil; import org.elasticsearch.xpack.security.authz.interceptor.RequestInterceptor; @@ -308,10 +309,12 @@ private void handleIndexActionAuthorizationResult(final IndexAuthorizationResult // if this is performing multiple actions on the index, then check each 
of those actions. assert request instanceof BulkShardRequest : "Action " + action + " requires " + BulkShardRequest.class + " but was " + request.getClass(); - authorizeBulkItems(requestInfo, authzInfo, authzEngine, resolvedIndicesAsyncSupplier, authorizedIndicesSupplier, - metaData, requestId, - ActionListener.wrap(ignore -> runRequestInterceptors(requestInfo, authzInfo, authorizationEngine, listener), - listener::onFailure)); + authorizeBulkItems(requestInfo, authzInfo, authzEngine, resolvedIndicesAsyncSupplier, authorizedIndicesSupplier, metaData, + requestId, + wrapPreservingContext( + ActionListener.wrap(ignore -> runRequestInterceptors(requestInfo, authzInfo, authorizationEngine, listener), + listener::onFailure), + threadContext)); } else { runRequestInterceptors(requestInfo, authzInfo, authorizationEngine, listener); } @@ -493,14 +496,16 @@ private void authorizeBulkItems(RequestInfo requestInfo, AuthorizationInfo authz for (BulkItemRequest item : request.items()) { final String resolvedIndex = resolvedIndexNames.get(item.index()); final String itemAction = getAction(item); - final IndicesAccessControl indicesAccessControl = actionToIndicesAccessControl.get(getAction(item)); + final IndicesAccessControl indicesAccessControl = actionToIndicesAccessControl.get(itemAction); final IndicesAccessControl.IndexAccessControl indexAccessControl = indicesAccessControl.getIndexPermissions(resolvedIndex); if (indexAccessControl == null || indexAccessControl.isGranted() == false) { - auditTrail.accessDenied(requestId, authentication, itemAction, request, authzInfo); + auditTrail.explicitIndexAccessEvent(requestId, AuditLevel.ACCESS_DENIED, authentication, itemAction, + resolvedIndex, item.getClass().getSimpleName(), request.remoteAddress(), authzInfo); item.abort(resolvedIndex, denialException(authentication, itemAction, null)); } else if (audit.get()) { - auditTrail.accessGranted(requestId, authentication, itemAction, request, authzInfo); + auditTrail.explicitIndexAccessEvent(requestId, AuditLevel.ACCESS_GRANTED, authentication, itemAction, + resolvedIndex, item.getClass().getSimpleName(), request.remoteAddress(), authzInfo); } } listener.onResponse(null); @@ -521,7 +526,7 @@ private void authorizeBulkItems(RequestInfo requestInfo, AuthorizationInfo authz }, listener::onFailure)); } - private IllegalArgumentException illegalArgument(String message) { + private static IllegalArgumentException illegalArgument(String message) { assert false : message; return new IllegalArgumentException(message); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java index 03c78ed903e81..6e0c2ed0bb101 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest; import org.elasticsearch.transport.RemoteClusterAware; @@ -248,7 +249,17 @@ static String getPutMappingIndexOrAlias(PutMappingRequest request, List Optional foundAlias = 
aliasMetaData.stream() .map(AliasMetaData::alias) .filter(authorizedIndicesList::contains) - .filter(aliasName -> metaData.getAliasAndIndexLookup().get(aliasName).getIndices().size() == 1) + .filter(aliasName -> { + AliasOrIndex alias = metaData.getAliasAndIndexLookup().get(aliasName); + List indexMetadata = alias.getIndices(); + if (indexMetadata.size() == 1) { + return true; + } else { + assert alias instanceof AliasOrIndex.Alias; + IndexMetaData idxMeta = ((AliasOrIndex.Alias) alias).getWriteIndex(); + return idxMeta != null && idxMeta.getIndex().getName().equals(concreteIndexName); + } + }) .findFirst(); resolvedAliasOrIndex = foundAlias.orElse(concreteIndexName); } else { @@ -438,7 +449,8 @@ private RemoteClusterResolver(Settings settings, ClusterSettings clusterSettings } @Override - protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress) { + protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress, boolean compressionEnabled, + TimeValue pingSchedule) { if (addresses.isEmpty()) { clusters.remove(clusterAlias); } else { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java index 78058080e5b17..daadac78ae487 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java @@ -24,8 +24,9 @@ import java.util.Set; /** - * Opts out of the query cache if field level security is active for the current request, - * and its unsafe to cache. + * Opts out of the query cache if field level security is active for the current request, and it is unsafe to cache. Note that the method + * {@link #listenForLicenseStateChanges()} must be invoked after construction of the query cache and before any other public methods are + * invoked on this query cache. */ public final class OptOutQueryCache extends AbstractIndexComponent implements LicenseStateListener, QueryCache { @@ -33,6 +34,7 @@ public final class OptOutQueryCache extends AbstractIndexComponent implements Li private final ThreadContext context; private final String indexName; private final XPackLicenseState licenseState; + private volatile boolean licenseStateListenerRegistered; public OptOutQueryCache( final IndexSettings indexSettings, @@ -44,28 +46,46 @@ public OptOutQueryCache( this.context = Objects.requireNonNull(context, "threadContext must not be null"); this.indexName = indexSettings.getIndex().getName(); this.licenseState = Objects.requireNonNull(licenseState, "licenseState"); + } + + /** + * Register this query cache to listen for license state changes. This must be done after construction of this query cache before any + * other public methods are invoked on this query cache. + */ + public void listenForLicenseStateChanges() { + /* + * Registering this as a listener can not be done in the constructor because otherwise it would be unsafe publication of this. That + * is, it would expose this to another thread before the constructor had finished. Therefore, we have a dedicated method to register + * the listener that is invoked after the constructor has returned. 
+ */ + assert licenseStateListenerRegistered == false; licenseState.addListener(this); + licenseStateListenerRegistered = true; } @Override public void close() throws ElasticsearchException { + assert licenseStateListenerRegistered; licenseState.removeListener(this); clear("close"); } @Override public void licenseStateChanged() { + assert licenseStateListenerRegistered; clear("license state changed"); } @Override - public void clear(String reason) { + public void clear(final String reason) { + assert licenseStateListenerRegistered; logger.debug("full cache clear, reason [{}]", reason); indicesQueryCache.clearIndex(index().getName()); } @Override public Weight doCache(Weight weight, QueryCachingPolicy policy) { + assert licenseStateListenerRegistered; if (licenseState.isAuthAllowed() == false) { logger.debug("not opting out of the query cache; authorization is not allowed"); return indicesQueryCache.doCache(weight, policy); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java index 63b3ba2c9bba7..52df8a9889d4a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java @@ -56,10 +56,10 @@ import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; import static org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor.DOC_TYPE_VALUE; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; @@ -118,10 +118,10 @@ public void getPrivileges(Collection applications, Collection na final String[] docIds = applications.stream() .flatMap(a -> names.stream().map(n -> toDocId(a, n))) .toArray(String[]::new); - query = QueryBuilders.boolQuery().filter(typeQuery).filter(QueryBuilders.idsQuery("doc").addIds(docIds)); + query = QueryBuilders.boolQuery().filter(typeQuery).filter(QueryBuilders.idsQuery().addIds(docIds)); } final Supplier supplier = client.threadPool().getThreadContext().newRestorableContext(false); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(SECURITY_ORIGIN)) { SearchRequest request = client.prepareSearch(SECURITY_INDEX_NAME) .setScroll(DEFAULT_KEEPALIVE_SETTING.get(settings)) .setQuery(query) @@ -151,7 +151,8 @@ void getPrivilege(String application, String name, ActionListener executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareGet(SECURITY_INDEX_NAME, "doc", toDocId(application, name)).request(), + client.prepareGet(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, toDocId(application, name)) + .request(), new ActionListener() { @Override public void onResponse(GetResponse response) { @@ -202,7 +203,7 @@ private void 
innerPutPrivilege(ApplicationPrivilegeDescriptor privilege, WriteRe final String name = privilege.getName(); final XContentBuilder xContentBuilder = privilege.toXContent(jsonBuilder(), true); ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareIndex(SECURITY_INDEX_NAME, "doc", toDocId(privilege.getApplication(), name)) + client.prepareIndex(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, toDocId(privilege.getApplication(), name)) .setSource(xContentBuilder) .setRefreshPolicy(refreshPolicy) .request(), listener, client::index); @@ -233,7 +234,7 @@ public void deletePrivileges(String application, Collection names, Write }, listener::onFailure), names.size(), Collections.emptyList()); for (String name : names) { ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareDelete(SECURITY_INDEX_NAME, "doc", toDocId(application, name)) + client.prepareDelete(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, toDocId(application, name)) .setRefreshPolicy(refreshPolicy) .request(), groupListener, client::delete); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index cbc66235d305b..8bd4f9cc89859 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -63,11 +63,11 @@ import java.util.function.Supplier; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; import static org.elasticsearch.xpack.core.security.SecurityField.setting; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ROLE_TYPE; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; @@ -87,7 +87,6 @@ public class NativeRolesStore implements BiConsumer, ActionListener< Setting.intSetting(setting("authz.store.roles.index.cache.max_size"), 10000, Property.NodeScope, Property.Deprecated); private static final Setting CACHE_TTL_SETTING = Setting.timeSetting(setting("authz.store.roles.index.cache.ttl"), TimeValue.timeValueMinutes(20), Property.NodeScope, Property.Deprecated); - private static final String ROLE_DOC_TYPE = "doc"; private static final Logger logger = LogManager.getLogger(NativeRolesStore.class); private final Settings settings; @@ -114,14 +113,17 @@ public void accept(Set names, ActionListener listen * Retrieve a list of roles, if rolesToGet is null or empty, fetch all roles */ public void getRoleDescriptors(Set names, final ActionListener listener) { - if (securityIndex.indexExists() == false) { + final SecurityIndexManager frozenSecurityIndex = this.securityIndex.freeze(); + if (frozenSecurityIndex.indexExists() == false) { // TODO remove this short circuiting and fix tests that fail without this! 
listener.onResponse(RoleRetrievalResult.success(Collections.emptySet())); + } else if (frozenSecurityIndex.isAvailable() == false) { + listener.onResponse(RoleRetrievalResult.failure(frozenSecurityIndex.getUnavailableReason())); } else if (names == null || names.isEmpty()) { securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> { QueryBuilder query = QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE); final Supplier supplier = client.threadPool().getThreadContext().newRestorableContext(false); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(SECURITY_ORIGIN)) { SearchRequest request = client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) .setScroll(DEFAULT_KEEPALIVE_SETTING.get(settings)) .setQuery(query) @@ -140,7 +142,7 @@ public void getRoleDescriptors(Set names, final ActionListener { final String[] roleIds = names.stream().map(NativeRolesStore::getIdForRole).toArray(String[]::new); - MultiGetRequest multiGetRequest = client.prepareMultiGet().add(SECURITY_INDEX_NAME, ROLE_DOC_TYPE, roleIds).request(); + MultiGetRequest multiGetRequest = client.prepareMultiGet().add(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, roleIds).request(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, multiGetRequest, ActionListener.wrap(mGetResponse -> { final MultiGetItemResponse[] responses = mGetResponse.getResponses(); @@ -176,8 +178,8 @@ public void deleteRole(final DeleteRoleRequest deleteRoleRequest, final ActionLi listener.onFailure(frozenSecurityIndex.getUnavailableReason()); } else { securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> { - DeleteRequest request = client.prepareDelete(SecurityIndexManager.SECURITY_INDEX_NAME, - ROLE_DOC_TYPE, getIdForRole(deleteRoleRequest.name())).request(); + DeleteRequest request = client + .prepareDelete(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForRole(deleteRoleRequest.name())).request(); request.setRefreshPolicy(deleteRoleRequest.getRefreshPolicy()); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, new ActionListener() { @@ -217,7 +219,7 @@ void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final listener.onFailure(e); return; } - final IndexRequest indexRequest = client.prepareIndex(SECURITY_INDEX_NAME, ROLE_DOC_TYPE, getIdForRole(role.getName())) + final IndexRequest indexRequest = client.prepareIndex(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForRole(role.getName())) .setSource(xContentBuilder) .setRefreshPolicy(request.getRefreshPolicy()) .request(); @@ -311,17 +313,20 @@ public String toString() { } private void getRoleDescriptor(final String roleId, ActionListener resultListener) { - if (securityIndex.indexExists() == false) { + final SecurityIndexManager frozenSecurityIndex = this.securityIndex.freeze(); + if (frozenSecurityIndex.indexExists() == false) { // TODO remove this short circuiting and fix tests that fail without this! 
resultListener.onResponse(RoleRetrievalResult.success(Collections.emptySet())); + } else if (frozenSecurityIndex.isAvailable() == false) { + resultListener.onResponse(RoleRetrievalResult.failure(frozenSecurityIndex.getUnavailableReason())); } else { - securityIndex.prepareIndexIfNeededThenExecute(e -> resultListener.onResponse(RoleRetrievalResult.failure(e)), () -> - executeGetRoleRequest(roleId, new ActionListener() { + securityIndex.checkIndexVersionThenExecute(e -> resultListener.onResponse(RoleRetrievalResult.failure(e)), + () -> executeGetRoleRequest(roleId, new ActionListener() { @Override public void onResponse(GetResponse response) { final RoleDescriptor descriptor = transformRole(response); - resultListener.onResponse(RoleRetrievalResult.success( - descriptor == null ? Collections.emptySet() : Collections.singleton(descriptor))); + resultListener.onResponse(RoleRetrievalResult + .success(descriptor == null ? Collections.emptySet() : Collections.singleton(descriptor))); } @Override @@ -335,8 +340,7 @@ public void onFailure(Exception e) { private void executeGetRoleRequest(String role, ActionListener listener) { securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareGet(SECURITY_INDEX_NAME, - ROLE_DOC_TYPE, getIdForRole(role)).request(), + client.prepareGet(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, getIdForRole(role)).request(), listener, client::get)); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java index c2066996f9ce4..801902d5b9623 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java @@ -71,8 +71,9 @@ protected Exception checkFeatureAvailable(RestRequest request) { return new IllegalStateException("Security is not enabled but a security rest handler is registered"); } else if (licenseState.isSecurityAvailable() == false) { return LicenseUtils.newComplianceException(XPackField.SECURITY); - } else if (licenseState.isSecurityDisabledByTrialLicense()) { - return new ElasticsearchException("Security must be explicitly enabled when using a trial license. " + + } else if (licenseState.isSecurityDisabledByLicenseDefaults()) { + return new ElasticsearchException("Security must be explicitly enabled when using a [" + + licenseState.getOperationMode().description() + "] license. " + "Enable security by setting [xpack.security.enabled] to [true] in the elasticsearch.yml file " + "and restart the node."); } else { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandler.java new file mode 100644 index 0000000000000..4d797bd543dc3 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandler.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.security.rest.action.apikey; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +/** + * A base rest handler that handles licensing for ApiKey actions + */ +abstract class ApiKeyBaseRestHandler extends SecurityBaseRestHandler { + ApiKeyBaseRestHandler(Settings settings, XPackLicenseState licenseState) { + super(settings, licenseState); + } + + @Override + protected Exception checkFeatureAvailable(RestRequest request) { + Exception failedFeature = super.checkFeatureAvailable(request); + if (failedFeature != null) { + return failedFeature; + } else if (licenseState.isApiKeyServiceAllowed()) { + return null; + } else { + logger.info("API Keys are not available under the current [{}] license", licenseState.getOperationMode().description()); + return LicenseUtils.newComplianceException("api keys"); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestCreateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java similarity index 94% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestCreateApiKeyAction.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java index 2e3ced0d8933f..14d4726553dff 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestCreateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.security.rest.action; +package org.elasticsearch.xpack.security.rest.action.apikey; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.node.NodeClient; @@ -24,7 +24,7 @@ /** * Rest action to create an API key */ -public final class RestCreateApiKeyAction extends SecurityBaseRestHandler { +public final class RestCreateApiKeyAction extends ApiKeyBaseRestHandler { /** * @param settings the node's settings diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestGetApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java similarity index 95% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestGetApiKeyAction.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java index ec0bd7bd9fd31..71ed5a06efb65 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestGetApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.security.rest.action; +package org.elasticsearch.xpack.security.rest.action.apikey; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; @@ -26,7 +26,7 @@ /** * Rest action to get one or more API keys information. */ -public final class RestGetApiKeyAction extends SecurityBaseRestHandler { +public final class RestGetApiKeyAction extends ApiKeyBaseRestHandler { public RestGetApiKeyAction(Settings settings, RestController controller, XPackLicenseState licenseState) { super(settings, licenseState); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestInvalidateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java similarity index 95% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestInvalidateApiKeyAction.java rename to x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java index eb10ec6669e32..b11a0edde42f8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestInvalidateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.security.rest.action; +package org.elasticsearch.xpack.security.rest.action.apikey; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; @@ -28,7 +28,7 @@ /** * Rest action to invalidate one or more API keys */ -public final class RestInvalidateApiKeyAction extends SecurityBaseRestHandler { +public final class RestInvalidateApiKeyAction extends ApiKeyBaseRestHandler { static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("invalidate_api_key", a -> { return new InvalidateApiKeyRequest((String) a[0], (String) a[1], (String) a[2], (String) a[3]); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenAction.java index 94317145b02d8..4734c39bc5af5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequest; import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse; import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction; -import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; import java.util.Arrays; @@ -45,7 +44,7 @@ * specification as this aspect does not make the most sense since the response body is * expected to be JSON */ -public final class RestGetTokenAction extends SecurityBaseRestHandler { +public final class RestGetTokenAction extends TokenBaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestGetTokenAction.class)); static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("token_request", diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestInvalidateTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestInvalidateTokenAction.java index 9801f3c93c839..cb72e53bcc8d3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestInvalidateTokenAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestInvalidateTokenAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenRequest; import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenResponse; -import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; import java.io.IOException; @@ -33,7 +32,7 @@ /** * Rest handler for handling access token invalidation requests */ -public final class RestInvalidateTokenAction extends SecurityBaseRestHandler { +public final class RestInvalidateTokenAction extends TokenBaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestInvalidateTokenAction.class)); static final ConstructingObjectParser PARSER = diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/TokenBaseRestHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/TokenBaseRestHandler.java new file mode 100644 index 0000000000000..7111a5387fe5b --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/TokenBaseRestHandler.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.security.rest.action.oauth2; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +/** + * A base rest handler that handles licensing for Token actions + */ +abstract class TokenBaseRestHandler extends SecurityBaseRestHandler { + + protected Logger logger = LogManager.getLogger(getClass()); + + TokenBaseRestHandler(Settings settings, XPackLicenseState licenseState) { + super(settings, licenseState); + } + + @Override + protected Exception checkFeatureAvailable(RestRequest request) { + Exception failedFeature = super.checkFeatureAvailable(request); + if (failedFeature != null) { + return failedFeature; + } else if (licenseState.isTokenServiceAllowed()) { + return null; + } else { + logger.info("Security tokens are not available under the current [{}] license", licenseState.getOperationMode().description()); + return LicenseUtils.newComplianceException("security tokens"); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index acd4f1c480500..8cb227519a01d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; @@ -34,18 +35,24 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.template.TemplateUtils; -import org.elasticsearch.xpack.core.upgrade.IndexUpgradeCheckVersion; +import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -59,15 +66,17 @@ import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; +import static 
org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureFieldName; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; /** - * Manages the lifecycle of a single index, its template, mapping and and data upgrades/migrations. + * Manages the lifecycle of a single index, its template, mapping and data upgrades/migrations. */ public class SecurityIndexManager implements ClusterStateListener { - public static final String INTERNAL_SECURITY_INDEX = ".security-" + IndexUpgradeCheckVersion.UPRADE_VERSION; + public static final String INTERNAL_SECURITY_INDEX = RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_7; public static final int INTERNAL_INDEX_FORMAT = 6; public static final String SECURITY_VERSION_STRING = "security-version"; public static final String TEMPLATE_VERSION_PATTERN = Pattern.quote("${security.template.version}"); @@ -83,7 +92,7 @@ public class SecurityIndexManager implements ClusterStateListener { private volatile State indexState; public SecurityIndexManager(Client client, String indexName, ClusterService clusterService) { - this(client, indexName, new State(false, false, false, false, null, null)); + this(client, indexName, State.UNRECOVERED_STATE); clusterService.addListener(this); } @@ -97,10 +106,6 @@ public SecurityIndexManager freeze() { return new SecurityIndexManager(null, indexName, indexState); } - public static List indexNames() { - return Collections.unmodifiableList(Arrays.asList(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)); - } - public boolean checkMappingVersion(Predicate requiredVersion) { // pull value into local variable for consistent view final State currentIndexState = this.indexState; @@ -127,6 +132,10 @@ public boolean isMappingUpToDate() { return this.indexState.mappingUpToDate; } + public boolean isStateRecovered() { + return this.indexState != State.UNRECOVERED_STATE; + } + public ElasticsearchException getUnavailableReason() { final State localState = this.indexState; if (localState.indexAvailable) { @@ -167,7 +176,9 @@ public void clusterChanged(ClusterChangedEvent event) { final Version mappingVersion = oldestIndexMappingVersion(event.state()); final ClusterHealthStatus indexStatus = indexMetaData == null ? null : new ClusterIndexHealth(indexMetaData, event.state().getRoutingTable().index(indexMetaData.getIndex())).getStatus(); - final State newState = new State(indexExists, isIndexUpToDate, indexAvailable, mappingIsUpToDate, mappingVersion, indexStatus); + final String concreteIndexName = indexMetaData == null ? INTERNAL_SECURITY_INDEX : indexMetaData.getIndex().getName(); + final State newState = new State(indexExists, isIndexUpToDate, indexAvailable, mappingIsUpToDate, mappingVersion, concreteIndexName, + indexStatus); this.indexState = newState; if (newState.equals(previousState) == false) { @@ -301,15 +312,19 @@ public void checkIndexVersionThenExecute(final Consumer consumer, fin public void prepareIndexIfNeededThenExecute(final Consumer consumer, final Runnable andThen) { final State indexState = this.indexState; // use a local copy so all checks execute against the same state! 
// TODO we should improve this so we don't fire off a bunch of requests to do the same thing (create or update mappings) - if (indexState.indexExists && indexState.isIndexUpToDate == false) { + if (indexState == State.UNRECOVERED_STATE) { + consumer.accept(new ElasticsearchStatusException("Cluster state has not been recovered yet, cannot write to the security index", + RestStatus.SERVICE_UNAVAILABLE)); + } else if (indexState.indexExists && indexState.isIndexUpToDate == false) { consumer.accept(new IllegalStateException( "Security index is not on the current version. Security features relying on the index will not be available until " + "the upgrade API is run on the security index")); } else if (indexState.indexExists == false) { + LOGGER.info("security index does not exist. Creating [{}] with alias [{}]", INTERNAL_SECURITY_INDEX, SECURITY_INDEX_NAME); Tuple mappingAndSettings = loadMappingAndSettingsSourceFromTemplate(); CreateIndexRequest request = new CreateIndexRequest(INTERNAL_SECURITY_INDEX) .alias(new Alias(SECURITY_INDEX_NAME)) - .mapping("doc", mappingAndSettings.v1(), XContentType.JSON) + .mapping(MapperService.SINGLE_MAPPING_NAME, mappingAndSettings.v1(), XContentType.JSON) .waitForActiveShards(ActiveShardCount.ALL) .settings(mappingAndSettings.v2()); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, @@ -336,9 +351,12 @@ public void onFailure(Exception e) { } }, client.admin().indices()::create); } else if (indexState.mappingUpToDate == false) { - PutMappingRequest request = new PutMappingRequest(INTERNAL_SECURITY_INDEX) + LOGGER.info( + "security index [{}] (alias [{}]) is not up to date. Updating mapping", indexState.concreteIndexName, SECURITY_INDEX_NAME); + + PutMappingRequest request = new PutMappingRequest(indexState.concreteIndexName) .source(loadMappingAndSettingsSourceFromTemplate().v1(), XContentType.JSON) - .type("doc"); + .type(MapperService.SINGLE_MAPPING_NAME); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, ActionListener.wrap(putMappingResponse -> { if (putMappingResponse.isAcknowledged()) { @@ -353,10 +371,24 @@ public void onFailure(Exception e) { } private Tuple loadMappingAndSettingsSourceFromTemplate() { - final byte[] template = TemplateUtils.loadTemplate("/" + SECURITY_TEMPLATE_NAME + ".json", - Version.CURRENT.toString(), SecurityIndexManager.TEMPLATE_VERSION_PATTERN).getBytes(StandardCharsets.UTF_8); - PutIndexTemplateRequest request = new PutIndexTemplateRequest(SECURITY_TEMPLATE_NAME).source(template, XContentType.JSON); - return new Tuple<>(request.mappings().get("doc"), request.settings()); + final byte[] template = TemplateUtils.loadTemplate("/" + SECURITY_TEMPLATE_NAME + ".json", Version.CURRENT.toString(), + SecurityIndexManager.TEMPLATE_VERSION_PATTERN).getBytes(StandardCharsets.UTF_8); + final PutIndexTemplateRequest request = new PutIndexTemplateRequest(SECURITY_TEMPLATE_NAME).source(template, XContentType.JSON); + + final String mappingSource = request.mappings().get(MapperService.SINGLE_MAPPING_NAME); + try (XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, mappingSource)) { + // remove the type wrapping to get the mapping + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); // { + ensureFieldName(parser, parser.nextToken(), MapperService.SINGLE_MAPPING_NAME); // _doc + 
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); // { + + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.generator().copyCurrentStructure(parser); + return new Tuple<>(Strings.toString(builder), request.settings()); + } catch (IOException e) { + throw ExceptionsHelper.convertToRuntime(e); + } } /** @@ -378,20 +410,23 @@ public static boolean isIndexDeleted(State previousState, State currentState) { * State of the security index. */ public static class State { + public static final State UNRECOVERED_STATE = new State(false, false, false, false, null, null, null); public final boolean indexExists; public final boolean isIndexUpToDate; public final boolean indexAvailable; public final boolean mappingUpToDate; public final Version mappingVersion; + public final String concreteIndexName; public final ClusterHealthStatus indexStatus; public State(boolean indexExists, boolean isIndexUpToDate, boolean indexAvailable, - boolean mappingUpToDate, Version mappingVersion, ClusterHealthStatus indexStatus) { + boolean mappingUpToDate, Version mappingVersion, String concreteIndexName, ClusterHealthStatus indexStatus) { this.indexExists = indexExists; this.isIndexUpToDate = isIndexUpToDate; this.indexAvailable = indexAvailable; this.mappingUpToDate = mappingUpToDate; this.mappingVersion = mappingVersion; + this.concreteIndexName = concreteIndexName; this.indexStatus = indexStatus; } @@ -405,12 +440,14 @@ public boolean equals(Object o) { indexAvailable == state.indexAvailable && mappingUpToDate == state.mappingUpToDate && Objects.equals(mappingVersion, state.mappingVersion) && + Objects.equals(concreteIndexName, state.concreteIndexName) && indexStatus == state.indexStatus; } @Override public int hashCode() { - return Objects.hash(indexExists, isIndexUpToDate, indexAvailable, mappingUpToDate, mappingVersion, indexStatus); + return Objects.hash(indexExists, isIndexUpToDate, indexAvailable, mappingUpToDate, mappingVersion, concreteIndexName, + indexStatus); } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java index b924d378f9a2a..1182800922a9c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -188,12 +188,12 @@ protected Map initializeProfileFilters(Destructiv case "client": profileFilters.put(entry.getKey(), new ServerTransportFilter.ClientProfile(authcService, authzService, threadPool.getThreadContext(), extractClientCert, destructiveOperations, reservedRealmEnabled, - securityContext)); + securityContext, licenseState)); break; case "node": profileFilters.put(entry.getKey(), new ServerTransportFilter.NodeProfile(authcService, authzService, threadPool.getThreadContext(), extractClientCert, destructiveOperations, reservedRealmEnabled, - securityContext)); + securityContext, licenseState)); break; default: throw new IllegalStateException("unknown profile type: " + type); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java index 
29ea8838f58e6..2d1f63f5cc15c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexAction; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.transport.TaskTransportChannel; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransportChannel; @@ -66,10 +67,11 @@ class NodeProfile implements ServerTransportFilter { private final DestructiveOperations destructiveOperations; private final boolean reservedRealmEnabled; private final SecurityContext securityContext; + private final XPackLicenseState licenseState; NodeProfile(AuthenticationService authcService, AuthorizationService authzService, ThreadContext threadContext, boolean extractClientCert, DestructiveOperations destructiveOperations, - boolean reservedRealmEnabled, SecurityContext securityContext) { + boolean reservedRealmEnabled, SecurityContext securityContext, XPackLicenseState licenseState) { this.authcService = authcService; this.authzService = authzService; this.threadContext = threadContext; @@ -77,6 +79,7 @@ class NodeProfile implements ServerTransportFilter { this.destructiveOperations = destructiveOperations; this.reservedRealmEnabled = reservedRealmEnabled; this.securityContext = securityContext; + this.licenseState = licenseState; } @Override @@ -116,14 +119,20 @@ requests from all the nodes are attached with a user (either a serialize final Version version = transportChannel.getVersion(); authcService.authenticate(securityAction, request, (User)null, ActionListener.wrap((authentication) -> { - if (securityAction.equals(TransportService.HANDSHAKE_ACTION_NAME) && - SystemUser.is(authentication.getUser()) == false) { - securityContext.executeAsUser(SystemUser.INSTANCE, (ctx) -> { - final Authentication replaced = Authentication.getAuthentication(threadContext); - authzService.authorize(replaced, securityAction, request, listener); - }, version); + if (authentication != null) { + if (securityAction.equals(TransportService.HANDSHAKE_ACTION_NAME) && + SystemUser.is(authentication.getUser()) == false) { + securityContext.executeAsUser(SystemUser.INSTANCE, (ctx) -> { + final Authentication replaced = Authentication.getAuthentication(threadContext); + authzService.authorize(replaced, securityAction, request, listener); + }, version); + } else { + authzService.authorize(authentication, securityAction, request, listener); + } + } else if (licenseState.isAuthAllowed() == false) { + listener.onResponse(null); } else { - authzService.authorize(authentication, securityAction, request, listener); + listener.onFailure(new IllegalStateException("no authentication present but auth is allowed")); } }, listener::onFailure)); } @@ -139,9 +148,9 @@ class ClientProfile extends NodeProfile { ClientProfile(AuthenticationService authcService, AuthorizationService authzService, ThreadContext threadContext, boolean extractClientCert, DestructiveOperations destructiveOperations, - boolean reservedRealmEnabled, SecurityContext securityContext) { + boolean reservedRealmEnabled, SecurityContext securityContext, XPackLicenseState licenseState) { super(authcService, authzService, threadContext, extractClientCert, 
destructiveOperations, reservedRealmEnabled, - securityContext); + securityContext, licenseState); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java index 73948b4192416..f958d01c462f3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java @@ -21,11 +21,11 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.transport.NoNodeAvailableException; import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.license.License.OperationMode; import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -54,6 +54,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -68,7 +69,7 @@ @TestLogging("org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE,org.elasticsearch.action.search:TRACE," + "org.elasticsearch.search:TRACE") public class LicensingTests extends SecurityIntegTestCase { - public static final String ROLES = + private static final String ROLES = SecuritySettingsSource.TEST_ROLE + ":\n" + " cluster: [ all ]\n" + " indices:\n" + @@ -91,7 +92,7 @@ public class LicensingTests extends SecurityIntegTestCase { " - names: 'b'\n" + " privileges: [all]\n"; - public static final String USERS_ROLES = + private static final String USERS_ROLES = SecuritySettingsSource.CONFIG_STANDARD_USER_ROLES + "role_a:user_a,user_b\n" + "role_b:user_b\n"; @@ -131,8 +132,8 @@ protected int maxNumberOfNodes() { } @Before - public void resetLicensing() { - enableLicensing(); + public void resetLicensing() throws InterruptedException { + enableLicensing(OperationMode.MISSING); } @After @@ -140,6 +141,7 @@ public void cleanupSecurityIndex() { deleteSecurityIndex(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42215") public void testEnableDisableBehaviour() throws Exception { IndexResponse indexResponse = index("test", "type", jsonBuilder() .startObject() @@ -155,11 +157,7 @@ public void testEnableDisableBehaviour() throws Exception { assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); refresh(); - // wait for all replicas to be started (to make sure that there are no more cluster state updates when we disable licensing) - assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().routingTable() - .shardsWithState(ShardRoutingState.INITIALIZING).isEmpty())); - - Client client = internalCluster().transportClient(); + final Client client = internalCluster().transportClient(); disableLicensing(); @@ -233,7 +231,7 @@ public void testSecurityActionsByLicenseType() throws Exception { // enable a license that enables security License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.TRIAL, - License.OperationMode.PLATINUM, License.OperationMode.STANDARD); + 
License.OperationMode.PLATINUM, License.OperationMode.STANDARD, OperationMode.BASIC); enableLicensing(mode); // security actions should work! try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { @@ -273,7 +271,6 @@ public void testTransportClientAuthenticationByLicenseType() throws Exception { public void testNodeJoinWithoutSecurityExplicitlyEnabled() throws Exception { License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.PLATINUM, License.OperationMode.STANDARD); enableLicensing(mode); - ensureGreen(); final List seedHosts = internalCluster().masterClient().admin().cluster().nodesInfo(new NodesInfoRequest()).get() .getNodes().stream().map(n -> n.getTransport().getAddress().publishAddress().toString()).distinct() @@ -304,23 +301,64 @@ private static void assertElasticsearchSecurityException(ThrowingRunnable runnab assertThat(ee.status(), is(RestStatus.FORBIDDEN)); } - public static void disableLicensing() { - disableLicensing(License.OperationMode.BASIC); - } - - public static void disableLicensing(License.OperationMode operationMode) { - for (XPackLicenseState licenseState : internalCluster().getInstances(XPackLicenseState.class)) { - licenseState.update(operationMode, false, null); - } - } - - public static void enableLicensing() { - enableLicensing(License.OperationMode.BASIC); + private void disableLicensing() throws InterruptedException { + // This method first makes sure licensing is enabled everywhere so that we can execute + // monitoring actions to ensure we have a stable cluster and only then do we disable. + // This is done in an await busy since there is a chance that the enabling of the license + // is overwritten by some other cluster activity and the node throws an exception while we + // wait for things to stabilize! + final boolean success = awaitBusy(() -> { + try { + for (XPackLicenseState licenseState : internalCluster().getInstances(XPackLicenseState.class)) { + if (licenseState.isAuthAllowed() == false) { + enableLicensing(OperationMode.BASIC); + break; + } + } + + ensureGreen(); + ensureClusterSizeConsistency(); + ensureClusterStateConsistency(); + + // apply the disabling of the license once the cluster is stable + for (XPackLicenseState licenseState : internalCluster().getInstances(XPackLicenseState.class)) { + licenseState.update(OperationMode.BASIC, false, null); + } + } catch (Exception e) { + logger.error("Caught exception while disabling license", e); + return false; + } + return true; + }, 30L, TimeUnit.SECONDS); + assertTrue(success); } - public static void enableLicensing(License.OperationMode operationMode) { - for (XPackLicenseState licenseState : internalCluster().getInstances(XPackLicenseState.class)) { - licenseState.update(operationMode, true, null); - } + private void enableLicensing(License.OperationMode operationMode) throws InterruptedException { + // do this in an await busy since there is a chance that the enabling of the license is + // overwritten by some other cluster activity and the node throws an exception while we + // wait for things to stabilize! 
+ final boolean success = awaitBusy(() -> { + try { + // first update the license so we can execute monitoring actions + for (XPackLicenseState licenseState : internalCluster().getInstances(XPackLicenseState.class)) { + licenseState.update(operationMode, true, null); + } + + ensureGreen(); + ensureClusterSizeConsistency(); + ensureClusterStateConsistency(); + + // re-apply the update in case any node received an updated cluster state that triggered the license state + // to change + for (XPackLicenseState licenseState : internalCluster().getInstances(XPackLicenseState.class)) { + licenseState.update(operationMode, true, null); + } + } catch (Exception e) { + logger.error("Caught exception while enabling license", e); + return false; + } + return true; + }, 30L, TimeUnit.SECONDS); + assertTrue(success); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java index a8b2bf4b5350d..9f35987548270 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.security.transport.filter.IPFilter; import org.junit.Before; +import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -77,7 +78,7 @@ public void testEnabled() { rolesStore, roleMappingStore, ipFilter); assertThat(featureSet.enabled(), is(true)); - when(licenseState.isSecurityDisabledByTrialLicense()).thenReturn(true); + when(licenseState.isSecurityDisabledByLicenseDefaults()).thenReturn(true); featureSet = new SecurityFeatureSet(settings, licenseState, realms, rolesStore, roleMappingStore, ipFilter); assertThat(featureSet.enabled(), is(false)); @@ -108,29 +109,10 @@ public void testUsage() throws Exception { final boolean rolesStoreEnabled = randomBoolean(); - doAnswer(invocationOnMock -> { - ActionListener> listener = (ActionListener>) invocationOnMock.getArguments()[0]; - if (rolesStoreEnabled) { - listener.onResponse(Collections.singletonMap("count", 1)); - } else { - listener.onResponse(Collections.emptyMap()); - } - return Void.TYPE; - }).when(rolesStore).usageStats(any(ActionListener.class)); + configureRoleStoreUsage(rolesStoreEnabled); final boolean roleMappingStoreEnabled = randomBoolean(); - doAnswer(invocationOnMock -> { - ActionListener> listener = (ActionListener) invocationOnMock.getArguments()[0]; - if (roleMappingStoreEnabled) { - final Map map = new HashMap<>(); - map.put("size", 12L); - map.put("enabled", 10L); - listener.onResponse(map); - } else { - listener.onResponse(Collections.emptyMap()); - } - return Void.TYPE; - }).when(roleMappingStore).usageStats(any(ActionListener.class)); + configureRoleMappingStoreUsage(roleMappingStoreEnabled); Map realmsUsageStats = new HashMap<>(); for (int i = 0; i < 5; i++) { @@ -140,11 +122,7 @@ public void testUsage() throws Exception { realmUsage.put("key2", Arrays.asList(i)); realmUsage.put("key3", Arrays.asList(i % 2 == 0)); } - doAnswer(invocationOnMock -> { - ActionListener> listener = (ActionListener) invocationOnMock.getArguments()[0]; - listener.onResponse(realmsUsageStats); - return Void.TYPE; - }).when(realms).usageStats(any(ActionListener.class)); + configureRealmsUsage(realmsUsageStats); final boolean anonymousEnabled = randomBoolean(); if (anonymousEnabled) { 
@@ -164,11 +142,7 @@ public void testUsage() throws Exception { assertThat(usage.name(), is(XPackField.SECURITY)); assertThat(usage.enabled(), is(enabled)); assertThat(usage.available(), is(authcAuthzAvailable)); - XContentSource source; - try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - usage.toXContent(builder, ToXContent.EMPTY_PARAMS); - source = new XContentSource(builder); - } + XContentSource source = getXContentSource(usage); if (enabled) { if (authcAuthzAvailable) { @@ -225,4 +199,101 @@ public void testUsage() throws Exception { } } } + + public void testUsageOnTrialLicenseWithSecurityDisabledByDefault() throws Exception { + when(licenseState.isSecurityAvailable()).thenReturn(true); + when(licenseState.isSecurityDisabledByLicenseDefaults()).thenReturn(true); + + Settings.Builder settings = Settings.builder().put(this.settings); + + final boolean httpSSLEnabled = randomBoolean(); + settings.put("xpack.security.http.ssl.enabled", httpSSLEnabled); + final boolean transportSSLEnabled = randomBoolean(); + settings.put("xpack.security.transport.ssl.enabled", transportSSLEnabled); + + final boolean auditingEnabled = randomBoolean(); + settings.put(XPackSettings.AUDIT_ENABLED.getKey(), auditingEnabled); + + final boolean rolesStoreEnabled = randomBoolean(); + configureRoleStoreUsage(rolesStoreEnabled); + + final boolean roleMappingStoreEnabled = randomBoolean(); + configureRoleMappingStoreUsage(roleMappingStoreEnabled); + + configureRealmsUsage(Collections.emptyMap()); + + SecurityFeatureSet featureSet = new SecurityFeatureSet(settings.build(), licenseState, + realms, rolesStore, roleMappingStore, ipFilter); + PlainActionFuture future = new PlainActionFuture<>(); + featureSet.usage(future); + XPackFeatureSet.Usage securityUsage = future.get(); + BytesStreamOutput out = new BytesStreamOutput(); + securityUsage.writeTo(out); + XPackFeatureSet.Usage serializedUsage = new SecurityFeatureSetUsage(out.bytes().streamInput()); + for (XPackFeatureSet.Usage usage : Arrays.asList(securityUsage, serializedUsage)) { + assertThat(usage, is(notNullValue())); + assertThat(usage.name(), is(XPackField.SECURITY)); + assertThat(usage.enabled(), is(false)); + assertThat(usage.available(), is(true)); + XContentSource source = getXContentSource(usage); + + // check SSL : This is permitted even though security has been dynamically disabled by the trial license. 
+ assertThat(source.getValue("ssl"), is(notNullValue())); + assertThat(source.getValue("ssl.http.enabled"), is(httpSSLEnabled)); + assertThat(source.getValue("ssl.transport.enabled"), is(transportSSLEnabled)); + + // everything else is missing because security is disabled + assertThat(source.getValue("realms"), is(nullValue())); + assertThat(source.getValue("token_service"), is(nullValue())); + assertThat(source.getValue("api_key_service"), is(nullValue())); + assertThat(source.getValue("audit"), is(nullValue())); + assertThat(source.getValue("anonymous"), is(nullValue())); + assertThat(source.getValue("ipfilter"), is(nullValue())); + assertThat(source.getValue("roles"), is(nullValue())); + } + } + + private XContentSource getXContentSource(XPackFeatureSet.Usage usage) throws IOException { + XContentSource source; + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + usage.toXContent(builder, ToXContent.EMPTY_PARAMS); + source = new XContentSource(builder); + } + return source; + } + + private void configureRealmsUsage(Map realmsUsageStats) { + doAnswer(invocationOnMock -> { + ActionListener> listener = (ActionListener) invocationOnMock.getArguments()[0]; + listener.onResponse(realmsUsageStats); + return Void.TYPE; + }).when(realms).usageStats(any(ActionListener.class)); + } + + private void configureRoleStoreUsage(boolean rolesStoreEnabled) { + doAnswer(invocationOnMock -> { + ActionListener> listener = (ActionListener>) invocationOnMock.getArguments()[0]; + if (rolesStoreEnabled) { + listener.onResponse(Collections.singletonMap("count", 1)); + } else { + listener.onResponse(Collections.emptyMap()); + } + return Void.TYPE; + }).when(rolesStore).usageStats(any(ActionListener.class)); + } + + private void configureRoleMappingStoreUsage(boolean roleMappingStoreEnabled) { + doAnswer(invocationOnMock -> { + ActionListener> listener = (ActionListener) invocationOnMock.getArguments()[0]; + if (roleMappingStoreEnabled) { + final Map map = new HashMap<>(); + map.put("size", 12L); + map.put("enabled", 10L); + listener.onResponse(map); + } else { + listener.onResponse(Collections.emptyMap()); + } + return Void.TYPE; + }).when(roleMappingStore).usageStats(any(ActionListener.class)); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 5b7ce8b1d03eb..ea2aafb2fe9a4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -53,7 +53,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -66,7 +65,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; import static org.elasticsearch.discovery.DiscoveryModule.ZEN2_DISCOVERY_TYPE; -import static org.elasticsearch.discovery.DiscoveryModule.ZEN_DISCOVERY_TYPE; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_INDEX_FORMAT; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.containsString; @@ -253,17 +251,45 @@ public void testTLSJoinValidator() throws Exception { int numIters = randomIntBetween(1, 10); for (int i = 0; i < numIters; i++) { boolean tlsOn = randomBoolean(); 
- String discoveryType = randomFrom("single-node", ZEN_DISCOVERY_TYPE, ZEN2_DISCOVERY_TYPE, randomAlphaOfLength(4)); - Security.ValidateTLSOnJoin validator = new Security.ValidateTLSOnJoin(tlsOn, discoveryType); + boolean securityExplicitlyEnabled = randomBoolean(); + String discoveryType = randomFrom("single-node", ZEN2_DISCOVERY_TYPE, ZEN2_DISCOVERY_TYPE, randomAlphaOfLength(4)); + + final Settings settings; + if (securityExplicitlyEnabled) { + settings = Settings.builder().put("xpack.security.enabled", true).build(); + } else { + settings = Settings.EMPTY; + } + Security.ValidateTLSOnJoin validator = new Security.ValidateTLSOnJoin(tlsOn, discoveryType, settings); MetaData.Builder builder = MetaData.builder(); - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); + License.OperationMode licenseMode = randomFrom(License.OperationMode.values()); + License license = TestUtils.generateSignedLicense(licenseMode.description(), TimeValue.timeValueHours(24)); TestUtils.putLicense(builder, license); ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build(); - EnumSet productionModes = EnumSet.of(License.OperationMode.GOLD, License.OperationMode.PLATINUM, - License.OperationMode.STANDARD); - if (productionModes.contains(license.operationMode()) && tlsOn == false && "single-node".equals(discoveryType) == false) { + + final boolean expectFailure; + switch (licenseMode) { + case PLATINUM: + case GOLD: + case STANDARD: + expectFailure = tlsOn == false && "single-node".equals(discoveryType) == false; + break; + case BASIC: + expectFailure = tlsOn == false && "single-node".equals(discoveryType) == false && securityExplicitlyEnabled; + break; + case MISSING: + case TRIAL: + expectFailure = false; + break; + default: + throw new AssertionError("unknown operation mode [" + license.operationMode() + "]"); + } + logger.info("Test TLS join; Lic:{} TLS:{} Disco:{} Settings:{} ; Expect Failure: {}", + licenseMode, tlsOn, discoveryType, settings.toDelimitedString(','), expectFailure); + if (expectFailure) { IllegalStateException ise = expectThrows(IllegalStateException.class, () -> validator.accept(node, state)); - assertEquals("TLS setup is required for license type [" + license.operationMode().name() + "]", ise.getMessage()); + assertEquals("Transport TLS ([xpack.security.transport.ssl.enabled]) is required for license type [" + + license.operationMode().description() + "] when security is enabled", ise.getMessage()); } else { validator.accept(node, state); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index 66485f0505c5f..4f32fe7ad8ced 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.tasks.Task; @@ -197,8 +198,11 @@ void doExecute(Action action, Request 
request, ActionListener null, null, Collections.emptySet()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java index 085df140f3ecb..affedfa5b7472 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -202,8 +203,10 @@ public void setup() throws Exception { }).when(securityIndex).checkIndexVersionThenExecute(any(Consumer.class), any(Runnable.class)); when(securityIndex.isAvailable()).thenReturn(true); + final XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isTokenServiceAllowed()).thenReturn(true); final ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); - tokenService = new TokenService(settings, Clock.systemUTC(), client, securityIndex, clusterService); + tokenService = new TokenService(settings, Clock.systemUTC(), client, licenseState, securityIndex, clusterService); final TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java index d7640264cc079..9a88f0e0dd19f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenActionTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.node.Node; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; @@ -70,6 +71,7 @@ public class TransportCreateTokenActionTests extends ESTestCase { private ClusterService clusterService; private AtomicReference idxReqReference; private AuthenticationService authenticationService; + private XPackLicenseState license; @Before public void setupClient() { @@ -134,6 +136,9 @@ public void setupClient() { any(UsernamePasswordToken.class), any(ActionListener.class)); this.clusterService = ClusterServiceUtils.createClusterService(threadPool); + + this.license = mock(XPackLicenseState.class); + when(license.isTokenServiceAllowed()).thenReturn(true); } @After @@ -144,7 +149,7 @@ public void stopThreadPool() throws Exception { } public void testClientCredentialsCreatesWithoutRefreshToken() throws Exception { - final TokenService tokenService = new TokenService(SETTINGS, Clock.systemUTC(), client, 
securityIndex, clusterService); + final TokenService tokenService = new TokenService(SETTINGS, Clock.systemUTC(), client, license, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe"), new Authentication.RealmRef("realm", "type", "node"), null); authentication.writeToContext(threadPool.getThreadContext()); @@ -168,7 +173,7 @@ public void testClientCredentialsCreatesWithoutRefreshToken() throws Exception { } public void testPasswordGrantTypeCreatesWithRefreshToken() throws Exception { - final TokenService tokenService = new TokenService(SETTINGS, Clock.systemUTC(), client, securityIndex, clusterService); + final TokenService tokenService = new TokenService(SETTINGS, Clock.systemUTC(), client, license, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe"), new Authentication.RealmRef("realm", "type", "node"), null); authentication.writeToContext(threadPool.getThreadContext()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 69e008f60c696..256bf6d9df532 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -6,24 +6,26 @@ package org.elasticsearch.xpack.security.authc; -import org.elasticsearch.ElasticsearchException; +import com.google.common.collect.Sets; + import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.action.ApiKey; import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.GetApiKeyResponse; @@ -48,27 +50,28 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.containsInAnyOrder; import static 
org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; -@TestLogging("org.elasticsearch.xpack.security.authc.ApiKeyService:TRACE") public class ApiKeyIntegTests extends SecurityIntegTestCase { + private static final long DELETE_INTERVAL_MILLIS = 100L; @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true) - .put(ApiKeyService.DELETE_INTERVAL.getKey(), TimeValue.timeValueMillis(200L)) + .put(ApiKeyService.DELETE_INTERVAL.getKey(), TimeValue.timeValueMillis(DELETE_INTERVAL_MILLIS)) .put(ApiKeyService.DELETE_TIMEOUT.getKey(), TimeValue.timeValueSeconds(5L)) .build(); } @@ -81,11 +84,15 @@ public void waitForSecurityIndexWritable() throws Exception { @After public void wipeSecurityIndex() throws InterruptedException { // get the api key service and wait until api key expiration is not in progress! + awaitApiKeysRemoverCompletion(); + deleteSecurityIndex(); + } + + private void awaitApiKeysRemoverCompletion() throws InterruptedException { for (ApiKeyService apiKeyService : internalCluster().getInstances(ApiKeyService.class)) { final boolean done = awaitBusy(() -> apiKeyService.isExpirationInProgress() == false); assertTrue(done); } - deleteSecurityIndex(); } public void testCreateApiKey() { @@ -237,55 +244,6 @@ public void testInvalidateApiKeysForApiKeyName() throws InterruptedException, Ex verifyInvalidateResponse(1, responses, invalidateResponse); } - public void testGetAndInvalidateApiKeysWithExpiredAndInvalidatedApiKey() throws Exception { - List responses = createApiKeys(1, null); - Instant created = Instant.now(); - - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken - .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); - SecurityClient securityClient = new SecurityClient(client); - - AtomicReference docId = new AtomicReference<>(); - assertBusy(() -> { - SearchResponse searchResponse = client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) - .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("doc_type", "api_key"))).setSize(10) - .setTerminateAfter(10).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - docId.set(searchResponse.getHits().getAt(0).getId()); - }); - logger.info("searched and found API key with doc id = " + docId.get()); - assertThat(docId.get(), is(notNullValue())); - assertThat(docId.get(), is(responses.get(0).getId())); - - // hack doc to modify the expiration time to the week before - Instant weekBefore = created.minus(8L, ChronoUnit.DAYS); - assertTrue(Instant.now().isAfter(weekBefore)); - client.prepareUpdate(SecurityIndexManager.SECURITY_INDEX_NAME, "doc", docId.get()) - .setDoc("expiration_time", weekBefore.toEpochMilli()).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); - - PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener); - InvalidateApiKeyResponse invalidateResponse = listener.get(); - if 
(invalidateResponse.getErrors().isEmpty() == false) { - logger.error("error occurred while invalidating API key by id : " + invalidateResponse.getErrors().stream() - .map(ElasticsearchException::getMessage) - .collect(Collectors.joining(", "))); - } - verifyInvalidateResponse(1, responses, invalidateResponse); - - // try again - listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener); - invalidateResponse = listener.get(); - assertTrue(invalidateResponse.getInvalidatedApiKeys().isEmpty()); - - // Get API key though returns the API key information - PlainActionFuture listener1 = new PlainActionFuture<>(); - securityClient.getApiKey(GetApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener1); - GetApiKeyResponse response = listener1.get(); - verifyGetResponse(1, responses, response, Collections.emptySet(), Collections.singletonList(responses.get(0).getId())); - } - private void verifyInvalidateResponse(int noOfApiKeys, List responses, InvalidateApiKeyResponse invalidateResponse) { assertThat(invalidateResponse.getInvalidatedApiKeys().size(), equalTo(noOfApiKeys)); @@ -296,79 +254,146 @@ private void verifyInvalidateResponse(int noOfApiKeys, List responses = createApiKeys(2, null); + Client client = waitForExpiredApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader( + Collections.singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + + List createdApiKeys = createApiKeys(2, null); - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken - .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); SecurityClient securityClient = new SecurityClient(client); + PlainActionFuture listener = new PlainActionFuture<>(); - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(responses.get(0).getId()), listener); + securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(0).getId()), listener); InvalidateApiKeyResponse invalidateResponse = listener.get(); assertThat(invalidateResponse.getInvalidatedApiKeys().size(), equalTo(1)); assertThat(invalidateResponse.getPreviouslyInvalidatedApiKeys().size(), equalTo(0)); assertThat(invalidateResponse.getErrors().size(), equalTo(0)); - AtomicReference docId = new AtomicReference<>(); - assertBusy(() -> { - SearchResponse searchResponse = client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) - .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("doc_type", "api_key"))).setSize(10) - .setTerminateAfter(10).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - docId.set(searchResponse.getHits().getAt(0).getId()); - }); - logger.info("searched and found API key with doc id = " + docId.get()); - assertThat(docId.get(), is(notNullValue())); - assertThat(docId.get(), isIn(responses.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toList()))); - AtomicBoolean deleteTriggered = new AtomicBoolean(false); - assertBusy(() -> { - if (deleteTriggered.compareAndSet(false, true)) { - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(responses.get(1).getId()), new PlainActionFuture<>()); + PlainActionFuture getApiKeyResponseListener = new PlainActionFuture<>(); + 
securityClient.getApiKey(GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + assertThat(getApiKeyResponseListener.get().getApiKeyInfos().length, is(2)); + + client = waitForExpiredApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader( + Collections.singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + securityClient = new SecurityClient(client); + + // invalidate API key to trigger remover + listener = new PlainActionFuture<>(); + securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(1).getId()), listener); + assertThat(listener.get().getInvalidatedApiKeys().size(), is(1)); + + awaitApiKeysRemoverCompletion(); + + refreshSecurityIndex(); + + // Verify that 1st invalidated API key is deleted whereas the next one is not + getApiKeyResponseListener = new PlainActionFuture<>(); + securityClient.getApiKey(GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + assertThat(getApiKeyResponseListener.get().getApiKeyInfos().length, is(1)); + ApiKey apiKey = getApiKeyResponseListener.get().getApiKeyInfos()[0]; + assertThat(apiKey.getId(), is(createdApiKeys.get(1).getId())); + assertThat(apiKey.isInvalidated(), is(true)); + } + + private Client waitForExpiredApiKeysRemoverTriggerReadyAndGetClient() throws Exception { + String nodeWithMostRecentRun = null; + long apiKeyLastTrigger = -1L; + for (String nodeName : internalCluster().getNodeNames()) { + ApiKeyService apiKeyService = internalCluster().getInstance(ApiKeyService.class, nodeName); + if (apiKeyService != null) { + if (apiKeyService.lastTimeWhenApiKeysRemoverWasTriggered() > apiKeyLastTrigger) { + nodeWithMostRecentRun = nodeName; + apiKeyLastTrigger = apiKeyService.lastTimeWhenApiKeysRemoverWasTriggered(); + } } - client.admin().indices().prepareRefresh(SecurityIndexManager.SECURITY_INDEX_NAME).get(); - SearchResponse searchResponse = client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) - .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("doc_type", "api_key"))) - .setTerminateAfter(10).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - }, 30, TimeUnit.SECONDS); + } + final ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, nodeWithMostRecentRun); + final long lastRunTime = apiKeyLastTrigger; + assertBusy(() -> { + assertThat(threadPool.relativeTimeInMillis() - lastRunTime, greaterThan(DELETE_INTERVAL_MILLIS)); + }); + return internalCluster().client(nodeWithMostRecentRun); } - public void testExpiredApiKeysDeletedAfter1Week() throws Exception { - List responses = createApiKeys(2, null); + public void testExpiredApiKeysBehaviorWhenKeysExpired1WeekBeforeAnd1DayBefore() throws Exception { + Client client = waitForExpiredApiKeysRemoverTriggerReadyAndGetClient().filterWithHeader( + Collections.singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + + int noOfKeys = 4; + List createdApiKeys = createApiKeys(noOfKeys, null); Instant created = Instant.now(); - Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken - .basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); SecurityClient securityClient = new SecurityClient(client); - 
AtomicReference docId = new AtomicReference<>(); - assertBusy(() -> { - SearchResponse searchResponse = client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) - .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("doc_type", "api_key"))).setSize(10) - .setTerminateAfter(10).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - docId.set(searchResponse.getHits().getAt(0).getId()); - }); - logger.info("searched and found API key with doc id = " + docId.get()); - assertThat(docId.get(), is(notNullValue())); - assertThat(docId.get(), isIn(responses.stream().map(CreateApiKeyResponse::getId).collect(Collectors.toList()))); - + PlainActionFuture getApiKeyResponseListener = new PlainActionFuture<>(); + securityClient.getApiKey(GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + assertThat(getApiKeyResponseListener.get().getApiKeyInfos().length, is(noOfKeys)); + + // Expire the 1st key such that it cannot be deleted by the remover + // hack doc to modify the expiration time to a day before + Instant dayBefore = created.minus(1L, ChronoUnit.DAYS); + assertTrue(Instant.now().isAfter(dayBefore)); + UpdateResponse expirationDateUpdatedResponse = client + .prepareUpdate(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, createdApiKeys.get(0).getId()) + .setDoc("expiration_time", dayBefore.toEpochMilli()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + assertThat(expirationDateUpdatedResponse.getResult(), is(DocWriteResponse.Result.UPDATED)); + + // Expire the 2nd key such that it will be deleted by the remover // hack doc to modify the expiration time to the week before Instant weekBefore = created.minus(8L, ChronoUnit.DAYS); assertTrue(Instant.now().isAfter(weekBefore)); - client.prepareUpdate(SecurityIndexManager.SECURITY_INDEX_NAME, "doc", docId.get()) - .setDoc("expiration_time", weekBefore.toEpochMilli()).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + expirationDateUpdatedResponse = client.prepareUpdate(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, createdApiKeys.get(1).getId()) + .setDoc("expiration_time", weekBefore.toEpochMilli()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + assertThat(expirationDateUpdatedResponse.getResult(), is(DocWriteResponse.Result.UPDATED)); - AtomicBoolean deleteTriggered = new AtomicBoolean(false); - assertBusy(() -> { - if (deleteTriggered.compareAndSet(false, true)) { - securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(responses.get(1).getId()), new PlainActionFuture<>()); + // Invalidate to trigger the remover + PlainActionFuture listener = new PlainActionFuture<>(); + securityClient.invalidateApiKey(InvalidateApiKeyRequest.usingApiKeyId(createdApiKeys.get(2).getId()), listener); + assertThat(listener.get().getInvalidatedApiKeys().size(), is(1)); + + awaitApiKeysRemoverCompletion(); + + refreshSecurityIndex(); + + // Verify get API keys does not return expired and deleted key + getApiKeyResponseListener = new PlainActionFuture<>(); + securityClient.getApiKey(GetApiKeyRequest.usingRealmName("file"), getApiKeyResponseListener); + assertThat(getApiKeyResponseListener.get().getApiKeyInfos().length, is(3)); + + Set expectedKeyIds = Sets.newHashSet(createdApiKeys.get(0).getId(), createdApiKeys.get(2).getId(), + createdApiKeys.get(3).getId()); + for (ApiKey apiKey : getApiKeyResponseListener.get().getApiKeyInfos()) { + assertThat(apiKey.getId(), isIn(expectedKeyIds)); + if 
(apiKey.getId().equals(createdApiKeys.get(0).getId())) { + // has been expired, not invalidated + assertTrue(apiKey.getExpiration().isBefore(Instant.now())); + assertThat(apiKey.isInvalidated(), is(false)); + } else if (apiKey.getId().equals(createdApiKeys.get(2).getId())) { + // has not been expired as no expiration, but invalidated + assertThat(apiKey.getExpiration(), is(nullValue())); + assertThat(apiKey.isInvalidated(), is(true)); + } else if (apiKey.getId().equals(createdApiKeys.get(3).getId())) { + // has not been expired as no expiration, not invalidated + assertThat(apiKey.getExpiration(), is(nullValue())); + assertThat(apiKey.isInvalidated(), is(false)); + } else { + fail("unexpected API key " + apiKey); } - client.admin().indices().prepareRefresh(SecurityIndexManager.SECURITY_INDEX_NAME).get(); - SearchResponse searchResponse = client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) - .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("doc_type", "api_key"))) - .setTerminateAfter(10).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - }, 30, TimeUnit.SECONDS); + } + } + + private void refreshSecurityIndex() throws Exception { + assertBusy(() -> { + final RefreshResponse refreshResponse = client().admin().indices().prepareRefresh(SecurityIndexManager.SECURITY_INDEX_NAME) + .get(); + assertThat(refreshResponse.getFailedShards(), is(0)); + }); } public void testActiveApiKeysWithNoExpirationNeverGetDeletedByRemover() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 507cb8349aa46..0491d20d74c8a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -18,10 +19,12 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.AuthenticationType; import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; @@ -29,17 +32,20 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; -import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.ApiKeyService.ApiKeyCredentials; import 
org.elasticsearch.xpack.security.authc.ApiKeyService.ApiKeyRoleDescriptors; import org.elasticsearch.xpack.security.authc.ApiKeyService.CachedApiKeyHashResult; import org.elasticsearch.xpack.security.authz.store.NativePrivilegeStore; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; +import org.elasticsearch.xpack.security.test.SecurityMocks; import org.junit.After; import org.junit.Before; +import java.io.IOException; import java.nio.charset.StandardCharsets; import java.time.Clock; +import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.Arrays; import java.util.Base64; @@ -48,19 +54,26 @@ import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class ApiKeyServiceTests extends ESTestCase { private ThreadPool threadPool; + private XPackLicenseState licenseState; + private Client client; + private SecurityIndexManager securityIndex; @Before public void createThreadPool() { @@ -72,6 +85,15 @@ public void stopThreadPool() { terminate(threadPool); } + @Before + public void setupMocks() { + this.licenseState = mock(XPackLicenseState.class); + when(licenseState.isApiKeyServiceAllowed()).thenReturn(true); + + this.client = mock(Client.class); + this.securityIndex = SecurityMocks.mockSecurityIndexManager(); + } + public void testGetCredentialsFromThreadContext() { ThreadContext threadContext = threadPool.getThreadContext(); assertNull(ApiKeyService.getCredentialsFromHeader(threadContext)); @@ -107,6 +129,57 @@ public void testGetCredentialsFromThreadContext() { } } + public void testAuthenticateWithApiKey() throws Exception { + final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build(); + final ApiKeyService service = createApiKeyService(settings); + + final String id = randomAlphaOfLength(12); + final String key = randomAlphaOfLength(16); + + mockKeyDocument(service, id, key, new User("hulk", "superuser")); + + final AuthenticationResult auth = tryAuthenticate(service, id, key); + assertThat(auth.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + assertThat(auth.getUser(), notNullValue()); + assertThat(auth.getUser().principal(), is("hulk")); + } + + public void testAuthenticationIsSkippedIfLicenseDoesNotAllowIt() throws Exception { + final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build(); + final ApiKeyService service = createApiKeyService(settings); + + final String id = randomAlphaOfLength(12); + final String key = randomAlphaOfLength(16); + + mockKeyDocument(service, id, key, new User(randomAlphaOfLength(6), randomAlphaOfLength(12))); + + when(licenseState.isApiKeyServiceAllowed()).thenReturn(false); + final AuthenticationResult auth = tryAuthenticate(service, id, key); + assertThat(auth.getStatus(), is(AuthenticationResult.Status.CONTINUE)); + assertThat(auth.getUser(), 
nullValue()); + } + + public void mockKeyDocument(ApiKeyService service, String id, String key, User user) throws IOException { + final Authentication authentication = new Authentication(user, new RealmRef("realm1", "native", "node01"), null, Version.CURRENT); + final XContentBuilder docSource = service.newDocument(new SecureString(key.toCharArray()), "test", authentication, + Collections.singleton(SUPERUSER_ROLE_DESCRIPTOR), Instant.now(), Instant.now().plusSeconds(3600), null, Version.CURRENT); + + SecurityMocks.mockGetRequest(client, id, BytesReference.bytes(docSource)); + } + + private AuthenticationResult tryAuthenticate(ApiKeyService service, String id, String key) throws Exception { + final ThreadContext threadContext = threadPool.getThreadContext(); + final String header = "ApiKey " + Base64.getEncoder().encodeToString((id + ":" + key).getBytes(StandardCharsets.UTF_8)); + threadContext.putHeader("Authorization", header); + + final PlainActionFuture future = new PlainActionFuture<>(); + service.authenticateWithApiKeyIfPresent(threadContext, future); + + final AuthenticationResult auth = future.get(); + assertThat(auth, notNullValue()); + return auth; + } + public void testValidateApiKey() throws Exception { final String apiKey = randomAlphaOfLength(16); Hasher hasher = randomFrom(Hasher.PBKDF2, Hasher.BCRYPT4, Hasher.BCRYPT); @@ -122,8 +195,7 @@ public void testValidateApiKey() throws Exception { sourceMap.put("creator", creatorMap); sourceMap.put("api_key_invalidated", false); - ApiKeyService service = new ApiKeyService(Settings.EMPTY, Clock.systemUTC(), null, null, - ClusterServiceUtils.createClusterService(threadPool), threadPool); + ApiKeyService service = createApiKeyService(Settings.EMPTY); ApiKeyService.ApiKeyCredentials creds = new ApiKeyService.ApiKeyCredentials(randomAlphaOfLength(12), new SecureString(apiKey.toCharArray())); PlainActionFuture future = new PlainActionFuture<>(); @@ -136,7 +208,7 @@ public void testValidateApiKey() throws Exception { assertThat(result.getUser().metadata(), is(Collections.emptyMap())); assertThat(result.getMetadata().get(ApiKeyService.API_KEY_ROLE_DESCRIPTORS_KEY), equalTo(sourceMap.get("role_descriptors"))); assertThat(result.getMetadata().get(ApiKeyService.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY), - equalTo(sourceMap.get("limited_by_role_descriptors"))); + equalTo(sourceMap.get("limited_by_role_descriptors"))); sourceMap.put("expiration_time", Clock.systemUTC().instant().plus(1L, ChronoUnit.HOURS).toEpochMilli()); future = new PlainActionFuture<>(); @@ -149,7 +221,7 @@ public void testValidateApiKey() throws Exception { assertThat(result.getUser().metadata(), is(Collections.emptyMap())); assertThat(result.getMetadata().get(ApiKeyService.API_KEY_ROLE_DESCRIPTORS_KEY), equalTo(sourceMap.get("role_descriptors"))); assertThat(result.getMetadata().get(ApiKeyService.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY), - equalTo(sourceMap.get("limited_by_role_descriptors"))); + equalTo(sourceMap.get("limited_by_role_descriptors"))); sourceMap.put("expiration_time", Clock.systemUTC().instant().minus(1L, ChronoUnit.HOURS).toEpochMilli()); future = new PlainActionFuture<>(); @@ -165,7 +237,7 @@ public void testValidateApiKey() throws Exception { result = future.get(); assertNotNull(result); assertFalse(result.isAuthenticated()); - + sourceMap.put("api_key_invalidated", true); creds = new ApiKeyService.ApiKeyCredentials(randomAlphaOfLength(12), new SecureString(randomAlphaOfLength(15).toCharArray())); future = new PlainActionFuture<>(); @@ -179,7 +251,7 @@ public 
void testGetRolesForApiKeyNotInContext() throws Exception { Map superUserRdMap; try (XContentBuilder builder = JsonXContent.contentBuilder()) { superUserRdMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), - BytesReference.bytes(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR + BytesReference.bytes(SUPERUSER_ROLE_DESCRIPTOR .toXContent(builder, ToXContent.EMPTY_PARAMS, true)) .streamInput(), false); @@ -187,14 +259,13 @@ public void testGetRolesForApiKeyNotInContext() throws Exception { Map authMetadata = new HashMap<>(); authMetadata.put(ApiKeyService.API_KEY_ID_KEY, randomAlphaOfLength(12)); authMetadata.put(ApiKeyService.API_KEY_ROLE_DESCRIPTORS_KEY, - Collections.singletonMap(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName(), superUserRdMap)); + Collections.singletonMap(SUPERUSER_ROLE_DESCRIPTOR.getName(), superUserRdMap)); authMetadata.put(ApiKeyService.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY, - Collections.singletonMap(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName(), superUserRdMap)); + Collections.singletonMap(SUPERUSER_ROLE_DESCRIPTOR.getName(), superUserRdMap)); final Authentication authentication = new Authentication(new User("joe"), new RealmRef("apikey", "apikey", "node"), null, Version.CURRENT, AuthenticationType.API_KEY, authMetadata); - ApiKeyService service = new ApiKeyService(Settings.EMPTY, Clock.systemUTC(), null, null, - ClusterServiceUtils.createClusterService(threadPool), threadPool); + ApiKeyService service = createApiKeyService(Settings.EMPTY); PlainActionFuture roleFuture = new PlainActionFuture<>(); service.getRoleForApiKey(authentication, roleFuture); @@ -208,22 +279,22 @@ public void testGetRolesForApiKey() throws Exception { authMetadata.put(ApiKeyService.API_KEY_ID_KEY, randomAlphaOfLength(12)); boolean emptyApiKeyRoleDescriptor = randomBoolean(); final RoleDescriptor roleARoleDescriptor = new RoleDescriptor("a role", new String[] { "monitor" }, - new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("monitor").build() }, - null); + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("monitor").build() }, + null); Map roleARDMap; try (XContentBuilder builder = JsonXContent.contentBuilder()) { roleARDMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), - BytesReference.bytes(roleARoleDescriptor.toXContent(builder, ToXContent.EMPTY_PARAMS, true)).streamInput(), false); + BytesReference.bytes(roleARoleDescriptor.toXContent(builder, ToXContent.EMPTY_PARAMS, true)).streamInput(), false); } authMetadata.put(ApiKeyService.API_KEY_ROLE_DESCRIPTORS_KEY, - (emptyApiKeyRoleDescriptor) ? randomFrom(Arrays.asList(null, Collections.emptyMap())) - : Collections.singletonMap("a role", roleARDMap)); + (emptyApiKeyRoleDescriptor) ? 
randomFrom(Arrays.asList(null, Collections.emptyMap())) + : Collections.singletonMap("a role", roleARDMap)); final RoleDescriptor limitedRoleDescriptor = new RoleDescriptor("limited role", new String[] { "all" }, - new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build() }, - null); + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build() }, + null); Map limitedRdMap; try (XContentBuilder builder = JsonXContent.contentBuilder()) { limitedRdMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), @@ -235,7 +306,7 @@ public void testGetRolesForApiKey() throws Exception { authMetadata.put(ApiKeyService.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY, Collections.singletonMap("limited role", limitedRdMap)); final Authentication authentication = new Authentication(new User("joe"), new RealmRef("apikey", "apikey", "node"), null, - Version.CURRENT, AuthenticationType.API_KEY, authMetadata); + Version.CURRENT, AuthenticationType.API_KEY, authMetadata); final NativePrivilegeStore privilegesStore = mock(NativePrivilegeStore.class); doAnswer(i -> { @@ -247,8 +318,7 @@ public void testGetRolesForApiKey() throws Exception { return null; } ).when(privilegesStore).getPrivileges(any(Collection.class), any(Collection.class), any(ActionListener.class)); - ApiKeyService service = new ApiKeyService(Settings.EMPTY, Clock.systemUTC(), null, null, - ClusterServiceUtils.createClusterService(threadPool), threadPool); + ApiKeyService service = createApiKeyService(Settings.EMPTY); PlainActionFuture roleFuture = new PlainActionFuture<>(); service.getRoleForApiKey(authentication, roleFuture); @@ -280,8 +350,7 @@ public void testApiKeyCache() { sourceMap.put("creator", creatorMap); sourceMap.put("api_key_invalidated", false); - ApiKeyService service = new ApiKeyService(Settings.EMPTY, Clock.systemUTC(), null, null, - ClusterServiceUtils.createClusterService(threadPool), threadPool); + ApiKeyService service = createApiKeyService(Settings.EMPTY); ApiKeyCredentials creds = new ApiKeyCredentials(randomAlphaOfLength(12), new SecureString(apiKey.toCharArray())); PlainActionFuture future = new PlainActionFuture<>(); service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); @@ -345,8 +414,7 @@ public void testApiKeyCacheDisabled() { sourceMap.put("creator", creatorMap); sourceMap.put("api_key_invalidated", false); - ApiKeyService service = new ApiKeyService(settings, Clock.systemUTC(), null, null, - ClusterServiceUtils.createClusterService(threadPool), threadPool); + ApiKeyService service = createApiKeyService(settings); ApiKeyCredentials creds = new ApiKeyCredentials(randomAlphaOfLength(12), new SecureString(apiKey.toCharArray())); PlainActionFuture future = new PlainActionFuture<>(); service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); @@ -355,4 +423,11 @@ public void testApiKeyCacheDisabled() { CachedApiKeyHashResult cachedApiKeyHashResult = service.getFromCache(creds.getId()); assertNull(cachedApiKeyHashResult); } + + private ApiKeyService createApiKeyService(Settings settings) { + return new ApiKeyService(settings, Clock.systemUTC(), client, licenseState, securityIndex, + ClusterServiceUtils.createClusterService(threadPool), threadPool); + } + + } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 40d9a71d023d4..9e0e75ff46445 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -67,6 +67,7 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine.EmptyAuthorizationInfo; +import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; @@ -117,6 +118,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; @@ -145,9 +147,14 @@ public class AuthenticationServiceTests extends ESTestCase { private Client client; private InetSocketAddress remoteAddress; + private String concreteSecurityIndexName; + @Before @SuppressForbidden(reason = "Allow accessing localhost") public void init() throws Exception { + concreteSecurityIndexName = randomFrom( + RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_6, RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_7); + token = mock(AuthenticationToken.class); when(token.principal()).thenReturn(randomAlphaOfLength(5)); message = new InternalMessage(); @@ -171,9 +178,11 @@ public void init() throws Exception { XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.allowedRealmType()).thenReturn(XPackLicenseState.AllowedRealmType.ALL); when(licenseState.isAuthAllowed()).thenReturn(true); - realms = new TestRealms(Settings.EMPTY, TestEnvironment.newEnvironment(settings), Collections.emptyMap(), + when(licenseState.isApiKeyServiceAllowed()).thenReturn(true); + when(licenseState.isTokenServiceAllowed()).thenReturn(true); + realms = spy(new TestRealms(Settings.EMPTY, TestEnvironment.newEnvironment(settings), Collections.emptyMap(), licenseState, threadContext, mock(ReservedRealm.class), Arrays.asList(firstRealm, secondRealm), - Collections.singletonList(firstRealm)); + Collections.singletonList(firstRealm))); auditTrail = mock(AuditTrailService.class); client = mock(Client.class); @@ -183,9 +192,9 @@ licenseState, threadContext, mock(ReservedRealm.class), Arrays.asList(firstRealm when(client.threadPool()).thenReturn(threadPool); when(client.settings()).thenReturn(settings); when(client.prepareIndex(any(String.class), any(String.class), any(String.class))) - .thenReturn(new IndexRequestBuilder(client, IndexAction.INSTANCE)); + .thenReturn(new IndexRequestBuilder(client, IndexAction.INSTANCE)); when(client.prepareUpdate(any(String.class), any(String.class), any(String.class))) - .thenReturn(new UpdateRequestBuilder(client, UpdateAction.INSTANCE)); + .thenReturn(new UpdateRequestBuilder(client, UpdateAction.INSTANCE)); doAnswer(invocationOnMock -> { ActionListener responseActionListener = (ActionListener) invocationOnMock.getArguments()[2]; responseActionListener.onResponse(new IndexResponse()); @@ -210,8 +219,8 @@ licenseState, threadContext, 
mock(ReservedRealm.class), Arrays.asList(firstRealm return null; }).when(securityIndex).checkIndexVersionThenExecute(any(Consumer.class), any(Runnable.class)); ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); - apiKeyService = new ApiKeyService(settings, Clock.systemUTC(), client, securityIndex, clusterService, threadPool); - tokenService = new TokenService(settings, Clock.systemUTC(), client, securityIndex, clusterService); + apiKeyService = new ApiKeyService(settings, Clock.systemUTC(), client, licenseState, securityIndex, clusterService, threadPool); + tokenService = new TokenService(settings, Clock.systemUTC(), client, licenseState, securityIndex, clusterService); service = new AuthenticationService(settings, realms, auditTrail, new DefaultAuthenticationFailureHandler(Collections.emptyMap()), threadPool, new AnonymousUser(settings), tokenService, apiKeyService); } @@ -276,6 +285,8 @@ public void testAuthenticateBothSupportSecondSucceeds() throws Exception { }, this::logAndFail)); assertTrue(completed.get()); verify(auditTrail).authenticationFailed(reqId, firstRealm.name(), token, "_action", message); + verify(realms).asList(); + verifyNoMoreInteractions(realms); } public void testAuthenticateSmartRealmOrdering() { @@ -1383,6 +1394,6 @@ private void setCompletedToTrue(AtomicBoolean completed) { } private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { - return new SecurityIndexManager.State(true, true, true, true, null, indexStatus); + return new SecurityIndexManager.State(true, true, true, true, null, concreteSecurityIndexName, indexStatus); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java index c37d6913d1fd2..8c60b6c2f58f0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java @@ -39,10 +39,13 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.notNullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -104,6 +107,8 @@ public void testWithSettings() throws Exception { assertThat(realm.name(), equalTo("realm_" + index)); i++; } + + assertThat(realms.getUnlicensedRealms(), empty()); } public void testWithSettingsWhereDifferentRealmsHaveSameOrder() throws Exception { @@ -142,6 +147,8 @@ public void testWithSettingsWhereDifferentRealmsHaveSameOrder() throws Exception assertThat(realm.type(), equalTo("type_" + nameToRealmId.get(expectedRealmName))); assertThat(realm.name(), equalTo(expectedRealmName)); } + + assertThat(realms.getUnlicensedRealms(), empty()); } public void testWithSettingsWithMultipleInternalRealmsOfSameType() throws Exception { @@ -175,6 +182,8 @@ public void testWithEmptySettings() throws Exception { assertThat(realm.type(), equalTo(NativeRealmSettings.TYPE)); assertThat(realm.name(), equalTo("default_" + NativeRealmSettings.TYPE)); assertThat(iter.hasNext(), is(false)); + 
+ assertThat(realms.getUnlicensedRealms(), empty()); } public void testUnlicensedWithOnlyCustomRealms() throws Exception { @@ -209,6 +218,8 @@ public void testUnlicensedWithOnlyCustomRealms() throws Exception { i++; } + assertThat(realms.getUnlicensedRealms(), empty()); + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.DEFAULT); iter = realms.iterator(); @@ -225,6 +236,18 @@ public void testUnlicensedWithOnlyCustomRealms() throws Exception { assertThat(realm.name(), equalTo("default_" + NativeRealmSettings.TYPE)); assertThat(iter.hasNext(), is(false)); + assertThat(realms.getUnlicensedRealms(), iterableWithSize(randomRealmTypesCount)); + iter = realms.getUnlicensedRealms().iterator(); + i = 0; + while (iter.hasNext()) { + realm = iter.next(); + assertThat(realm.order(), equalTo(i)); + int index = orderToIndex.get(i); + assertThat(realm.type(), equalTo("type_" + index)); + assertThat(realm.name(), equalTo("realm_" + index)); + i++; + } + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); iter = realms.iterator(); @@ -240,6 +263,18 @@ public void testUnlicensedWithOnlyCustomRealms() throws Exception { assertThat(realm.type(), equalTo(NativeRealmSettings.TYPE)); assertThat(realm.name(), equalTo("default_" + NativeRealmSettings.TYPE)); assertThat(iter.hasNext(), is(false)); + + assertThat(realms.getUnlicensedRealms(), iterableWithSize(randomRealmTypesCount)); + iter = realms.getUnlicensedRealms().iterator(); + i = 0; + while (iter.hasNext()) { + realm = iter.next(); + assertThat(realm.order(), equalTo(i)); + int index = orderToIndex.get(i); + assertThat(realm.type(), equalTo("type_" + index)); + assertThat(realm.name(), equalTo("realm_" + index)); + i++; + } } public void testUnlicensedWithInternalRealms() throws Exception { @@ -266,6 +301,7 @@ public void testUnlicensedWithInternalRealms() throws Exception { types.add(realm.type()); } assertThat(types, contains("ldap", "type_0")); + assertThat(realms.getUnlicensedRealms(), empty()); when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.DEFAULT); iter = realms.iterator(); @@ -280,6 +316,11 @@ public void testUnlicensedWithInternalRealms() throws Exception { } assertThat(i, is(1)); + assertThat(realms.getUnlicensedRealms(), iterableWithSize(1)); + realm = realms.getUnlicensedRealms().get(0); + assertThat(realm.type(), equalTo("type_0")); + assertThat(realm.name(), equalTo("custom")); + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); iter = realms.iterator(); assertThat(iter.hasNext(), is(true)); @@ -294,6 +335,14 @@ public void testUnlicensedWithInternalRealms() throws Exception { assertThat(realm.type(), equalTo(NativeRealmSettings.TYPE)); assertThat(realm.name(), equalTo("default_" + NativeRealmSettings.TYPE)); assertThat(iter.hasNext(), is(false)); + + assertThat(realms.getUnlicensedRealms(), iterableWithSize(2)); + realm = realms.getUnlicensedRealms().get(0); + assertThat(realm.type(), equalTo("ldap")); + assertThat(realm.name(), equalTo("foo")); + realm = realms.getUnlicensedRealms().get(1); + assertThat(realm.type(), equalTo("type_0")); + assertThat(realm.name(), equalTo("custom")); } public void testUnlicensedWithNativeRealmSettings() throws Exception { @@ -317,6 +366,7 @@ public void testUnlicensedWithNativeRealmSettings() throws Exception { realm = iter.next(); assertThat(realm.type(), is(type)); assertThat(iter.hasNext(), is(false)); + assertThat(realms.getUnlicensedRealms(), empty()); 
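The getUnlicensedRealms() assertions added throughout this test class all follow the same pattern: re-stub the mocked XPackLicenseState, then check both the active realm iterator and the newly exposed unlicensed list. A minimal sketch of that pattern, assuming the licenseState mock, the realms instance, and the Hamcrest/Mockito static imports already present in this test class (the ALL constant and the expected size of 1 are illustrative, not taken from this diff):

    // With an unrestricted license nothing is filtered, so the unlicensed list is empty.
    when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.ALL);
    assertThat(realms.getUnlicensedRealms(), empty());

    // After a license downgrade, realm types that are no longer permitted disappear from
    // the iterator and are reported via getUnlicensedRealms() instead, in configured order.
    when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.DEFAULT);
    assertThat(realms.getUnlicensedRealms(), iterableWithSize(1));
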
when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); iter = realms.iterator(); @@ -327,6 +377,11 @@ public void testUnlicensedWithNativeRealmSettings() throws Exception { realm = iter.next(); assertThat(realm.type(), is(type)); assertThat(iter.hasNext(), is(false)); + + assertThat(realms.getUnlicensedRealms(), iterableWithSize(1)); + realm = realms.getUnlicensedRealms().get(0); + assertThat(realm.type(), equalTo("ldap")); + assertThat(realm.name(), equalTo("foo")); } public void testUnlicensedWithNonStandardRealms() throws Exception { @@ -346,6 +401,7 @@ public void testUnlicensedWithNonStandardRealms() throws Exception { realm = iter.next(); assertThat(realm.type(), is(selectedRealmType)); assertThat(iter.hasNext(), is(false)); + assertThat(realms.getUnlicensedRealms(), empty()); when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.DEFAULT); iter = realms.iterator(); @@ -360,6 +416,11 @@ public void testUnlicensedWithNonStandardRealms() throws Exception { assertThat(realm.type(), is(NativeRealmSettings.TYPE)); assertThat(iter.hasNext(), is(false)); + assertThat(realms.getUnlicensedRealms(), iterableWithSize(1)); + realm = realms.getUnlicensedRealms().get(0); + assertThat(realm.type(), equalTo(selectedRealmType)); + assertThat(realm.name(), equalTo("foo")); + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); iter = realms.iterator(); assertThat(iter.hasNext(), is(true)); @@ -372,6 +433,11 @@ public void testUnlicensedWithNonStandardRealms() throws Exception { realm = iter.next(); assertThat(realm.type(), is(NativeRealmSettings.TYPE)); assertThat(iter.hasNext(), is(false)); + + assertThat(realms.getUnlicensedRealms(), iterableWithSize(1)); + realm = realms.getUnlicensedRealms().get(0); + assertThat(realm.type(), equalTo(selectedRealmType)); + assertThat(realm.name(), equalTo("foo")); } public void testDisabledRealmsAreNotAdded() throws Exception { @@ -422,6 +488,11 @@ public void testDisabledRealmsAreNotAdded() throws Exception { } assertThat(count, equalTo(orderToIndex.size())); + assertThat(realms.getUnlicensedRealms(), empty()); + + // check that disabled realms are not included in unlicensed realms + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); + assertThat(realms.getUnlicensedRealms(), hasSize(orderToIndex.size())); } public void testAuthcAuthzDisabled() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java index 61ea4ef967224..ef444587e4fb7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -43,7 +43,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.equalTo; @TestLogging("org.elasticsearch.xpack.security.authz.store.FileRolesStore:DEBUG") @@ -162,10 +164,10 @@ public void testExpiredTokensDeletedAfterExpiration() throws Exception { // hack doc to modify the creation time to the day before Instant 
yesterday = created.minus(36L, ChronoUnit.HOURS); assertTrue(Instant.now().isAfter(yesterday)); - client.prepareUpdate(SecurityIndexManager.SECURITY_INDEX_NAME, "doc", docId.get()) + client.prepareUpdate(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, docId.get()) .setDoc("creation_time", yesterday.toEpochMilli()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); AtomicBoolean deleteTriggered = new AtomicBoolean(false); assertBusy(() -> { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 8caf82e8648cb..227f44918baeb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.node.Node; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; @@ -47,11 +48,14 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.security.support.SecurityIndexManager; +import org.elasticsearch.xpack.security.test.SecurityMocks; +import org.hamcrest.Matchers; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import java.io.IOException; +import java.security.GeneralSecurityException; import java.time.Clock; import java.time.Instant; import java.time.temporal.ChronoUnit; @@ -59,7 +63,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; -import java.util.function.Consumer; import javax.crypto.SecretKey; @@ -87,6 +90,7 @@ public class TokenServiceTests extends ESTestCase { private ClusterService clusterService; private Settings tokenServiceEnabledSettings = Settings.builder() .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).build(); + private XPackLicenseState licenseState; @Before public void setupClient() { @@ -111,20 +115,13 @@ public void setupClient() { }).when(client).execute(eq(IndexAction.INSTANCE), any(IndexRequest.class), any(ActionListener.class)); // setup lifecycle service - securityIndex = mock(SecurityIndexManager.class); - doAnswer(invocationOnMock -> { - Runnable runnable = (Runnable) invocationOnMock.getArguments()[1]; - runnable.run(); - return null; - }).when(securityIndex).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); - doAnswer(invocationOnMock -> { - Runnable runnable = (Runnable) invocationOnMock.getArguments()[1]; - runnable.run(); - return null; - }).when(securityIndex).checkIndexVersionThenExecute(any(Consumer.class), any(Runnable.class)); - when(securityIndex.indexExists()).thenReturn(true); - when(securityIndex.isAvailable()).thenReturn(true); + securityIndex = SecurityMocks.mockSecurityIndexManager(); this.clusterService = ClusterServiceUtils.createClusterService(threadPool); + + // License state (enabled by default) + licenseState = mock(XPackLicenseState.class); + when(licenseState.isTokenServiceAllowed()).thenReturn(true); + } @BeforeClass @@ -141,7 +138,7 @@ public static void shutdownThreadpool() throws 
InterruptedException { } public void testAttachAndGetToken() throws Exception { - TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); @@ -162,8 +159,7 @@ public void testAttachAndGetToken() throws Exception { try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { // verify a second separate token service with its own salt can also verify - TokenService anotherService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex - , clusterService); + TokenService anotherService = createTokenService(tokenServiceEnabledSettings, systemUTC()); anotherService.refreshMetaData(tokenService.getTokenMetaData()); PlainActionFuture future = new PlainActionFuture<>(); anotherService.getAndValidateToken(requestContext, future); @@ -173,7 +169,7 @@ public void testAttachAndGetToken() throws Exception { } public void testInvalidAuthorizationHeader() throws Exception { - TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); String token = randomFrom("", " "); String authScheme = randomFrom("Bearer ", "BEARER ", "bearer ", "Basic "); @@ -188,7 +184,7 @@ public void testInvalidAuthorizationHeader() throws Exception { } public void testRotateKey() throws Exception { - TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); @@ -198,7 +194,7 @@ public void testRotateKey() throws Exception { authentication = token.getAuthentication(); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + storeTokenHeader(requestContext, tokenService.getUserTokenString(token)); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -222,7 +218,7 @@ public void testRotateKey() throws Exception { assertNotEquals(tokenService.getUserTokenString(newToken), tokenService.getUserTokenString(token)); requestContext = new ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(newToken)); + storeTokenHeader(requestContext, tokenService.getUserTokenString(newToken)); mockGetTokenFromId(newToken, false); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { @@ -241,13 +237,12 @@ private void rotateKeys(TokenService tokenService) { } public void testKeyExchange() throws 
Exception { - TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); int numRotations = randomIntBetween(1, 5); for (int i = 0; i < numRotations; i++) { rotateKeys(tokenService); } - TokenService otherTokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, - clusterService); + TokenService otherTokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); otherTokenService.refreshMetaData(tokenService.getTokenMetaData()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); @@ -258,7 +253,7 @@ public void testKeyExchange() throws Exception { authentication = token.getAuthentication(); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + storeTokenHeader(requestContext, tokenService.getUserTokenString(token)); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); otherTokenService.getAndValidateToken(requestContext, future); @@ -279,7 +274,7 @@ public void testKeyExchange() throws Exception { } public void testPruneKeys() throws Exception { - TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); @@ -289,7 +284,7 @@ public void testPruneKeys() throws Exception { authentication = token.getAuthentication(); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + storeTokenHeader(requestContext, tokenService.getUserTokenString(token)); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -329,7 +324,7 @@ public void testPruneKeys() throws Exception { } requestContext = new ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(newToken)); + storeTokenHeader(requestContext, tokenService.getUserTokenString(newToken)); mockGetTokenFromId(newToken, false); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -341,7 +336,7 @@ public void testPruneKeys() throws Exception { } public void testPassphraseWorks() throws Exception { - TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); 
tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); @@ -351,7 +346,7 @@ public void testPassphraseWorks() throws Exception { authentication = token.getAuthentication(); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + storeTokenHeader(requestContext, tokenService.getUserTokenString(token)); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -362,8 +357,7 @@ public void testPassphraseWorks() throws Exception { try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { // verify a second separate token service with its own passphrase cannot verify - TokenService anotherService = new TokenService(Settings.EMPTY, systemUTC(), client, securityIndex, - clusterService); + TokenService anotherService = createTokenService(tokenServiceEnabledSettings, systemUTC()); PlainActionFuture future = new PlainActionFuture<>(); anotherService.getAndValidateToken(requestContext, future); assertNull(future.get()); @@ -371,7 +365,7 @@ public void testPassphraseWorks() throws Exception { } public void testGetTokenWhenKeyCacheHasExpired() throws Exception { - TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); @@ -386,7 +380,7 @@ public void testGetTokenWhenKeyCacheHasExpired() throws Exception { public void testInvalidatedToken() throws Exception { when(securityIndex.indexExists()).thenReturn(true); TokenService tokenService = - new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); + createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); @@ -395,7 +389,7 @@ public void testInvalidatedToken() throws Exception { mockGetTokenFromId(token, true); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + storeTokenHeader(requestContext, tokenService.getUserTokenString(token)); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -407,6 +401,10 @@ public void testInvalidatedToken() throws Exception { } } + private void storeTokenHeader(ThreadContext requestContext, String tokenString) throws IOException, GeneralSecurityException { + requestContext.putHeader("Authorization", "Bearer " + tokenString); + } + public void testComputeSecretKeyIsConsistent() throws Exception { byte[] saltArr = new byte[32]; random().nextBytes(saltArr); @@ -440,7 +438,7 @@ public void testTokenExpiryConfig() { public void testTokenExpiry() throws Exception { ClockMock clock = ClockMock.frozen(); - TokenService tokenService = new TokenService(tokenServiceEnabledSettings, clock, 
client, securityIndex, clusterService); + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, clock); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); @@ -449,7 +447,7 @@ public void testTokenExpiry() throws Exception { authentication = token.getAuthentication(); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + storeTokenHeader(requestContext, tokenService.getUserTokenString(token)); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { // the clock is still frozen, so the cookie should be valid @@ -493,10 +491,10 @@ public void testTokenServiceDisabled() throws Exception { TokenService tokenService = new TokenService(Settings.builder() .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), false) .build(), - Clock.systemUTC(), client, securityIndex, clusterService); + Clock.systemUTC(), client, licenseState, securityIndex, clusterService); IllegalStateException e = expectThrows(IllegalStateException.class, () -> tokenService.createUserToken(null, null, null, null, true)); - assertEquals("tokens are not enabled", e.getMessage()); + assertEquals("security tokens are not enabled", e.getMessage()); PlainActionFuture future = new PlainActionFuture<>(); tokenService.getAndValidateToken(null, future); @@ -507,7 +505,7 @@ public void testTokenServiceDisabled() throws Exception { tokenService.invalidateAccessToken((String) null, invalidateFuture); invalidateFuture.actionGet(); }); - assertEquals("tokens are not enabled", e.getMessage()); + assertEquals("security tokens are not enabled", e.getMessage()); } public void testBytesKeyEqualsHashCode() { @@ -536,10 +534,10 @@ public void testMalformedToken() throws Exception { final int numBytes = randomIntBetween(1, TokenService.MINIMUM_BYTES + 32); final byte[] randomBytes = new byte[numBytes]; random().nextBytes(randomBytes); - TokenService tokenService = new TokenService(Settings.EMPTY, systemUTC(), client, securityIndex, clusterService); + TokenService tokenService = createTokenService(Settings.EMPTY, systemUTC()); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", "Bearer " + Base64.getEncoder().encodeToString(randomBytes)); + storeTokenHeader(requestContext, Base64.getEncoder().encodeToString(randomBytes)); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -550,7 +548,7 @@ public void testMalformedToken() throws Exception { public void testIndexNotAvailable() throws Exception { TokenService tokenService = - new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); + createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap(), true); @@ -559,7 +557,7 @@ public void testIndexNotAvailable() throws Exception { //mockGetTokenFromId(token, false); ThreadContext 
requestContext = new ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + storeTokenHeader(requestContext, tokenService.getUserTokenString(token)); doAnswer(invocationOnMock -> { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; @@ -593,8 +591,7 @@ public void testIndexNotAvailable() throws Exception { } public void testGetAuthenticationWorksWithExpiredUserToken() throws Exception { - TokenService tokenService = - new TokenService(tokenServiceEnabledSettings, Clock.systemUTC(), client, securityIndex, clusterService); + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); UserToken expired = new UserToken(authentication, Instant.now().minus(3L, ChronoUnit.DAYS)); mockGetTokenFromId(expired, false); @@ -605,6 +602,27 @@ public void testGetAuthenticationWorksWithExpiredUserToken() throws Exception { assertEquals(authentication, retrievedAuth); } + public void testCannotValidateTokenIfLicenseDoesNotAllowTokens() throws Exception { + when(licenseState.isTokenServiceAllowed()).thenReturn(true); + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + UserToken token = new UserToken(authentication, Instant.now().plusSeconds(180)); + mockGetTokenFromId(token, false); + + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + storeTokenHeader(threadContext, tokenService.getUserTokenString(token)); + + PlainActionFuture authFuture = new PlainActionFuture<>(); + when(licenseState.isTokenServiceAllowed()).thenReturn(false); + tokenService.getAndValidateToken(threadContext, authFuture); + UserToken authToken = authFuture.actionGet(); + assertThat(authToken, Matchers.nullValue()); + } + + private TokenService createTokenService(Settings settings, Clock clock) throws GeneralSecurityException { + return new TokenService(settings, clock, client, licenseState, securityIndex, clusterService); + } + private void mockGetTokenFromId(UserToken userToken, boolean isExpired) { mockGetTokenFromId(userToken, isExpired, client); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index 4a925f028a524..d547fe5a83960 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -8,6 +8,8 @@ import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; @@ -17,7 +19,10 @@ import org.elasticsearch.common.collect.MapBuilder; 
import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.NativeRealmIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySettingsSourceField; @@ -46,6 +51,7 @@ import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.Before; import org.junit.BeforeClass; @@ -58,12 +64,14 @@ import java.util.concurrent.CountDownLatch; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_SECURITY_INDEX; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -379,6 +387,69 @@ public void testCreateAndUpdateRole() { } } + public void testSnapshotDeleteRestore() { + logger.error("--> creating role"); + securityClient().preparePutRole("test_role") + .cluster("all") + .addIndices(new String[]{"*"}, new String[]{"create_index"}, null, null, null, true) + .get(); + logger.error("--> creating user"); + securityClient().preparePutUser("joe", "s3krit".toCharArray(), hasher, "test_role", "snapshot_user").get(); + logger.error("--> waiting for .security index"); + ensureGreen(SECURITY_INDEX_NAME); + logger.info("--> creating repository"); + assertAcked(client().admin().cluster() + .preparePutRepository("test-repo") + .setType("fs").setSettings(Settings.builder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + // joe can snapshot all indices, including '.security' + SnapshotInfo snapshotInfo = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster() + .prepareCreateSnapshot("test-repo", "test-snap-1") + .setWaitForCompletion(true) + .setIncludeGlobalState(false) + .setIndices(SECURITY_INDEX_NAME) + .get().getSnapshotInfo(); + assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.indices(), contains(SecurityIndexManager.INTERNAL_SECURITY_INDEX)); + deleteSecurityIndex(); + // the realm cache should clear itself but we don't wish to race it + securityClient().prepareClearRealmCache().get(); + // authn fails + final ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> client() + .filterWithHeader(Collections.singletonMap("Authorization", 
token)).admin().indices().prepareCreate("idx").get()); + assertThat(e.status(), is(RestStatus.UNAUTHORIZED)); + // users and roles are missing + GetUsersResponse getUsersResponse = securityClient().prepareGetUsers("joe").get(); + assertThat(getUsersResponse.users().length, is(0)); + GetRolesResponse getRolesResponse = securityClient().prepareGetRoles("test_role").get(); + assertThat(getRolesResponse.roles().length, is(0)); + // restore + RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-1") + .setWaitForCompletion(true).setIncludeAliases(true).get(); + assertThat(response.status(), equalTo(RestStatus.OK)); + assertThat(response.getRestoreInfo().indices(), contains(SecurityIndexManager.INTERNAL_SECURITY_INDEX)); + // the realm cache should clear itself but we don't wish to race it + securityClient().prepareClearRealmCache().get(); + // users and roles are retrievable + getUsersResponse = securityClient().prepareGetUsers("joe").get(); + assertThat(getUsersResponse.users().length, is(1)); + assertThat(Arrays.asList(getUsersResponse.users()[0].roles()), contains("test_role", "snapshot_user")); + getRolesResponse = securityClient().prepareGetRoles("test_role").get(); + assertThat(getRolesResponse.roles().length, is(1)); + assertThat(Arrays.asList(getRolesResponse.roles()[0].getClusterPrivileges()), contains("all")); + assertThat(getRolesResponse.roles()[0].getIndicesPrivileges().length, is(1)); + assertThat(Arrays.asList(getRolesResponse.roles()[0].getIndicesPrivileges()[0].getPrivileges()), contains("create_index")); + assertThat(Arrays.asList(getRolesResponse.roles()[0].getIndicesPrivileges()[0].getIndices()), contains("*")); + // joe can create indices + CreateIndexResponse createIndexResponse = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin() + .indices().prepareCreate("idx").get(); + assertThat(createIndexResponse.isAcknowledged(), is (true)); + assertAcked(client().admin().cluster().prepareDeleteRepository("test-repo")); + } + public void testAuthenticateWithDeletedRole() { SecurityClient c = securityClient(); logger.error("--> creating role"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java index f5425f59d6a1d..fb0d55c75cd32 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.concurrent.atomic.AtomicInteger; @@ -21,8 +22,11 @@ public class NativeRealmTests extends ESTestCase { + private final String concreteSecurityIndexName = randomFrom( + RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_6, RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_7); + private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { - return new SecurityIndexManager.State(true, true, true, true, null, indexStatus); + return new SecurityIndexManager.State(true, true, true, true, null, concreteSecurityIndexName, indexStatus); 
} public void testCacheClearOnIndexHealthChange() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java index 3d13119292b6c..ab82d18f2e0c7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; @@ -111,7 +112,7 @@ public void testBlankPasswordInIndexImpliesDefaultPassword() throws Exception { final GetResult result = new GetResult( SecurityIndexManager.SECURITY_INDEX_NAME, - NativeUsersStore.INDEX_TYPE, + MapperService.SINGLE_MAPPING_NAME, NativeUsersStore.getIdForUser(NativeUsersStore.RESERVED_USER_TYPE, randomAlphaOfLength(12)), 0, 1, 1L, true, @@ -180,7 +181,7 @@ public void testVerifyNonExistentUser() throws Exception { final GetResult getResult = new GetResult( SecurityIndexManager.SECURITY_INDEX_NAME, - NativeUsersStore.INDEX_TYPE, + MapperService.SINGLE_MAPPING_NAME, NativeUsersStore.getIdForUser(NativeUsersStore.USER_DOC_TYPE, username), UNASSIGNED_SEQ_NO, 0, 1L, false, @@ -222,7 +223,7 @@ private void respondToGetUserRequest(String username, SecureString password, Str final BytesReference source = BytesReference.bytes(jsonBuilder().map(values)); final GetResult getResult = new GetResult( SecurityIndexManager.SECURITY_INDEX_NAME, - NativeUsersStore.INDEX_TYPE, + MapperService.SINGLE_MAPPING_NAME, NativeUsersStore.getIdForUser(NativeUsersStore.USER_DOC_TYPE, username), 0, 1, 1L, true, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClientTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClientTests.java index dd4b747c5b19b..1e77dfc6b16e4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClientTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClientTests.java @@ -27,6 +27,8 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Path; +import static org.hamcrest.Matchers.containsString; + /** * This class tests {@link CommandLineHttpClient} For extensive tests related to * ssl settings can be found {@link SSLConfigurationSettingsTests} @@ -63,6 +65,15 @@ public void testCommandLineHttpClientCanExecuteAndReturnCorrectResultUsingSSLSet assertEquals("Http response body does not match", "complete", httpResponse.getResponseBody().get("test")); } + public void testGetDefaultURLFailsWithHelpfulMessage() { + Settings settings = Settings.builder() + .put("network.host", "_ec2:privateIpv4_") + .build(); + CommandLineHttpClient client = new CommandLineHttpClient(settings, environment); + assertThat(expectThrows(IllegalStateException.class, () -> client.getDefaultURL()).getMessage(), + containsString("unable to determine default URL from settings, please use 
the -u option to explicitly provide the url")); + } + private MockWebServer createMockWebServer() { Path certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"); Path keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java index 106eaa8932629..9867cc29fd3da 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java @@ -9,6 +9,7 @@ import com.unboundid.ldap.sdk.LDAPException; import com.unboundid.ldap.sdk.LDAPURL; import com.unboundid.ldap.sdk.SimpleBindRequest; +import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -29,6 +30,7 @@ import org.junit.After; import org.junit.Before; +import java.net.InetAddress; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; @@ -73,7 +75,12 @@ public void shutdown() throws InterruptedException { public void testBindWithReadTimeout() throws Exception { InMemoryDirectoryServer ldapServer = randomFrom(ldapServers); String protocol = randomFrom("ldap", "ldaps"); - String ldapUrl = new LDAPURL(protocol, "localhost", ldapServer.getListenPort(protocol), null, null, null, null).toString(); + InetAddress listenAddress = ldapServer.getListenAddress(protocol); + if (listenAddress == null) { + listenAddress = InetAddress.getLoopbackAddress(); + } + String ldapUrl = new LDAPURL(protocol, NetworkAddress.format(listenAddress), ldapServer.getListenPort(protocol), + null, null, null, null).toString(); String groupSearchBase = "o=sevenSeas"; String userTemplates = "cn={0},ou=people,o=sevenSeas"; @@ -233,7 +240,12 @@ public void testGroupLookupBase() throws Exception { */ public void testSslTrustIsReloaded() throws Exception { InMemoryDirectoryServer ldapServer = randomFrom(ldapServers); - String ldapUrl = new LDAPURL("ldaps", "localhost", ldapServer.getListenPort("ldaps"), null, null, null, null).toString(); + InetAddress listenAddress = ldapServer.getListenAddress("ldaps"); + if (listenAddress == null) { + listenAddress = InetAddress.getLoopbackAddress(); + } + String ldapUrl = new LDAPURL("ldaps", NetworkAddress.format(listenAddress), ldapServer.getListenPort("ldaps"), + null, null, null, null).toString(); String groupSearchBase = "o=sevenSeas"; String userTemplates = "cn={0},ou=people,o=sevenSeas"; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java index 2598b9da5507f..44498e0ae9762 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java @@ -59,7 +59,7 @@ public void init() throws Exception { Environment env = 
TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); /* * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. - * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * If we re-use an SSLContext, previously connected sessions can get re-established which breaks hostname * verification tests since a re-established connection does not perform hostname verification. */ diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapTestCase.java index 2c0b2f7716650..957167e60d281 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapTestCase.java @@ -18,6 +18,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -25,6 +26,7 @@ import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; @@ -46,6 +48,7 @@ import javax.net.ssl.SSLServerSocketFactory; import javax.net.ssl.SSLSocketFactory; import javax.net.ssl.X509ExtendedKeyManager; +import java.net.InetAddress; import java.security.AccessController; import java.security.KeyStore; import java.security.PrivilegedAction; @@ -76,7 +79,7 @@ public void startLdap() throws Exception { for (int i = 0; i < numberOfLdapServers; i++) { InMemoryDirectoryServerConfig serverConfig = new InMemoryDirectoryServerConfig("o=sevenSeas"); List listeners = new ArrayList<>(2); - listeners.add(InMemoryListenerConfig.createLDAPConfig("ldap")); + listeners.add(InMemoryListenerConfig.createLDAPConfig("ldap", null, 0, null)); if (openLdapsPort()) { final char[] ldapPassword = "ldap-password".toCharArray(); final KeyStore ks = CertParsingUtils.getKeyStoreFromPEM( @@ -85,7 +88,7 @@ public void startLdap() throws Exception { ldapPassword ); X509ExtendedKeyManager keyManager = CertParsingUtils.keyManager(ks, ldapPassword, KeyManagerFactory.getDefaultAlgorithm()); - final SSLContext context = SSLContext.getInstance("TLSv1.2"); + final SSLContext context = SSLContext.getInstance(XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS.get(0)); context.init(new KeyManager[] { keyManager }, null, null); SSLServerSocketFactory serverSocketFactory = context.getServerSocketFactory(); SSLSocketFactory clientSocketFactory = context.getSocketFactory(); @@ -111,7 +114,7 @@ protected boolean openLdapsPort() { } @After - public void stopLdap() throws Exception { + public void stopLdap() { for (int i = 0; i < numberOfLdapServers; i++) { ldapServers[i].shutDown(true); } @@ -120,7 +123,11 @@ public void stopLdap() throws Exception { protected String[] ldapUrls() throws LDAPException 
{ List urls = new ArrayList<>(numberOfLdapServers); for (int i = 0; i < numberOfLdapServers; i++) { - LDAPURL url = new LDAPURL("ldap", "localhost", ldapServers[i].getListenPort(), null, null, null, null); + InetAddress listenAddress = ldapServers[i].getListenAddress(); + if (listenAddress == null) { + listenAddress = InetAddress.getLoopbackAddress(); + } + LDAPURL url = new LDAPURL("ldap", NetworkAddress.format(listenAddress), ldapServers[i].getListenPort(), null, null, null, null); urls.add(url.toString()); } return urls.toArray(Strings.EMPTY_ARRAY); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java index 1483b2f474bf0..2614a1dc3d923 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java @@ -7,11 +7,16 @@ import com.unboundid.ldap.listener.InMemoryDirectoryServer; import com.unboundid.ldap.sdk.LDAPConnection; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.SimpleBindRequest; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.network.InetAddressHelper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.mocksocket.MockSocket; @@ -26,14 +31,21 @@ import org.junit.Before; import java.io.IOException; +import java.net.ConnectException; +import java.net.Inet4Address; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.NoRouteToHostException; import java.net.Socket; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -52,7 +64,7 @@ public void init() throws Exception { } @After - public void shutdown() throws InterruptedException { + public void shutdown() { terminate(threadPool); } @@ -62,29 +74,22 @@ public void testRoundRobin() throws Exception { final int numberOfIterations = randomIntBetween(1, 5); for (int iteration = 0; iteration < numberOfIterations; iteration++) { for (int i = 0; i < numberOfLdapServers; i++) { - LDAPConnection connection = null; - try { - connection = LdapUtils.privilegedConnect(testSessionFactory.getServerSet()::getConnection); + try (LDAPConnection connection = LdapUtils.privilegedConnect(testSessionFactory.getServerSet()::getConnection)) { assertThat(connection.getConnectedPort(), is(ldapServers[i].getListenPort())); - } finally { - if (connection != null) { - connection.close(); - } } } } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32190") public 
void testRoundRobinWithFailures() throws Exception { - assumeTrue("at least one ldap server should be present for this test", ldapServers.length > 1); + assumeTrue("at least two ldap servers should be present for this test", ldapServers.length > 1); logger.debug("using [{}] ldap servers, urls {}", ldapServers.length, ldapUrls()); TestSessionFactory testSessionFactory = createSessionFactory(LdapLoadBalancing.ROUND_ROBIN); // create a list of ports List ports = new ArrayList<>(numberOfLdapServers); - for (int i = 0; i < ldapServers.length; i++) { - ports.add(ldapServers[i].getListenPort()); + for (InMemoryDirectoryServer ldapServer : ldapServers) { + ports.add(ldapServer.getListenPort()); } logger.debug("list of all ports {}", ports); @@ -94,18 +99,18 @@ public void testRoundRobinWithFailures() throws Exception { // get a subset to kill final List ldapServersToKill = randomSubsetOf(numberToKill, ldapServers); final List ldapServersList = Arrays.asList(ldapServers); - final InetAddress local = InetAddress.getByName("localhost"); - final MockServerSocket mockServerSocket = new MockServerSocket(0, 0, local); + final MockServerSocket mockServerSocket = new MockServerSocket(0, 0); final List listenThreads = new ArrayList<>(); final CountDownLatch latch = new CountDownLatch(ldapServersToKill.size()); final CountDownLatch closeLatch = new CountDownLatch(1); try { + final AtomicBoolean success = new AtomicBoolean(true); for (InMemoryDirectoryServer ldapServerToKill : ldapServersToKill) { final int index = ldapServersList.indexOf(ldapServerToKill); assertThat(index, greaterThanOrEqualTo(0)); - final Integer port = Integer.valueOf(ldapServers[index].getListenPort()); + final int port = ldapServers[index].getListenPort(); logger.debug("shutting down server index [{}] listening on [{}]", index, port); - assertTrue(ports.remove(port)); + assertTrue(ports.remove(Integer.valueOf(port))); ldapServers[index].shutDown(true); // when running multiple test jvms, there is a chance that something else could @@ -114,17 +119,9 @@ public void testRoundRobinWithFailures() throws Exception { // a mock server socket. // NOTE: this is not perfect as there is a small amount of time between the shutdown // of the ldap server and the opening of the socket - logger.debug("opening mock server socket listening on [{}]", port); - Runnable runnable = () -> { - try (Socket socket = openMockSocket(local, mockServerSocket.getLocalPort(), local, port)) { - logger.debug("opened socket [{}]", socket); - latch.countDown(); - closeLatch.await(); - logger.debug("closing socket [{}]", socket); - } catch (IOException | InterruptedException e) { - logger.debug("caught exception", e); - } - }; + logger.debug("opening mock client sockets bound to [{}]", port); + Runnable runnable = new PortBlockingRunnable(mockServerSocket.getInetAddress(), mockServerSocket.getLocalPort(), port, + latch, closeLatch, success); Thread thread = new Thread(runnable); thread.start(); listenThreads.add(thread); @@ -133,14 +130,37 @@ public void testRoundRobinWithFailures() throws Exception { } latch.await(); + + assumeTrue("Failed to open sockets on all addresses with the port that an LDAP server was bound to. 
Some operating systems " + + "allow binding to an address and port combination even if an application is bound to the port on a wildcard address", + success.get()); final int numberOfIterations = randomIntBetween(1, 5); + logger.debug("list of all open ports {}", ports); // go one iteration through and attempt a bind for (int iteration = 0; iteration < numberOfIterations; iteration++) { logger.debug("iteration [{}]", iteration); for (Integer port : ports) { logger.debug("attempting connection with expected port [{}]", port); - try (LDAPConnection connection = LdapUtils.privilegedConnect(testSessionFactory.getServerSet()::getConnection)) { + LDAPConnection connection = null; + try { + do { + final LDAPConnection finalConnection = + LdapUtils.privilegedConnect(testSessionFactory.getServerSet()::getConnection); + connection = finalConnection; + logger.debug("established connection with port [{}] expected port [{}]", + finalConnection.getConnectedPort(), port); + if (finalConnection.getConnectedPort() != port) { + LDAPException e = expectThrows(LDAPException.class, () -> finalConnection.bind(new SimpleBindRequest())); + assertThat(e.getMessage(), containsString("not connected")); + finalConnection.close(); + } + } while (connection.getConnectedPort() != port); + assertThat(connection.getConnectedPort(), is(port)); + } finally { + if (connection != null) { + connection.close(); + } } } } @@ -160,76 +180,109 @@ private MockSocket openMockSocket(InetAddress remoteAddress, int remotePort, Ine socket.setReuseAddress(true); // allow binding even if the previous socket is in timed wait state. socket.setSoLinger(true, 0); // close immediately as we are not writing anything here. socket.bind(new InetSocketAddress(localAddress, localPort)); - SocketAccess.doPrivileged(() -> socket.connect(new InetSocketAddress(localAddress, remotePort))); + SocketAccess.doPrivileged(() -> socket.connect(new InetSocketAddress(remoteAddress, remotePort))); return socket; } public void testFailover() throws Exception { - assumeTrue("at least one ldap server should be present for this test", ldapServers.length > 1); + assumeTrue("at least two ldap servers should be present for this test", ldapServers.length > 1); logger.debug("using [{}] ldap servers, urls {}", ldapServers.length, ldapUrls()); TestSessionFactory testSessionFactory = createSessionFactory(LdapLoadBalancing.FAILOVER); // first test that there is no round robin stuff going on final int firstPort = ldapServers[0].getListenPort(); for (int i = 0; i < numberOfLdapServers; i++) { - LDAPConnection connection = null; - try { - connection = LdapUtils.privilegedConnect(testSessionFactory.getServerSet()::getConnection); + try (LDAPConnection connection = LdapUtils.privilegedConnect(testSessionFactory.getServerSet()::getConnection)) { assertThat(connection.getConnectedPort(), is(firstPort)); - } finally { - if (connection != null) { - connection.close(); - } } } - logger.debug("shutting down server index [0] listening on [{}]", ldapServers[0].getListenPort()); - // always kill the first one - ldapServers[0].shutDown(true); - assertThat(ldapServers[0].getListenPort(), is(-1)); - - // now randomly shutdown some others + // we need at least one good server. Hence the upper bound is number - 2 since we need at least + // one server to use! + InMemoryDirectoryServer[] allButFirstServer = Arrays.copyOfRange(ldapServers, 1, ldapServers.length); + final List ldapServersToKill; if (ldapServers.length > 2) { - // kill at least one other server, but we need at least one good one. 
Hence the upper bound is number - 2 since we need at least - // one server to use! final int numberToKill = randomIntBetween(1, numberOfLdapServers - 2); - InMemoryDirectoryServer[] allButFirstServer = Arrays.copyOfRange(ldapServers, 1, ldapServers.length); - // get a subset to kil - final List ldapServersToKill = randomSubsetOf(numberToKill, allButFirstServer); - final List ldapServersList = Arrays.asList(ldapServers); - for (InMemoryDirectoryServer ldapServerToKill : ldapServersToKill) { - final int index = ldapServersList.indexOf(ldapServerToKill); - assertThat(index, greaterThanOrEqualTo(1)); - final Integer port = Integer.valueOf(ldapServers[index].getListenPort()); - logger.debug("shutting down server index [{}] listening on [{}]", index, port); - ldapServers[index].shutDown(true); - assertThat(ldapServers[index].getListenPort(), is(-1)); - } + ldapServersToKill = randomSubsetOf(numberToKill, allButFirstServer); + ldapServersToKill.add(ldapServers[0]); // always kill the first one + } else { + ldapServersToKill = Collections.singletonList(ldapServers[0]); } + final List ldapServersList = Arrays.asList(ldapServers); + final MockServerSocket mockServerSocket = new MockServerSocket(0, 0); + final List listenThreads = new ArrayList<>(); + final CountDownLatch latch = new CountDownLatch(ldapServersToKill.size()); + final CountDownLatch closeLatch = new CountDownLatch(1); + final AtomicBoolean success = new AtomicBoolean(true); + for (InMemoryDirectoryServer ldapServerToKill : ldapServersToKill) { + final int index = ldapServersList.indexOf(ldapServerToKill); + final int port = ldapServers[index].getListenPort(); + logger.debug("shutting down server index [{}] listening on [{}]", index, port); + ldapServers[index].shutDown(true); - int firstNonStoppedPort = -1; - // now we find the first that isn't stopped - for (int i = 0; i < numberOfLdapServers; i++) { - if (ldapServers[i].getListenPort() != -1) { - firstNonStoppedPort = ldapServers[i].getListenPort(); - break; - } + // when running multiple test jvms, there is a chance that something else could + // start listening on this port so we try to avoid this by creating a local socket + // that will be bound to the port the ldap server was running on and connecting to + // a mock server socket. + // NOTE: this is not perfect as there is a small amount of time between the shutdown + // of the ldap server and the opening of the socket + logger.debug("opening mock server socket listening on [{}]", port); + Runnable runnable = new PortBlockingRunnable(mockServerSocket.getInetAddress(), mockServerSocket.getLocalPort(), port, + latch, closeLatch, success); + Thread thread = new Thread(runnable); + thread.start(); + listenThreads.add(thread); + + assertThat(ldapServers[index].getListenPort(), is(-1)); } - logger.debug("first non stopped port [{}]", firstNonStoppedPort); - assertThat(firstNonStoppedPort, not(-1)); - final int numberOfIterations = randomIntBetween(1, 5); - for (int iteration = 0; iteration < numberOfIterations; iteration++) { - LDAPConnection connection = null; - try { + try { + latch.await(); + + assumeTrue("Failed to open sockets on all addresses with the port that an LDAP server was bound to. 
Some operating systems " + + "allow binding to an address and port combination even if an application is bound to the port on a wildcard address", + success.get()); + int firstNonStoppedPort = -1; + // now we find the first that isn't stopped + for (int i = 0; i < numberOfLdapServers; i++) { + if (ldapServers[i].getListenPort() != -1) { + firstNonStoppedPort = ldapServers[i].getListenPort(); + break; + } + } + logger.debug("first non stopped port [{}]", firstNonStoppedPort); + assertThat(firstNonStoppedPort, not(-1)); + final int numberOfIterations = randomIntBetween(1, 5); + for (int iteration = 0; iteration < numberOfIterations; iteration++) { logger.debug("attempting connection with expected port [{}] iteration [{}]", firstNonStoppedPort, iteration); - connection = LdapUtils.privilegedConnect(testSessionFactory.getServerSet()::getConnection); - assertThat(connection.getConnectedPort(), is(firstNonStoppedPort)); - } finally { - if (connection != null) { - connection.close(); + LDAPConnection connection = null; + try { + do { + final LDAPConnection finalConnection = + LdapUtils.privilegedConnect(testSessionFactory.getServerSet()::getConnection); + connection = finalConnection; + logger.debug("established connection with port [{}] expected port [{}]", + finalConnection.getConnectedPort(), firstNonStoppedPort); + if (finalConnection.getConnectedPort() != firstNonStoppedPort) { + LDAPException e = expectThrows(LDAPException.class, () -> finalConnection.bind(new SimpleBindRequest())); + assertThat(e.getMessage(), containsString("not connected")); + finalConnection.close(); + } + } while (connection.getConnectedPort() != firstNonStoppedPort); + + assertThat(connection.getConnectedPort(), is(firstNonStoppedPort)); + } finally { + if (connection != null) { + connection.close(); + } } } + } finally { + closeLatch.countDown(); + mockServerSocket.close(); + for (Thread t : listenThreads) { + t.join(); + } } } @@ -245,6 +298,92 @@ private TestSessionFactory createSessionFactory(LdapLoadBalancing loadBalancing) threadPool); } + private class PortBlockingRunnable implements Runnable { + + private final InetAddress serverAddress; + private final int serverPort; + private final int portToBind; + private final CountDownLatch latch; + private final CountDownLatch closeLatch; + private final AtomicBoolean success; + + private PortBlockingRunnable(InetAddress serverAddress, int serverPort, int portToBind, CountDownLatch latch, + CountDownLatch closeLatch, AtomicBoolean success) { + this.serverAddress = serverAddress; + this.serverPort = serverPort; + this.portToBind = portToBind; + this.latch = latch; + this.closeLatch = closeLatch; + this.success = success; + } + + @Override + public void run() { + final List openedSockets = new ArrayList<>(); + final List blacklistedAddress = new ArrayList<>(); + try { + final boolean allSocketsOpened = awaitBusy(() -> { + try { + InetAddress[] allAddresses = InetAddressHelper.getAllAddresses(); + if (serverAddress instanceof Inet4Address) { + allAddresses = InetAddressHelper.filterIPV4(allAddresses); + } else { + allAddresses = InetAddressHelper.filterIPV6(allAddresses); + } + final List inetAddressesToBind = Arrays.stream(allAddresses) + .filter(addr -> openedSockets.stream().noneMatch(s -> addr.equals(s.getLocalAddress()))) + .filter(addr -> blacklistedAddress.contains(addr) == false) + .collect(Collectors.toList()); + for (InetAddress localAddress : inetAddressesToBind) { + try { + final Socket socket = openMockSocket(serverAddress, serverPort, localAddress, 
portToBind); + openedSockets.add(socket); + logger.debug("opened socket [{}]", socket); + } catch (NoRouteToHostException e) { + logger.debug(new ParameterizedMessage("blacklisting address [{}] due to:", localAddress), e); + blacklistedAddress.add(localAddress); + } catch (ConnectException e) { + logger.debug(new ParameterizedMessage("blacklisting address [{}] due to:", localAddress), e); + blacklistedAddress.add(localAddress); + } + } + if (openedSockets.size() == 0) { + logger.debug("Could not open any sockets from the available addresses"); + return false; + } + return true; + } catch (IOException e) { + logger.debug(new ParameterizedMessage("caught exception while opening socket on [{}]", portToBind), e); + return false; + } + }); + + if (allSocketsOpened) { + latch.countDown(); + } else { + success.set(false); + IOUtils.closeWhileHandlingException(openedSockets); + openedSockets.clear(); + latch.countDown(); + return; + } + } catch (InterruptedException e) { + logger.debug(new ParameterizedMessage("interrupted while trying to open sockets on [{}]", portToBind), e); + Thread.currentThread().interrupt(); + } + + try { + closeLatch.await(); + } catch (InterruptedException e) { + logger.debug("caught exception while waiting for close latch", e); + Thread.currentThread().interrupt(); + } finally { + logger.debug("closing sockets on [{}]", portToBind); + IOUtils.closeWhileHandlingException(openedSockets); + } + } + } + static class TestSessionFactory extends SessionFactory { protected TestSessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java index 8d10f3ffb6946..7d5132ffb9f8a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java @@ -1374,7 +1374,7 @@ public void testContentIsAcceptedIfRestrictedToOurAudience() throws Exception { } public void testContentIsRejectedIfNotMarkedAsSuccess() throws Exception { - final String xml = getSimpleResponse(clock.instant()).replace(StatusCode.SUCCESS, StatusCode.REQUESTER); + final String xml = getStatusFailedResponse(); final SamlToken token = token(signDoc(xml)); final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); assertThat(exception.getMessage(), containsString("not a 'success' response")); @@ -1408,8 +1408,7 @@ public void testSignatureWrappingAttackOne() throws Exception { */ - final Element response = (Element) legitimateDocument. - getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element response = (Element) legitimateDocument.getElementsByTagNameNS(SAML20P_NS, "Response").item(0); final Element clonedResponse = (Element) response.cloneNode(true); final Element clonedSignature = (Element) clonedResponse. getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); @@ -1443,8 +1442,7 @@ public void testSignatureWrappingAttackTwo() throws Exception { */ - final Element response = (Element) legitimateDocument. 
- getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element response = (Element) legitimateDocument.getElementsByTagNameNS(SAML20P_NS, "Response").item(0); final Element clonedResponse = (Element) response.cloneNode(true); final Element clonedSignature = (Element) clonedResponse. getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); @@ -1482,8 +1480,7 @@ public void testSignatureWrappingAttackThree() throws Exception { */ - final Element response = (Element) legitimateDocument. - getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element response = (Element) legitimateDocument.getElementsByTagNameNS(SAML20P_NS, "Response").item(0); final Element assertion = (Element) legitimateDocument. getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); final Element forgedAssertion = (Element) assertion.cloneNode(true); @@ -1522,10 +1519,8 @@ public void testSignatureWrappingAttackFour() throws Exception { */ - final Element response = (Element) legitimateDocument. - getElementsByTagNameNS(SAML20P_NS, "Response").item(0); - final Element assertion = (Element) legitimateDocument. - getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); + final Element response = (Element) legitimateDocument.getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element assertion = (Element) legitimateDocument.getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); final Element forgedAssertion = (Element) assertion.cloneNode(true); forgedAssertion.setAttribute("ID", "_forged_assertion_id"); final Element clonedSignature = (Element) forgedAssertion. @@ -1559,17 +1554,14 @@ public void testSignatureWrappingAttackFive() throws Exception { */ - final Element response = (Element) legitimateDocument. - getElementsByTagNameNS(SAML20P_NS, "Response").item(0); - final Element assertion = (Element) legitimateDocument. - getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); + final Element response = (Element) legitimateDocument.getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element assertion = (Element) legitimateDocument.getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); final Element signature = (Element) assertion. - getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); assertion.removeChild(signature); final Element forgedAssertion = (Element) assertion.cloneNode(true); forgedAssertion.setAttribute("ID", "_forged_assertion_id"); - final Element issuer = (Element) forgedAssertion. - getElementsByTagNameNS(SAML20_NS, "Issuer").item(0); + final Element issuer = (Element) forgedAssertion.getElementsByTagNameNS(SAML20_NS, "Issuer").item(0); forgedAssertion.insertBefore(signature, issuer.getNextSibling()); response.insertBefore(forgedAssertion, assertion); final SamlToken forgedToken = token(SamlUtils.toString((legitimateDocument.getDocumentElement()))); @@ -1598,10 +1590,8 @@ public void testSignatureWrappingAttackSix() throws Exception { */ - final Element response = (Element) legitimateDocument. - getElementsByTagNameNS(SAML20P_NS, "Response").item(0); - final Element assertion = (Element) legitimateDocument. 
- getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); + final Element response = (Element) legitimateDocument.getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element assertion = (Element) legitimateDocument.getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); final Element forgedAssertion = (Element) assertion.cloneNode(true); forgedAssertion.setAttribute("ID", "_forged_assertion_id"); final Element signature = (Element) assertion. @@ -1610,8 +1600,7 @@ public void testSignatureWrappingAttackSix() throws Exception { getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); forgedAssertion.removeChild(forgedSignature); assertion.removeChild(signature); - final Element issuer = (Element) forgedAssertion. - getElementsByTagNameNS(SAML20_NS, "Issuer").item(0); + final Element issuer = (Element) forgedAssertion.getElementsByTagNameNS(SAML20_NS, "Issuer").item(0); forgedAssertion.insertBefore(signature, issuer.getNextSibling()); signature.appendChild(assertion); response.appendChild(forgedAssertion); @@ -1642,11 +1631,9 @@ public void testSignatureWrappingAttackSeven() throws Exception { */ - final Element response = (Element) legitimateDocument. - getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element response = (Element) legitimateDocument.getElementsByTagNameNS(SAML20P_NS, "Response").item(0); final Element extensions = legitimateDocument.createElement("Extensions"); - final Element assertion = (Element) legitimateDocument. - getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); + final Element assertion = (Element) legitimateDocument.getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); response.insertBefore(extensions, assertion); final Element forgedAssertion = (Element) assertion.cloneNode(true); forgedAssertion.setAttribute("ID", "_forged_assertion_id"); @@ -1683,10 +1670,8 @@ public void testSignatureWrappingAttackEight() throws Exception { */ - final Element response = (Element) legitimateDocument. - getElementsByTagNameNS(SAML20P_NS, "Response").item(0); - final Element assertion = (Element) legitimateDocument. - getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); + final Element response = (Element) legitimateDocument.getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element assertion = (Element) legitimateDocument.getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); final Element forgedAssertion = (Element) assertion.cloneNode(true); forgedAssertion.setAttribute("ID", "_forged_assertion_id"); final Element signature = (Element) assertion. @@ -1695,8 +1680,7 @@ public void testSignatureWrappingAttackEight() throws Exception { getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); forgedAssertion.removeChild(forgedSignature); assertion.removeChild(signature); - final Element issuer = (Element) forgedAssertion. 
- getElementsByTagNameNS(SAML20_NS, "Issuer").item(0); + final Element issuer = (Element) forgedAssertion.getElementsByTagNameNS(SAML20_NS, "Issuer").item(0); forgedAssertion.insertBefore(signature, issuer.getNextSibling()); Element object = legitimateDocument.createElement("Object"); object.appendChild(assertion); @@ -2034,7 +2018,7 @@ private void encryptElement(Element element, X509Certificate certificate, boolea } private Element buildEncryptedKeyElement(Document document, EncryptedKey encryptedKey, X509Certificate certificate) - throws XMLSecurityException { + throws XMLSecurityException { final XMLCipher cipher = XMLCipher.getInstance(); final org.apache.xml.security.keys.KeyInfo keyInfo = new org.apache.xml.security.keys.KeyInfo(document); final X509Data x509Data = new X509Data(document); @@ -2054,6 +2038,23 @@ private Response toResponse(String xml) throws SAXException, IOException, Parser return authenticator.buildXmlObject(doc.getDocumentElement(), Response.class); } + private String getStatusFailedResponse() { + final Instant now = clock.instant(); + return "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + + ""; + } + private String getSimpleResponse(Instant now) { return getSimpleResponse(now, randomAlphaOfLengthBetween(12, 18), randomId()); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java index 7bf13e8be265c..c35561102020b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java @@ -53,7 +53,7 @@ private static boolean isTurkishLocale() { } @AfterClass - public static void restoreLocale() throws Exception { + public static void restoreLocale() { if (restoreLocale != null) { Locale.setDefault(restoreLocale); restoreLocale = null; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java index d04f0ad7f9383..51ea82fc0e431 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java @@ -49,27 +49,25 @@ public void testMatching() throws Exception { } public void testParsingMalformedInput() { - Predicate predicate = new UserRoleMapper.DistinguishedNamePredicate(null); - assertPredicate(predicate, null, true); - assertPredicate(predicate, "", false); - assertPredicate(predicate, randomAlphaOfLengthBetween(1, 8), false); - - predicate = new UserRoleMapper.DistinguishedNamePredicate(""); + Predicate predicate = new UserRoleMapper.DistinguishedNamePredicate(""); assertPredicate(predicate, null, false); assertPredicate(predicate, "", true); assertPredicate(predicate, randomAlphaOfLengthBetween(1, 8), false); + assertPredicate(predicate, randomAlphaOfLengthBetween(1, 8) + "*", false); predicate = new UserRoleMapper.DistinguishedNamePredicate("foo="); assertPredicate(predicate, null, false); assertPredicate(predicate, "foo", false); assertPredicate(predicate, "foo=", true); assertPredicate(predicate, randomAlphaOfLengthBetween(5, 12), false); + 
assertPredicate(predicate, randomAlphaOfLengthBetween(5, 12) + "*", false); predicate = new UserRoleMapper.DistinguishedNamePredicate("=bar"); assertPredicate(predicate, null, false); assertPredicate(predicate, "bar", false); assertPredicate(predicate, "=bar", true); assertPredicate(predicate, randomAlphaOfLengthBetween(5, 12), false); + assertPredicate(predicate, randomAlphaOfLengthBetween(5, 12) + "*", false); } private void assertPredicate(Predicate predicate, Object value, boolean expected) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java index 729bd08d7faf3..5f4ab6acc1f25 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java @@ -17,12 +17,14 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AllExpression; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AnyExpression; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.hamcrest.Matchers; import org.junit.Before; import org.mockito.Mockito; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.Locale; @@ -42,17 +44,17 @@ public void setupMapping() throws Exception { Settings.EMPTY, Mockito.mock(Environment.class), new ThreadContext(Settings.EMPTY)); } - public void testParseValidJson() throws Exception { + public void testValidExpressionWithFixedRoleNames() throws Exception { String json = "{" - + "\"roles\": [ \"kibana_user\", \"sales\" ], " - + "\"enabled\": true, " - + "\"rules\": { " - + " \"all\": [ " - + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " - + " { \"except\": { \"field\": { \"metadata.active\" : false } } }" - + " ]}" - + "}"; - final ExpressionRoleMapping mapping = parse(json, "ldap_sales"); + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"enabled\": true, " + + "\"rules\": { " + + " \"all\": [ " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " + + " { \"except\": { \"field\": { \"metadata.active\" : false } } }" + + " ]}" + + "}"; + ExpressionRoleMapping mapping = parse(json, "ldap_sales"); assertThat(mapping.getRoles(), Matchers.containsInAnyOrder("kibana_user", "sales")); assertThat(mapping.getExpression(), instanceOf(AllExpression.class)); @@ -79,12 +81,48 @@ public void testParseValidJson() throws Exception { Collections.emptyList(), Collections.singletonMap("active", true), realm ); + final UserRoleMapper.UserData user4 = new UserRoleMapper.UserData( + "peter.null", null, Collections.emptyList(), Collections.singletonMap("active", true), realm + ); + assertThat(mapping.getExpression().match(user1a.asModel()), equalTo(true)); assertThat(mapping.getExpression().match(user1b.asModel()), equalTo(true)); assertThat(mapping.getExpression().match(user1c.asModel()), equalTo(true)); assertThat(mapping.getExpression().match(user1d.asModel()), equalTo(true)); - assertThat(mapping.getExpression().match(user2.asModel()), equalTo(false)); - 
assertThat(mapping.getExpression().match(user3.asModel()), equalTo(false)); + assertThat(mapping.getExpression().match(user2.asModel()), equalTo(false)); // metadata.active == false + assertThat(mapping.getExpression().match(user3.asModel()), equalTo(false)); // dn != ou=sales,dc=example,dc=com + assertThat(mapping.getExpression().match(user4.asModel()), equalTo(false)); // dn == null + + // expression without dn + json = "{" + + "\"roles\": [ \"superuser\", \"system_admin\", \"admin\" ], " + + "\"enabled\": true, " + + "\"rules\": { " + + " \"any\": [ " + + " { \"field\": { \"username\" : \"tony.stark\" } }, " + + " { \"field\": { \"groups\": \"cn=admins,dc=stark-enterprises,dc=com\" } }" + + " ]}" + + "}"; + mapping = parse(json, "stark_admin"); + assertThat(mapping.getRoles(), Matchers.containsInAnyOrder("superuser", "system_admin", "admin")); + assertThat(mapping.getExpression(), instanceOf(AnyExpression.class)); + + final UserRoleMapper.UserData userTony = new UserRoleMapper.UserData( + "tony.stark", null, Collections.singletonList("Audi R8 owners"), Collections.singletonMap("boss", true), realm + ); + final UserRoleMapper.UserData userPepper = new UserRoleMapper.UserData( + "pepper.potts", null, Arrays.asList("marvel", "cn=admins,dc=stark-enterprises,dc=com"), null, realm + ); + final UserRoleMapper.UserData userMax = new UserRoleMapper.UserData( + "max.rockatansky", null, Collections.singletonList("bronze"), Collections.singletonMap("mad", true), realm + ); + final UserRoleMapper.UserData userFinn = new UserRoleMapper.UserData( + "finn.hackleberry", null, Arrays.asList("hacker", null), null, realm + ); + assertThat(mapping.getExpression().match(userTony.asModel()), equalTo(true)); + assertThat(mapping.getExpression().match(userPepper.asModel()), equalTo(true)); + assertThat(mapping.getExpression().match(userMax.asModel()), equalTo(false)); + assertThat(mapping.getExpression().match(userFinn.asModel()), equalTo(false)); } public void testParsingFailsIfRulesAreMissing() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 9f11fd674ba86..29407a8672982 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue; +import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; @@ -46,6 +47,8 @@ import static org.mockito.Mockito.when; public class NativeRoleMappingStoreTests extends ESTestCase { + private final String concreteSecurityIndexName = randomFrom( + RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_6, RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_7); public void testResolveRoles() throws Exception { // Does match DN @@ -124,7 
+127,7 @@ private String randomiseDn(String dn) { } private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { - return new SecurityIndexManager.State(true, true, true, true, null, indexStatus); + return new SecurityIndexManager.State(true, true, true, true, null, concreteSecurityIndexName, indexStatus); } public void testCacheClearOnIndexHealthChange() { @@ -169,13 +172,13 @@ public void testCacheClearOnIndexOutOfDateChange() { final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation); store.onSecurityIndexStateChange( - new SecurityIndexManager.State(true, false, true, true, null, null), - new SecurityIndexManager.State(true, true, true, true, null, null)); + new SecurityIndexManager.State(true, false, true, true, null, concreteSecurityIndexName, null), + new SecurityIndexManager.State(true, true, true, true, null, concreteSecurityIndexName, null)); assertEquals(1, numInvalidation.get()); store.onSecurityIndexStateChange( - new SecurityIndexManager.State(true, true, true, true, null, null), - new SecurityIndexManager.State(true, false, true, true, null, null)); + new SecurityIndexManager.State(true, true, true, true, null, concreteSecurityIndexName, null), + new SecurityIndexManager.State(true, false, true, true, null, concreteSecurityIndexName, null)); assertEquals(2, numInvalidation.get()); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 7c4cd564e9993..29d02326cd214 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -124,6 +124,7 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.audit.AuditLevel; import org.elasticsearch.xpack.security.audit.AuditTrailService; import org.elasticsearch.xpack.security.audit.AuditUtil; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; @@ -261,6 +262,9 @@ public void testActionsForSystemUserIsAuthorized() throws IOException { "indices:admin/seq_no/global_checkpoint_sync", "indices:admin/seq_no/retention_lease_sync", "indices:admin/seq_no/retention_lease_background_sync", + "indices:admin/seq_no/add_retention_lease", + "indices:admin/seq_no/remove_retention_lease", + "indices:admin/seq_no/renew_retention_lease", "indices:admin/settings/update" }; for (String action : actions) { authorize(authentication, action, request); @@ -1164,14 +1168,24 @@ public void testAuthorizationOfIndividualBulkItems() throws IOException { final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); authorize(authentication, action, request); - verify(auditTrail, times(2)).accessGranted(eq(requestId), eq(authentication), eq(DeleteAction.NAME), eq(request), - authzInfoRoles(new String[] { role.getName() })); // concrete-index and alias-2 delete - verify(auditTrail, times(2)).accessGranted(eq(requestId), eq(authentication), eq(IndexAction.NAME), eq(request), - authzInfoRoles(new String[] { role.getName() })); // concrete-index and alias-1 index - verify(auditTrail).accessDenied(eq(requestId), eq(authentication), eq(DeleteAction.NAME), 
eq(request), - authzInfoRoles(new String[] { role.getName() })); // alias-1 delete - verify(auditTrail).accessDenied(eq(requestId), eq(authentication), eq(IndexAction.NAME), eq(request), - authzInfoRoles(new String[] { role.getName() })); // alias-2 index + verify(auditTrail).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_GRANTED), eq(authentication), + eq(DeleteAction.NAME), eq("concrete-index"), eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() })); + verify(auditTrail).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_GRANTED), eq(authentication), + eq(DeleteAction.NAME), eq("alias-2"), eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() })); + verify(auditTrail).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_GRANTED), eq(authentication), + eq(IndexAction.NAME), eq("concrete-index"), eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() })); + verify(auditTrail).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_GRANTED), eq(authentication), + eq(IndexAction.NAME), eq("alias-1"), eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() })); + verify(auditTrail).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_DENIED), eq(authentication), + eq(DeleteAction.NAME), eq("alias-1"), eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() })); + verify(auditTrail).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_DENIED), eq(authentication), + eq(IndexAction.NAME), eq("alias-2"), eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() })); verify(auditTrail).accessGranted(eq(requestId), eq(authentication), eq(action), eq(request), authzInfoRoles(new String[] { role.getName() })); // bulk request is allowed verifyNoMoreInteractions(auditTrail); @@ -1200,10 +1214,12 @@ public void testAuthorizationOfIndividualBulkItemsWithDateMath() throws IOExcept authorize(authentication, action, request); // both deletes should fail - verify(auditTrail, times(2)).accessDenied(eq(requestId), eq(authentication), eq(DeleteAction.NAME), eq(request), - authzInfoRoles(new String[] { role.getName() })); - verify(auditTrail, times(2)).accessGranted(eq(requestId), eq(authentication), eq(IndexAction.NAME), eq(request), - authzInfoRoles(new String[] { role.getName() })); + verify(auditTrail, times(2)).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_DENIED), eq(authentication), + eq(DeleteAction.NAME), Matchers.startsWith("datemath-"), eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() })); + verify(auditTrail, times(2)).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_GRANTED), eq(authentication), + eq(IndexAction.NAME), Matchers.startsWith("datemath-"), eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() })); // bulk request is allowed verify(auditTrail).accessGranted(eq(requestId), eq(authentication), eq(action), eq(request), authzInfoRoles(new String[]{role.getName()})); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java index c0dc86315888b..202c9cb715f58 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java @@ -47,6 +47,8 @@ public void testAuthorizedIndicesUserWithSomeRoles() { RoleDescriptor bRole = new RoleDescriptor("b", null, new IndicesPrivileges[] { IndicesPrivileges.builder().indices("b").privileges("READ").build() }, null); Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); + final String internalSecurityIndex = randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_6, + RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_7); MetaData metaData = MetaData.builder() .put(new IndexMetaData.Builder("a1").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) .put(new IndexMetaData.Builder("a2").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) @@ -59,7 +61,7 @@ public void testAuthorizedIndicesUserWithSomeRoles() { .putAlias(new AliasMetaData.Builder("ab").build()) .putAlias(new AliasMetaData.Builder("ba").build()) .build(), true) - .put(new IndexMetaData.Builder(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX) + .put(new IndexMetaData.Builder(internalSecurityIndex) .settings(indexSettings) .numberOfShards(1) .numberOfReplicas(0) @@ -75,7 +77,7 @@ public void testAuthorizedIndicesUserWithSomeRoles() { assertThat(list, containsInAnyOrder("a1", "a2", "aaaaaa", "b", "ab")); assertFalse(list.contains("bbbbb")); assertFalse(list.contains("ba")); - assertThat(list, not(contains(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX))); + assertThat(list, not(contains(internalSecurityIndex))); assertThat(list, not(contains(RestrictedIndicesNames.SECURITY_INDEX_NAME))); } @@ -99,10 +101,13 @@ public void testSecurityIndicesAreRestrictedForDefaultRole() { .cluster(ClusterPrivilege.ALL) .build(); Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); + final String internalSecurityIndex = randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_6, + RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_7); MetaData metaData = MetaData.builder() .put(new IndexMetaData.Builder("an-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) .put(new IndexMetaData.Builder("another-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) - .put(new IndexMetaData.Builder(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX) + .put(new IndexMetaData.Builder( + internalSecurityIndex) .settings(indexSettings) .numberOfShards(1) .numberOfReplicas(0) @@ -113,7 +118,7 @@ public void testSecurityIndicesAreRestrictedForDefaultRole() { List authorizedIndices = RBACEngine.resolveAuthorizedIndicesFromRole(role, SearchAction.NAME, metaData.getAliasAndIndexLookup()); assertThat(authorizedIndices, containsInAnyOrder("an-index", "another-index")); - assertThat(authorizedIndices, not(contains(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX))); + assertThat(authorizedIndices, not(contains(internalSecurityIndex))); assertThat(authorizedIndices, not(contains(RestrictedIndicesNames.SECURITY_INDEX_NAME))); } @@ -123,10 +128,12 @@ public void testSecurityIndicesAreNotRemovedFromUnrestrictedRole() { .cluster(ClusterPrivilege.ALL) .build(); Settings indexSettings = 
Settings.builder().put("index.version.created", Version.CURRENT).build(); + final String internalSecurityIndex = randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_6, + RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_7); MetaData metaData = MetaData.builder() .put(new IndexMetaData.Builder("an-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) .put(new IndexMetaData.Builder("another-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) - .put(new IndexMetaData.Builder(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX) + .put(new IndexMetaData.Builder(internalSecurityIndex) .settings(indexSettings) .numberOfShards(1) .numberOfReplicas(0) @@ -137,11 +144,11 @@ public void testSecurityIndicesAreNotRemovedFromUnrestrictedRole() { List authorizedIndices = RBACEngine.resolveAuthorizedIndicesFromRole(role, SearchAction.NAME, metaData.getAliasAndIndexLookup()); assertThat(authorizedIndices, containsInAnyOrder( - "an-index", "another-index", SecurityIndexManager.SECURITY_INDEX_NAME, SecurityIndexManager.INTERNAL_SECURITY_INDEX)); + "an-index", "another-index", SecurityIndexManager.SECURITY_INDEX_NAME, internalSecurityIndex)); List authorizedIndicesSuperUser = RBACEngine.resolveAuthorizedIndicesFromRole(role, SearchAction.NAME, metaData.getAliasAndIndexLookup()); assertThat(authorizedIndicesSuperUser, containsInAnyOrder( - "an-index", "another-index", SecurityIndexManager.SECURITY_INDEX_NAME, SecurityIndexManager.INTERNAL_SECURITY_INDEX)); + "an-index", "another-index", SecurityIndexManager.SECURITY_INDEX_NAME, internalSecurityIndex)); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index 2f09b74ac3d53..dc32580980e02 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -72,6 +72,7 @@ import org.junit.Before; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -104,7 +105,6 @@ public class IndicesAndAliasesResolverTests extends ESTestCase { private IndicesAndAliasesResolver defaultIndicesResolver; private IndexNameExpressionResolver indexNameExpressionResolver; private Map roleMap; - private FieldPermissionsCache fieldPermissionsCache; @Before public void setup() { @@ -138,13 +138,15 @@ public void setup() { .put(indexBuilder("-index11").settings(settings)) .put(indexBuilder("-index20").settings(settings)) .put(indexBuilder("-index21").settings(settings)) + .put(indexBuilder("logs-00001").putAlias(AliasMetaData.builder("logs-alias").writeIndex(false)).settings(settings)) + .put(indexBuilder("logs-00002").putAlias(AliasMetaData.builder("logs-alias").writeIndex(false)).settings(settings)) + .put(indexBuilder("logs-00003").putAlias(AliasMetaData.builder("logs-alias").writeIndex(true)).settings(settings)) .put(indexBuilder(securityIndexName).settings(settings)).build(); if (withAlias) { metaData = SecurityTestUtils.addAliasToMetaData(metaData, securityIndexName); } this.metaData = metaData; - this.fieldPermissionsCache = new FieldPermissionsCache(settings); user = new User("user", "role"); userDashIndices = new User("dash", "dash"); @@ -1355,6 +1357,29 @@ public void 
testDynamicPutMappingRequestFromAlias() { request = new PutMappingRequest(Strings.EMPTY_ARRAY).setConcreteIndex(new Index(index, UUIDs.base64UUID())); putMappingIndexOrAlias = IndicesAndAliasesResolver.getPutMappingIndexOrAlias(request, authorizedIndices, metaData); assertEquals(index, putMappingIndexOrAlias); + + } + + public void testWhenAliasToMultipleIndicesAndUserIsAuthorizedUsingAliasReturnsAliasNameForDynamicPutMappingRequestOnWriteIndex() { + String index = "logs-00003"; // write index + PutMappingRequest request = new PutMappingRequest(Strings.EMPTY_ARRAY).setConcreteIndex(new Index(index, UUIDs.base64UUID())); + List authorizedIndices = Collections.singletonList("logs-alias"); + assert metaData.getAliasAndIndexLookup().get("logs-alias").getIndices().size() == 3; + String putMappingIndexOrAlias = IndicesAndAliasesResolver.getPutMappingIndexOrAlias(request, authorizedIndices, metaData); + String message = "user is authorized to access `logs-alias` and the put mapping request is for a write index" + + "so this should have returned the alias name"; + assertEquals(message, "logs-alias", putMappingIndexOrAlias); + } + + public void testWhenAliasToMultipleIndicesAndUserIsAuthorizedUsingAliasReturnsIndexNameForDynamicPutMappingRequestOnReadIndex() { + String index = "logs-00002"; // read index + PutMappingRequest request = new PutMappingRequest(Strings.EMPTY_ARRAY).setConcreteIndex(new Index(index, UUIDs.base64UUID())); + List authorizedIndices = Collections.singletonList("logs-alias"); + assert metaData.getAliasAndIndexLookup().get("logs-alias").getIndices().size() == 3; + String putMappingIndexOrAlias = IndicesAndAliasesResolver.getPutMappingIndexOrAlias(request, authorizedIndices, metaData); + String message = "user is authorized to access `logs-alias` and the put mapping request is for a read index" + + "so this should have returned the concrete index as fallback"; + assertEquals(message, index, putMappingIndexOrAlias); } // TODO with the removal of DeleteByQuery is there another way to test resolving a write action? 
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java index e43ca6bbc0b6f..5c2e964c743c6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java @@ -48,6 +48,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges.ManageApplicationPrivileges; +import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.Privilege; import org.elasticsearch.xpack.core.security.user.User; @@ -493,6 +494,130 @@ public void testCheckingIndexPermissionsDefinedOnDifferentPatterns() throws Exce )); } + public void testCheckExplicitRestrictedIndexPermissions() throws Exception { + User user = new User(randomAlphaOfLengthBetween(4, 12)); + Authentication authentication = mock(Authentication.class); + when(authentication.getUser()).thenReturn(user); + final boolean restrictedIndexPermission = randomBoolean(); + final boolean restrictedMonitorPermission = randomBoolean(); + Role role = Role.builder("role") + .add(FieldPermissions.DEFAULT, null, IndexPrivilege.INDEX, restrictedIndexPermission, ".sec*") + .add(FieldPermissions.DEFAULT, null, IndexPrivilege.MONITOR, restrictedMonitorPermission, ".security*") + .build(); + RBACAuthorizationInfo authzInfo = new RBACAuthorizationInfo(role, null); + + String explicitRestrictedIndex = randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES); + HasPrivilegesResponse response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(new String[] {".secret-non-restricted", explicitRestrictedIndex}) + .privileges("index", "monitor") + .allowRestrictedIndices(false) // explicit false for test + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(2)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(".secret-non-restricted") // matches ".sec*" but not ".security*" + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", true).put("monitor", false).map()).build(), + ResourcePrivileges.builder(explicitRestrictedIndex) // matches both ".sec*" and ".security*" + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", restrictedIndexPermission).put("monitor", restrictedMonitorPermission).map()).build())); + + explicitRestrictedIndex = randomFrom(RestrictedIndicesNames.RESTRICTED_NAMES); + response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(new String[] {".secret-non-restricted", explicitRestrictedIndex}) + .privileges("index", "monitor") + .allowRestrictedIndices(true) // explicit true for test + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(2)); + assertThat(response.getIndexPrivileges(), 
containsInAnyOrder( + ResourcePrivileges.builder(".secret-non-restricted") // matches ".sec*" but not ".security*" + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", true).put("monitor", false).map()).build(), + ResourcePrivileges.builder(explicitRestrictedIndex) // matches both ".sec*" and ".security*" + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", restrictedIndexPermission).put("monitor", restrictedMonitorPermission).map()).build())); + } + + public void testCheckRestrictedIndexWildcardPermissions() throws Exception { + User user = new User(randomAlphaOfLengthBetween(4, 12)); + Authentication authentication = mock(Authentication.class); + when(authentication.getUser()).thenReturn(user); + Role role = Role.builder("role") + .add(FieldPermissions.DEFAULT, null, IndexPrivilege.INDEX, false, ".sec*") + .add(FieldPermissions.DEFAULT, null, IndexPrivilege.MONITOR, true, ".security*") + .build(); + RBACAuthorizationInfo authzInfo = new RBACAuthorizationInfo(role, null); + + HasPrivilegesResponse response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(".sec*", ".security*") + .privileges("index", "monitor") + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(2)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(".sec*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", true).put("monitor", false).map()).build(), + ResourcePrivileges.builder(".security*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", true).put("monitor", true).map()).build() + )); + + response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(".sec*", ".security*") + .privileges("index", "monitor") + .allowRestrictedIndices(true) + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(2)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(".sec*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", false).put("monitor", false).map()).build(), + ResourcePrivileges.builder(".security*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", false).put("monitor", true).map()).build() + )); + + role = Role.builder("role") + .add(FieldPermissions.DEFAULT, null, IndexPrivilege.INDEX, true, ".sec*") + .add(FieldPermissions.DEFAULT, null, IndexPrivilege.MONITOR, false, ".security*") + .build(); + authzInfo = new RBACAuthorizationInfo(role, null); + + response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(".sec*", ".security*") + .privileges("index", "monitor") + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(2)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(".sec*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", true).put("monitor", false).map()).build(), + ResourcePrivileges.builder(".security*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", 
true).put("monitor", true).map()).build() + )); + + response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices(".sec*", ".security*") + .privileges("index", "monitor") + .allowRestrictedIndices(true) + .build(), authentication, authzInfo, Collections.emptyList(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(2)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + ResourcePrivileges.builder(".sec*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", true).put("monitor", false).map()).build(), + ResourcePrivileges.builder(".security*") + .addPrivileges(MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", true).put("monitor", false).map()).build() + )); + } + public void testCheckingApplicationPrivilegesOnDifferentApplicationsAndResources() throws Exception { List privs = new ArrayList<>(); final ApplicationPrivilege app1Read = defineApplicationPrivilege(privs, "app1", "read", "data:read/*"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java index 9a6909aad26fd..8a79bc86f6702 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java @@ -22,7 +22,7 @@ import java.util.Collections; import java.util.Locale; -import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.INTERNAL_SECURITY_INDEX; +import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_7; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_INDEX_NAME; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -56,7 +56,7 @@ public void setupClusterBeforeSnapshot() { final String snapshotUserToken = basicAuthHeaderValue(user, new SecureString(password)); client = client().filterWithHeader(Collections.singletonMap("Authorization", snapshotUserToken)); securityClient().preparePutUser(user, password, Hasher.BCRYPT, "snapshot_user").get(); - ensureGreen(INTERNAL_SECURITY_INDEX); + ensureGreen(INTERNAL_SECURITY_INDEX_7); } public void testSnapshotUserRoleCanSnapshotAndSeeAllIndices() { @@ -67,17 +67,17 @@ public void testSnapshotUserRoleCanSnapshotAndSeeAllIndices() { assertThat(getRepositoriesResponse.repositories().get(0).name(), is("repo")); // view all indices, including restricted ones final GetIndexResponse getIndexResponse = client.admin().indices().prepareGetIndex().setIndices(randomFrom("_all", "*")).get(); - assertThat(Arrays.asList(getIndexResponse.indices()), containsInAnyOrder(INTERNAL_SECURITY_INDEX, ordinaryIndex)); + assertThat(Arrays.asList(getIndexResponse.indices()), containsInAnyOrder(INTERNAL_SECURITY_INDEX_7, ordinaryIndex)); // create snapshot that includes restricted indices final CreateSnapshotResponse snapshotResponse = client.admin().cluster().prepareCreateSnapshot("repo", "snap") .setIndices(randomFrom("_all", "*")).setWaitForCompletion(true).get(); assertThat(snapshotResponse.getSnapshotInfo().state(), is(SnapshotState.SUCCESS)); - assertThat(snapshotResponse.getSnapshotInfo().indices(), containsInAnyOrder(INTERNAL_SECURITY_INDEX, ordinaryIndex)); + 
assertThat(snapshotResponse.getSnapshotInfo().indices(), containsInAnyOrder(INTERNAL_SECURITY_INDEX_7, ordinaryIndex)); // view snapshots for repo final GetSnapshotsResponse getSnapshotResponse = client.admin().cluster().prepareGetSnapshots("repo").get(); assertThat(getSnapshotResponse.getSnapshots().size(), is(1)); assertThat(getSnapshotResponse.getSnapshots().get(0).snapshotId().getName(), is("snap")); - assertThat(getSnapshotResponse.getSnapshots().get(0).indices(), containsInAnyOrder(INTERNAL_SECURITY_INDEX, ordinaryIndex)); + assertThat(getSnapshotResponse.getSnapshots().get(0).indices(), containsInAnyOrder(INTERNAL_SECURITY_INDEX_7, ordinaryIndex)); } public void testSnapshotUserRoleIsReserved() { @@ -112,7 +112,7 @@ public void testSnapshotUserRoleUnathorizedForDestructiveActions() { () -> client.admin().cluster().prepareDeleteSnapshot("repo", randomAlphaOfLength(4).toLowerCase(Locale.ROOT)).get(), "cluster:admin/snapshot/delete", "snapshot_user"); // try destructive/revealing actions on all indices - for (final String indexToTest : Arrays.asList(INTERNAL_SECURITY_INDEX, SECURITY_INDEX_NAME, ordinaryIndex)) { + for (final String indexToTest : Arrays.asList(INTERNAL_SECURITY_INDEX_7, SECURITY_INDEX_NAME, ordinaryIndex)) { assertThrowsAuthorizationException(() -> client.prepareSearch(indexToTest).get(), "indices:data/read/search", "snapshot_user"); assertThrowsAuthorizationException(() -> client.prepareGet(indexToTest, "doc", "1").get(), "indices:data/read/get", "snapshot_user"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/WriteActionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/WriteActionsTests.java index 1f40f1c480f6b..12ae548119eec 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/WriteActionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/WriteActionsTests.java @@ -53,6 +53,7 @@ public void testIndex() { //the missing index gets automatically created (user has permissions for that), but indexing fails due to missing authorization assertThrowsAuthorizationExceptionDefaultUsers(client().prepareIndex("missing", "type", "id").setSource("field", "value")::get, BulkAction.NAME + "[s]"); + ensureGreen(); } public void testDelete() { @@ -63,6 +64,7 @@ public void testDelete() { assertThrowsAuthorizationExceptionDefaultUsers(client().prepareDelete("index1", "type", "id")::get, BulkAction.NAME + "[s]"); expectThrows(IndexNotFoundException.class, () -> client().prepareDelete("test4", "type", "id").get()); + ensureGreen(); } public void testUpdate() { @@ -79,6 +81,7 @@ public void testUpdate() { assertThrowsAuthorizationExceptionDefaultUsers(client().prepareUpdate("missing", "type", "id") .setDoc(Requests.INDEX_CONTENT_TYPE, "field2", "value2")::get, UpdateAction.NAME); + ensureGreen(); } public void testBulk() { @@ -160,5 +163,6 @@ public void testBulk() { assertAuthorizationExceptionDefaultUsers(bulkResponse.getItems()[12].getFailure().getCause(), BulkAction.NAME + "[s]"); assertThat(bulkResponse.getItems()[12].getFailure().getCause().getMessage(), containsString("[indices:data/write/bulk[s]] is unauthorized")); + ensureGreen(); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java index ed1eed302356a..8f3e69815750d 100644 
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java @@ -293,8 +293,10 @@ public void testErrorMessageIfIndexPatternIsTooComplex() { public void testSecurityIndicesPermissions() { final Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); + final String internalSecurityIndex = randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_6, + RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_7); final MetaData metaData = new MetaData.Builder() - .put(new IndexMetaData.Builder(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX) + .put(new IndexMetaData.Builder(internalSecurityIndex) .settings(indexSettings) .numberOfShards(1) .numberOfReplicas(0) @@ -307,17 +309,17 @@ public void testSecurityIndicesPermissions() { // allow_restricted_indices: false IndicesPermission.Group group = new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, false, "*"); Map authzMap = new IndicesPermission(group).authorize(SearchAction.NAME, - Sets.newHashSet(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME), lookup, + Sets.newHashSet(internalSecurityIndex, RestrictedIndicesNames.SECURITY_INDEX_NAME), lookup, fieldPermissionsCache); - assertThat(authzMap.get(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX).isGranted(), is(false)); + assertThat(authzMap.get(internalSecurityIndex).isGranted(), is(false)); assertThat(authzMap.get(RestrictedIndicesNames.SECURITY_INDEX_NAME).isGranted(), is(false)); // allow_restricted_indices: true group = new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, true, "*"); authzMap = new IndicesPermission(group).authorize(SearchAction.NAME, - Sets.newHashSet(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME), lookup, + Sets.newHashSet(internalSecurityIndex, RestrictedIndicesNames.SECURITY_INDEX_NAME), lookup, fieldPermissionsCache); - assertThat(authzMap.get(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX).isGranted(), is(true)); + assertThat(authzMap.get(internalSecurityIndex).isGranted(), is(true)); assertThat(authzMap.get(RestrictedIndicesNames.SECURITY_INDEX_NAME).isGranted(), is(true)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java index 54dbdc3d33d72..99d23cc8b1059 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java @@ -136,6 +136,7 @@ public void testOptOutQueryCacheAuthIsNotAllowed() { final XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isAuthAllowed()).thenReturn(false); final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); + cache.listenForLicenseStateChanges(); final Weight weight = mock(Weight.class); final QueryCachingPolicy policy = mock(QueryCachingPolicy.class); cache.doCache(weight, policy); @@ -154,6 +155,7 @@ public void testOptOutQueryCacheNoIndicesPermissions() { final XPackLicenseState licenseState = 
mock(XPackLicenseState.class); when(licenseState.isAuthAllowed()).thenReturn(true); final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); + cache.listenForLicenseStateChanges(); final Weight weight = mock(Weight.class); final QueryCachingPolicy policy = mock(QueryCachingPolicy.class); final Weight w = cache.doCache(weight, policy); @@ -178,6 +180,7 @@ public void testOptOutQueryCacheIndexDoesNotHaveFieldLevelSecurity() { final XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isAuthAllowed()).thenReturn(true); final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); + cache.listenForLicenseStateChanges(); final Weight weight = mock(Weight.class); final QueryCachingPolicy policy = mock(QueryCachingPolicy.class); cache.doCache(weight, policy); @@ -195,6 +198,7 @@ public void testOptOutQueryCacheRemovesLicenseStateListenerOnClose() { final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); final XPackLicenseState licenseState = mock(XPackLicenseState.class); final OptOutQueryCache cache = new OptOutQueryCache(indexSettings, indicesQueryCache, threadContext, licenseState); + cache.listenForLicenseStateChanges(); verify(licenseState).addListener(cache); cache.close(); verify(licenseState).removeListener(cache); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index 5061d4c11edc1..f17442ca8464e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; +import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; @@ -94,6 +95,8 @@ public class CompositeRolesStoreTests extends ESTestCase { .build(); private final FieldPermissionsCache cache = new FieldPermissionsCache(Settings.EMPTY); + private final String concreteSecurityIndexName = randomFrom( + RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_6, RestrictedIndicesNames.INTERNAL_SECURITY_INDEX_7); public void testRolesWhenDlsFlsUnlicensed() throws IOException { XPackLicenseState licenseState = mock(XPackLicenseState.class); @@ -695,7 +698,7 @@ Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(Nativ } private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { - return new SecurityIndexManager.State(true, true, true, true, null, indexStatus); + return new SecurityIndexManager.State(true, true, true, true, null, concreteSecurityIndexName, indexStatus); } public void testCacheClearOnIndexHealthChange() { @@ -770,13 +773,13 @@ public void invalidateAll() { }; compositeRolesStore.onSecurityIndexStateChange( - new SecurityIndexManager.State(true, false, true, true, null, null), - new SecurityIndexManager.State(true, true, true, true, 
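// The IndicesPermissionTests and CompositeRolesStoreTests changes above pick one of the two concrete
// security index names at random and thread it through the test state, so either backing index is
// exercised on any given run. A self-contained sketch of that randomisation; randomFrom here is a local
// helper rather than the ESTestCase method, and the ".security-6"/".security-7" values are stated as an
// assumption about the RestrictedIndicesNames constants.
import java.util.concurrent.ThreadLocalRandom;

class RandomIndexNameExample {
    static final String INTERNAL_SECURITY_INDEX_6 = ".security-6"; // assumed value, illustration only
    static final String INTERNAL_SECURITY_INDEX_7 = ".security-7"; // assumed value, illustration only

    @SafeVarargs
    static <T> T randomFrom(T... values) {
        return values[ThreadLocalRandom.current().nextInt(values.length)];
    }

    public static void main(String[] args) {
        String concreteSecurityIndexName = randomFrom(INTERNAL_SECURITY_INDEX_6, INTERNAL_SECURITY_INDEX_7);
        System.out.println("this test run exercises " + concreteSecurityIndexName);
    }
}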
null, null)); + new SecurityIndexManager.State(true, false, true, true, null, concreteSecurityIndexName, null), + new SecurityIndexManager.State(true, true, true, true, null, concreteSecurityIndexName, null)); assertEquals(1, numInvalidation.get()); compositeRolesStore.onSecurityIndexStateChange( - new SecurityIndexManager.State(true, true, true, true, null, null), - new SecurityIndexManager.State(true, false, true, true, null, null)); + new SecurityIndexManager.State(true, true, true, true, null, concreteSecurityIndexName, null), + new SecurityIndexManager.State(true, false, true, true, null, concreteSecurityIndexName, null)); assertEquals(2, numInvalidation.get()); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index e2acbb81560bc..8f60b1d30523f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -125,7 +126,7 @@ public void testGetSinglePrivilegeByName() throws Exception { assertThat(requests.get(0), instanceOf(GetRequest.class)); GetRequest request = (GetRequest) requests.get(0); assertThat(request.index(), equalTo(SecurityIndexManager.SECURITY_INDEX_NAME)); - assertThat(request.type(), equalTo("doc")); + assertThat(request.type(), equalTo(MapperService.SINGLE_MAPPING_NAME)); assertThat(request.id(), equalTo("application-privilege_myapp:admin")); final String docSource = Strings.toString(sourcePrivilege); @@ -143,7 +144,7 @@ public void testGetMissingPrivilege() throws Exception { assertThat(requests.get(0), instanceOf(GetRequest.class)); GetRequest request = (GetRequest) requests.get(0); assertThat(request.index(), equalTo(SecurityIndexManager.SECURITY_INDEX_NAME)); - assertThat(request.type(), equalTo("doc")); + assertThat(request.type(), equalTo(MapperService.SINGLE_MAPPING_NAME)); assertThat(request.id(), equalTo("application-privilege_myapp:admin")); listener.get().onResponse(new GetResponse( @@ -229,7 +230,7 @@ public void testPutPrivileges() throws Exception { ApplicationPrivilegeDescriptor privilege = putPrivileges.get(i); IndexRequest request = indexRequests.get(i); assertThat(request.indices(), arrayContaining(SecurityIndexManager.SECURITY_INDEX_NAME)); - assertThat(request.type(), equalTo("doc")); + assertThat(request.type(), equalTo(MapperService.SINGLE_MAPPING_NAME)); assertThat(request.id(), equalTo( "application-privilege_" + privilege.getApplication() + ":" + privilege.getName() )); @@ -274,7 +275,7 @@ public void testDeletePrivileges() throws Exception { String name = privilegeNames.get(i); DeleteRequest request = deletes.get(i); assertThat(request.indices(), arrayContaining(SecurityIndexManager.SECURITY_INDEX_NAME)); - assertThat(request.type(), equalTo("doc")); + assertThat(request.type(), equalTo(MapperService.SINGLE_MAPPING_NAME)); assertThat(request.id(), equalTo("application-privilege_app1:" + name)); final boolean found 
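// The NativePrivilegeStoreTests assertions above swap the hard-coded "doc" type for
// MapperService.SINGLE_MAPPING_NAME. A tiny sketch of the idea; the "_doc" value is stated as an
// assumption about that constant, and TYPE_NAME below is a local stand-in, not the Elasticsearch field.
final class SingleMappingTypeExample {
    static final String TYPE_NAME = "_doc"; // assumed value of the shared single-mapping constant

    static void checkRequestType(String requestType) {
        if (!TYPE_NAME.equals(requestType)) {
            throw new AssertionError("expected the single mapping type [" + TYPE_NAME + "] but got [" + requestType + "]");
        }
    }

    public static void main(String[] args) {
        checkRequestType("_doc"); // passes: requests are built and asserted against one shared constant
    }
}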
= name.equals("p2") == false; deleteListener.onResponse(new DeleteResponse( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java index 4ff582f01bd88..4b40d165b5e49 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java @@ -7,6 +7,7 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.License; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.test.ESTestCase; @@ -24,11 +25,13 @@ public class SecurityBaseRestHandlerTests extends ESTestCase { public void testSecurityBaseRestHandlerChecksLicenseState() throws Exception { - final boolean securityDisabledByTrial = randomBoolean(); + final boolean securityDisabledByLicenseDefaults = randomBoolean(); final AtomicBoolean consumerCalled = new AtomicBoolean(false); final XPackLicenseState licenseState = mock(XPackLicenseState.class); when(licenseState.isSecurityAvailable()).thenReturn(true); - when(licenseState.isSecurityDisabledByTrialLicense()).thenReturn(securityDisabledByTrial); + when(licenseState.isSecurityDisabledByLicenseDefaults()).thenReturn(securityDisabledByLicenseDefaults); + when(licenseState.getOperationMode()).thenReturn( + randomFrom(License.OperationMode.BASIC, License.OperationMode.STANDARD, License.OperationMode.GOLD)); SecurityBaseRestHandler handler = new SecurityBaseRestHandler(Settings.EMPTY, licenseState) { @Override @@ -46,7 +49,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien } }; FakeRestRequest fakeRestRequest = new FakeRestRequest(); - FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), securityDisabledByTrial ? 1 : 0); + FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), securityDisabledByLicenseDefaults ? 
1 : 0); NodeClient client = mock(NodeClient.class); assertFalse(consumerCalled.get()); @@ -54,7 +57,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien handler.handleRequest(fakeRestRequest, fakeRestChannel, client); verify(licenseState).isSecurityAvailable(); - if (securityDisabledByTrial == false) { + if (securityDisabledByLicenseDefaults == false) { assertTrue(consumerCalled.get()); assertEquals(0, fakeRestChannel.responses().get()); assertEquals(0, fakeRestChannel.errors().get()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestCreateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java similarity index 95% rename from x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestCreateApiKeyActionTests.java rename to x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java index 1b1b0fe8f0f1a..394c9747b6daf 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestCreateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.security.rest.action; +package org.elasticsearch.xpack.security.rest.action.apikey; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchSecurityException; @@ -30,6 +30,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.action.CreateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.CreateApiKeyResponse; +import org.elasticsearch.xpack.security.rest.action.apikey.RestCreateApiKeyAction; import java.time.Duration; import java.time.Instant; @@ -56,6 +57,7 @@ public void setUp() throws Exception { .build(); threadPool = new ThreadPool(settings); when(mockLicenseState.isSecurityAvailable()).thenReturn(true); + when(mockLicenseState.isApiKeyServiceAllowed()).thenReturn(true); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java similarity index 97% rename from x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestGetApiKeyActionTests.java rename to x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index 533fa6195edc2..9788bc1a5b22f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
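// The SecurityBaseRestHandlerTests change above randomises whether security is disabled by the license
// defaults, stubs the mocked license state accordingly, and sizes the expected error count of the fake
// channel from that flag. A self-contained sketch of the same stub-then-branch structure with Mockito;
// LicenseStateLike is a hypothetical interface, not XPackLicenseState.
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.concurrent.ThreadLocalRandom;

class LicenseGatedHandlerExample {
    interface LicenseStateLike {
        boolean isFeatureDisabledByLicenseDefaults();
    }

    public static void main(String[] args) {
        boolean disabledByDefaults = ThreadLocalRandom.current().nextBoolean();
        LicenseStateLike licenseState = mock(LicenseStateLike.class);
        when(licenseState.isFeatureDisabledByLicenseDefaults()).thenReturn(disabledByDefaults);

        // mirror the FakeRestChannel sizing: one error response expected when the feature is gated off
        int expectedErrors = disabledByDefaults ? 1 : 0;
        int actualErrors = licenseState.isFeatureDisabledByLicenseDefaults() ? 1 : 0;
        if (actualErrors != expectedErrors) {
            throw new AssertionError("expected " + expectedErrors + " error responses");
        }
    }
}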
*/ -package org.elasticsearch.xpack.security.rest.action; +package org.elasticsearch.xpack.security.rest.action.apikey; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchSecurityException; @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.core.security.action.ApiKey; import org.elasticsearch.xpack.core.security.action.GetApiKeyRequest; import org.elasticsearch.xpack.core.security.action.GetApiKeyResponse; +import org.elasticsearch.xpack.security.rest.action.apikey.RestGetApiKeyAction; import java.time.Instant; import java.time.temporal.ChronoUnit; @@ -56,6 +57,7 @@ public void setUp() throws Exception { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); threadPool = new ThreadPool(settings); when(mockLicenseState.isSecurityAvailable()).thenReturn(true); + when(mockLicenseState.isApiKeyServiceAllowed()).thenReturn(true); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestInvalidateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java similarity index 96% rename from x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestInvalidateApiKeyActionTests.java rename to x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java index 6a8a60ae2a999..e73f4e3c210d4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestInvalidateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.security.rest.action; +package org.elasticsearch.xpack.security.rest.action.apikey; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchSecurityException; @@ -29,6 +29,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyRequest; import org.elasticsearch.xpack.core.security.action.InvalidateApiKeyResponse; +import org.elasticsearch.xpack.security.rest.action.apikey.RestInvalidateApiKeyAction; import java.util.Collections; @@ -52,6 +53,7 @@ public void setUp() throws Exception { .build(); threadPool = new ThreadPool(settings); when(mockLicenseState.isSecurityAvailable()).thenReturn(true); + when(mockLicenseState.isApiKeyServiceAllowed()).thenReturn(true); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/saml/SamlBaseRestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/saml/SamlBaseRestHandlerTests.java index 5b442deacf6e7..66993c2269dfd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/saml/SamlBaseRestHandlerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/saml/SamlBaseRestHandlerTests.java @@ -27,16 +27,9 @@ public void testSamlAvailableOnTrialAndPlatinum() { assertThat(handler.checkFeatureAvailable(new FakeRestRequest()), Matchers.nullValue()); } - public void testSecurityNotAvailableOnBasic() { - final SamlBaseRestHandler handler = buildHandler(License.OperationMode.BASIC); - Exception e = handler.checkFeatureAvailable(new FakeRestRequest()); - assertThat(e, instanceOf(ElasticsearchException.class)); - ElasticsearchException elasticsearchException = (ElasticsearchException) e; - assertThat(elasticsearchException.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), contains("security")); - } - - public void testSamlNotAvailableOnStandardOrGold() { - final SamlBaseRestHandler handler = buildHandler(randomFrom(License.OperationMode.STANDARD, License.OperationMode.GOLD)); + public void testSamlNotAvailableOnBasicStandardOrGold() { + final SamlBaseRestHandler handler = buildHandler(randomFrom(License.OperationMode.BASIC, License.OperationMode.STANDARD, + License.OperationMode.GOLD)); Exception e = handler.checkFeatureAvailable(new FakeRestRequest()); assertThat(e, instanceOf(ElasticsearchException.class)); ElasticsearchException elasticsearchException = (ElasticsearchException) e; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 0741d1c04e995..7045d70c38142 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -15,6 +15,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; @@ -26,6 +27,7 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import 
org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; @@ -40,10 +42,13 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.security.test.SecurityTestUtils; @@ -55,6 +60,10 @@ import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_TEMPLATE_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.TEMPLATE_VERSION_PATTERN; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -72,6 +81,7 @@ public void setUpManager() { final Client mockClient = mock(Client.class); final ThreadPool threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + when(threadPool.generic()).thenReturn(EsExecutors.newDirectExecutorService()); when(mockClient.threadPool()).thenReturn(threadPool); when(mockClient.settings()).thenReturn(Settings.EMPTY); final ClusterService clusterService = mock(ClusterService.class); @@ -192,6 +202,67 @@ public void testIndexHealthChangeListeners() throws Exception { assertEquals(ClusterHealthStatus.GREEN, currentState.get().indexStatus); } + public void testWriteBeforeStateNotRecovered() throws Exception { + final AtomicBoolean prepareRunnableCalled = new AtomicBoolean(false); + final AtomicReference prepareException = new AtomicReference<>(null); + manager.prepareIndexIfNeededThenExecute(ex -> { + prepareException.set(ex); + }, () -> { + prepareRunnableCalled.set(true); + }); + assertThat(prepareException.get(), is(notNullValue())); + assertThat(prepareException.get(), instanceOf(ElasticsearchStatusException.class)); + assertThat(((ElasticsearchStatusException)prepareException.get()).status(), is(RestStatus.SERVICE_UNAVAILABLE)); + assertThat(prepareRunnableCalled.get(), is(false)); + prepareException.set(null); + prepareRunnableCalled.set(false); + // state not recovered + final ClusterBlocks.Builder blocks = ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK); + manager.clusterChanged(event(new ClusterState.Builder(CLUSTER_NAME).blocks(blocks))); + manager.prepareIndexIfNeededThenExecute(ex -> { + prepareException.set(ex); + }, () -> { + prepareRunnableCalled.set(true); + }); + assertThat(prepareException.get(), is(notNullValue())); + assertThat(prepareException.get(), instanceOf(ElasticsearchStatusException.class)); + assertThat(((ElasticsearchStatusException)prepareException.get()).status(), is(RestStatus.SERVICE_UNAVAILABLE)); + assertThat(prepareRunnableCalled.get(), is(false)); + prepareException.set(null); + 
prepareRunnableCalled.set(false); + // state recovered with index + ClusterState.Builder clusterStateBuilder = createClusterState(INDEX_NAME, TEMPLATE_NAME, + SecurityIndexManager.INTERNAL_INDEX_FORMAT); + markShardsAvailable(clusterStateBuilder); + manager.clusterChanged(event(clusterStateBuilder)); + manager.prepareIndexIfNeededThenExecute(ex -> { + prepareException.set(ex); + }, () -> { + prepareRunnableCalled.set(true); + }); + assertThat(prepareException.get(), is(nullValue())); + assertThat(prepareRunnableCalled.get(), is(true)); + } + + public void testListeneredNotCalledBeforeStateNotRecovered() throws Exception { + final AtomicBoolean listenerCalled = new AtomicBoolean(false); + manager.addIndexStateListener((prev, current) -> { + listenerCalled.set(true); + }); + final ClusterBlocks.Builder blocks = ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK); + // state not recovered + manager.clusterChanged(event(new ClusterState.Builder(CLUSTER_NAME).blocks(blocks))); + assertThat(manager.isStateRecovered(), is(false)); + assertThat(listenerCalled.get(), is(false)); + // state recovered with index + ClusterState.Builder clusterStateBuilder = createClusterState(INDEX_NAME, TEMPLATE_NAME, + SecurityIndexManager.INTERNAL_INDEX_FORMAT); + markShardsAvailable(clusterStateBuilder); + manager.clusterChanged(event(clusterStateBuilder)); + assertThat(manager.isStateRecovered(), is(true)); + assertThat(listenerCalled.get(), is(true)); + } + public void testIndexOutOfDateListeners() throws Exception { final AtomicBoolean listenerCalled = new AtomicBoolean(false); manager.clusterChanged(event(new ClusterState.Builder(CLUSTER_NAME))); @@ -236,12 +307,14 @@ private void assertInitialState() { assertThat(manager.indexExists(), Matchers.equalTo(false)); assertThat(manager.isAvailable(), Matchers.equalTo(false)); assertThat(manager.isMappingUpToDate(), Matchers.equalTo(false)); + assertThat(manager.isStateRecovered(), Matchers.equalTo(false)); } private void assertIndexUpToDateButNotAvailable() { assertThat(manager.indexExists(), Matchers.equalTo(true)); assertThat(manager.isAvailable(), Matchers.equalTo(false)); assertThat(manager.isMappingUpToDate(), Matchers.equalTo(true)); + assertThat(manager.isStateRecovered(), Matchers.equalTo(true)); } public static ClusterState.Builder createClusterState(String indexName, String templateName) throws IOException { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java new file mode 100644 index 0000000000000..5ff329ceced84 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
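// The two SecurityIndexManagerTests tests above capture what prepareIndexIfNeededThenExecute() does with
// its callbacks before and after cluster state recovery: the failure Consumer lands in an AtomicReference
// and the success Runnable flips an AtomicBoolean. A minimal sketch of that callback-capture pattern
// against a hypothetical prepareThenExecute(Consumer, Runnable) API, not the Elasticsearch class itself.
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

class CallbackCaptureExample {
    private boolean stateRecovered = false;

    void prepareThenExecute(Consumer<Exception> onFailure, Runnable andThen) {
        if (!stateRecovered) {
            onFailure.accept(new IllegalStateException("cluster state not recovered"));
        } else {
            andThen.run();
        }
    }

    public static void main(String[] args) {
        CallbackCaptureExample manager = new CallbackCaptureExample();
        AtomicReference<Exception> failure = new AtomicReference<>();
        AtomicBoolean ran = new AtomicBoolean(false);

        manager.prepareThenExecute(failure::set, () -> ran.set(true));
        if (failure.get() == null || ran.get()) {
            throw new AssertionError("call must be rejected before state recovery");
        }

        manager.stateRecovered = true;
        failure.set(null);
        manager.prepareThenExecute(failure::set, () -> ran.set(true));
        if (failure.get() != null || !ran.get()) {
            throw new AssertionError("runnable must run once state is recovered");
        }
    }
}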
+ */ + +package org.elasticsearch.xpack.security.test; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetRequestBuilder; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; +import org.junit.Assert; + +import java.util.function.Consumer; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; +import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_INDEX_NAME; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Utility class for constructing commonly used mock objects. + * Note to maintainers: + * It is not intended that this class cover _all_ mocking scenarios. Consider very carefully before adding methods to this class that are + * only used in one or 2 places. This class is intended for the situations where a common piece of complex mock code is used in multiple + * test suites. + */ +public final class SecurityMocks { + + private SecurityMocks() { + throw new IllegalStateException("Cannot instantiate utility class"); + } + + public static SecurityIndexManager mockSecurityIndexManager() { + return mockSecurityIndexManager(true, true); + } + + public static SecurityIndexManager mockSecurityIndexManager(boolean exists, boolean available) { + final SecurityIndexManager securityIndexManager = mock(SecurityIndexManager.class); + doAnswer(invocationOnMock -> { + Runnable runnable = (Runnable) invocationOnMock.getArguments()[1]; + runnable.run(); + return null; + }).when(securityIndexManager).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); + doAnswer(invocationOnMock -> { + Runnable runnable = (Runnable) invocationOnMock.getArguments()[1]; + runnable.run(); + return null; + }).when(securityIndexManager).checkIndexVersionThenExecute(any(Consumer.class), any(Runnable.class)); + when(securityIndexManager.indexExists()).thenReturn(exists); + when(securityIndexManager.isAvailable()).thenReturn(available); + return securityIndexManager; + } + + public static void mockGetRequest(Client client, String documentId, BytesReference source) { + GetResult result = new GetResult(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, documentId, 0, 1, 1, true, source, emptyMap()); + mockGetRequest(client, documentId, result); + } + + public static void mockGetRequest(Client client, String documentId, GetResult result) { + final GetRequestBuilder requestBuilder = new GetRequestBuilder(client, GetAction.INSTANCE); + requestBuilder.setIndex(SECURITY_INDEX_NAME); + requestBuilder.setType(SINGLE_MAPPING_NAME); + requestBuilder.setId(documentId); + when(client.prepareGet(SECURITY_INDEX_NAME, SINGLE_MAPPING_NAME, documentId)).thenReturn(requestBuilder); + + doAnswer(inv -> { + Assert.assertThat(inv.getArguments(), arrayWithSize(2)); + Assert.assertThat(inv.getArguments()[0], instanceOf(GetRequest.class)); + final GetRequest request = (GetRequest) inv.getArguments()[0]; + 
Assert.assertThat(request.id(), equalTo(documentId)); + Assert.assertThat(request.index(), equalTo(SECURITY_INDEX_NAME)); + Assert.assertThat(request.type(), equalTo(SINGLE_MAPPING_NAME)); + + Assert.assertThat(inv.getArguments()[1], instanceOf(ActionListener.class)); + ActionListener listener = (ActionListener) inv.getArguments()[1]; + listener.onResponse(new GetResponse(result)); + + return null; + }).when(client).get(any(GetRequest.class), any(ActionListener.class)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java index 350c55a558cb6..cce9c7ecdd0b9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; @@ -207,13 +208,13 @@ private ServerTransportFilter.ClientProfile getClientFilter(boolean reservedReal Settings settings = Settings.builder().put("path.home", createTempDir()).build(); ThreadContext threadContext = new ThreadContext(settings); return new ServerTransportFilter.ClientProfile(authcService, authzService, threadContext, false, destructiveOperations, - reservedRealmEnabled, new SecurityContext(settings, threadContext)); + reservedRealmEnabled, new SecurityContext(settings, threadContext), new XPackLicenseState(settings)); } private ServerTransportFilter.NodeProfile getNodeFilter(boolean reservedRealmEnabled) throws IOException { Settings settings = Settings.builder().put("path.home", createTempDir()).build(); ThreadContext threadContext = new ThreadContext(settings); return new ServerTransportFilter.NodeProfile(authcService, authzService, threadContext, false, destructiveOperations, - reservedRealmEnabled, new SecurityContext(settings, threadContext)); + reservedRealmEnabled, new SecurityContext(settings, threadContext), new XPackLicenseState(settings)); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java index 78825d95ce078..e3777fc854599 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.transport.filter; +import org.elasticsearch.common.Numbers; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkAddress; @@ -26,6 +27,9 @@ import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.NetworkInterface; +import java.net.SocketException; +import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -35,6 +39,7 @@ import 
java.util.Map; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -140,7 +145,8 @@ public void testThatProfilesAreSupported() throws Exception { ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); assertAddressIsAllowed("127.0.0.1"); - assertAddressIsDenied("192.168.0.1"); + // when "localhost" is used, ES considers all local addresses see PatternRule#isLocalhost() + assertAddressIsDenied(randomNonLocalIPv4Address()); assertAddressIsAllowedForProfile("client", "192.168.0.1"); assertAddressIsDeniedForProfile("client", "192.168.0.2"); } @@ -161,7 +167,8 @@ public void testThatProfilesAreUpdateable() throws Exception { clusterSettings.updateDynamicSettings(newSettings, updatedSettingsBuilder, Settings.builder(), "test"); clusterSettings.applySettings(updatedSettingsBuilder.build()); assertAddressIsAllowed("127.0.0.1"); - assertAddressIsDenied("192.168.0.1"); + // when "localhost" is used, ES considers all local addresses see PatternRule#isLocalhost() + assertAddressIsDenied(randomNonLocalIPv4Address()); assertAddressIsAllowedForProfile("client", "192.168.0.1", "192.168.0.2"); assertAddressIsDeniedForProfile("client", "192.168.0.3"); } @@ -297,4 +304,22 @@ private void assertAddressIsDeniedForProfile(String profile, String ... inetAddr private void assertAddressIsDenied(String ... inetAddresses) { assertAddressIsDeniedForProfile("default", inetAddresses); } + + private String randomNonLocalIPv4Address() throws SocketException, UnknownHostException { + String ipv4Address = null; + int noOfRetries = 0; + do { + noOfRetries++; + final InetAddress address = InetAddress.getByAddress(Numbers.intToBytes(randomInt())); + if (address.isAnyLocalAddress() || address.isLoopbackAddress() || NetworkInterface.getByInetAddress(address) != null) { + continue; + } else { + ipv4Address = NetworkAddress.format(address); + break; + } + } while (ipv4Address == null && noOfRetries < 25); + assertThat("could not generate random IPv4 address which is not local address", ipv4Address, notNullValue()); + return ipv4Address; + } + } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java index e7b31d88eda19..a295e47b6d704 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.update.UpdateAction; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; +import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.security.audit.index.IndexNameResolver; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -28,11 +29,14 @@ public void testXPackUserCanAccessNonSecurityIndices() { assertThat(predicate.test(index), Matchers.is(true)); } - public void testXPackUserCannotAccessSecurityIndex() { + public void testXPackUserCannotAccessRestrictedIndices() { final String action = randomFrom(GetAction.NAME, SearchAction.NAME, 
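// The IPFilterTests helper added above retries random 4-byte addresses until it finds one that is
// neither a wildcard/loopback address nor bound to a local interface, because a "localhost" filter rule
// matches every local address. A self-contained sketch of the same search using only JDK APIs (no
// Elasticsearch test utilities such as Numbers.intToBytes or randomInt()).
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.concurrent.ThreadLocalRandom;

class RandomNonLocalAddressExample {
    static String randomNonLocalIPv4Address() throws SocketException, UnknownHostException {
        for (int attempt = 0; attempt < 25; attempt++) {
            byte[] raw = ByteBuffer.allocate(4).putInt(ThreadLocalRandom.current().nextInt()).array();
            InetAddress address = InetAddress.getByAddress(raw);
            if (address.isAnyLocalAddress() || address.isLoopbackAddress()
                    || NetworkInterface.getByInetAddress(address) != null) {
                continue; // address belongs to this machine, try again
            }
            return address.getHostAddress();
        }
        throw new AssertionError("could not generate a non-local IPv4 address in 25 attempts");
    }

    public static void main(String[] args) throws Exception {
        System.out.println(randomNonLocalIPv4Address());
    }
}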
IndexAction.NAME); final Predicate predicate = XPackUser.ROLE.indices().allowedIndicesMatcher(action); assertThat(predicate.test(SecurityIndexManager.SECURITY_INDEX_NAME), Matchers.is(false)); assertThat(predicate.test(SecurityIndexManager.INTERNAL_SECURITY_INDEX), Matchers.is(false)); + for (String index : RestrictedIndicesNames.RESTRICTED_NAMES) { + assertThat(predicate.test(index), Matchers.is(false)); + } } public void testXPackUserCanReadAuditTrail() { @@ -52,4 +56,4 @@ private String getAuditLogName() { final IndexNameResolver.Rollover rollover = randomFrom(IndexNameResolver.Rollover.values()); return IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, date, rollover); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/security/src/test/resources/SecurityIndexManagerTests-template.json b/x-pack/plugin/security/src/test/resources/SecurityIndexManagerTests-template.json index 0957b1da7ec70..c795194ffac16 100644 --- a/x-pack/plugin/security/src/test/resources/SecurityIndexManagerTests-template.json +++ b/x-pack/plugin/security/src/test/resources/SecurityIndexManagerTests-template.json @@ -1,7 +1,7 @@ { "index_patterns": ".security", "mappings": { - "doc": { + "_doc": { "_meta": { "security-version": "${security.template.version}" }, diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index cd2f82ee7b308..3d0aafecadd7a 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -1,5 +1,3 @@ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask - evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' @@ -10,6 +8,12 @@ esplugin { extendedPlugins = ['x-pack-core', 'lang-painless'] } +ext { + // SQL dependency versions + jlineVersion="3.10.0" + antlrVersion="4.5.3" +} + configurations { // Bundles the sql-cli.jar into the distribution bin @@ -20,14 +24,13 @@ archivesBaseName = 'x-pack-sql' // All integration tests live in qa modules integTest.enabled = false -task internalClusterTest(type: RandomizedTestingTask, - group: JavaBasePlugin.VERIFICATION_GROUP, - dependsOn: unitTest.dependsOn) { +task internalClusterTest(type: Test) { + mustRunAfter test include '**/*IT.class' systemProperty 'es.set.netty.runtime.available.processors', 'false' } + check.dependsOn internalClusterTest -internalClusterTest.mustRunAfter test dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here @@ -81,7 +84,7 @@ configurations { } dependencies { - regenerate 'org.antlr:antlr4:4.5.3' + regenerate "org.antlr:antlr4:${antlrVersion}" } String grammarPath = 'src/main/antlr' diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index a077b4ac7ba10..3c7eb6b804b5a 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -24,6 +24,7 @@ dependencies { compile project(':libs:core') runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testCompile "org.elasticsearch.test:framework:${version}" + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } dependencyLicenses { @@ -36,7 +37,7 @@ dependencyLicenses { ignoreSha 'elasticsearch' } -unitTest { +test { // don't use the shaded jar for tests classpath += project.tasks.compileJava.outputs.files classpath -= project.tasks.shadowJar.outputs.files diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/InfoResponse.java 
b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/InfoResponse.java index a7671d80d0104..aff896d1c21f3 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/InfoResponse.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/InfoResponse.java @@ -12,10 +12,21 @@ class InfoResponse { final String cluster; final int majorVersion; final int minorVersion; + final int revisionVersion; - InfoResponse(String clusterName, byte versionMajor, byte versionMinor) { + InfoResponse(String clusterName, byte versionMajor, byte versionMinor, byte revisionVersion) { this.cluster = clusterName; this.majorVersion = versionMajor; this.minorVersion = versionMinor; + this.revisionVersion = revisionVersion; + } + + @Override + public String toString() { + return cluster + "[" + versionString() + "]"; + } + + public String versionString() { + return majorVersion + "." + minorVersion + "." + revisionVersion; } } \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java index 82b12be50c42e..9b1ff87596798 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcColumnInfo.java @@ -7,6 +7,8 @@ import java.util.Objects; +import static org.elasticsearch.xpack.sql.client.StringUtils.EMPTY; + class JdbcColumnInfo { public final String catalog; public final String schema; @@ -52,17 +54,17 @@ int displaySize() { @Override public String toString() { StringBuilder b = new StringBuilder(); - if (false == "".equals(table)) { + if (false == EMPTY.equals(table)) { b.append(table).append('.'); } b.append(name).append("').toString(); diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java index 43d296058f43f..7a9154c10ac4e 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfiguration.java @@ -47,20 +47,25 @@ class JdbcConfiguration extends ConnectionConfiguration { // can be out/err/url static final String DEBUG_OUTPUT_DEFAULT = "err"; - public static final String TIME_ZONE = "timezone"; + static final String TIME_ZONE = "timezone"; // follow the JDBC spec and use the JVM default... // to avoid inconsistency, the default is picked up once at startup and reused across connections // to cater to the principle of least surprise // really, the way to move forward is to specify a calendar or the timezone manually static final String TIME_ZONE_DEFAULT = TimeZone.getDefault().getID(); + static final String FIELD_MULTI_VALUE_LENIENCY = "field.multi.value.leniency"; + static final String FIELD_MULTI_VALUE_LENIENCY_DEFAULT = "true"; + + // options that don't change at runtime - private static final Set OPTION_NAMES = new LinkedHashSet<>(Arrays.asList(TIME_ZONE, DEBUG, DEBUG_OUTPUT)); + private static final Set OPTION_NAMES = new LinkedHashSet<>( + Arrays.asList(TIME_ZONE, FIELD_MULTI_VALUE_LENIENCY, DEBUG, DEBUG_OUTPUT)); static { // trigger version initialization // typically this should have already happened but in case the - // JdbcDriver/JdbcDataSource are not used and the impl. 
classes used directly + // EsDriver/EsDataSource are not used and the impl. classes used directly // this covers that case Version.CURRENT.toString(); } @@ -71,6 +76,7 @@ class JdbcConfiguration extends ConnectionConfiguration { // mutable ones private ZoneId zoneId; + private boolean fieldMultiValueLeniency; public static JdbcConfiguration create(String u, Properties props, int loginTimeoutSeconds) throws JdbcSQLException { URI uri = parseUrl(u); @@ -151,13 +157,19 @@ private JdbcConfiguration(URI baseURI, String u, Properties props) throws JdbcSQ this.zoneId = parseValue(TIME_ZONE, props.getProperty(TIME_ZONE, TIME_ZONE_DEFAULT), s -> TimeZone.getTimeZone(s).toZoneId().normalized()); + this.fieldMultiValueLeniency = parseValue(FIELD_MULTI_VALUE_LENIENCY, + props.getProperty(FIELD_MULTI_VALUE_LENIENCY, FIELD_MULTI_VALUE_LENIENCY_DEFAULT), Boolean::parseBoolean); } @Override - protected Collection extraOptions() { + protected Collection extraOptions() { return OPTION_NAMES; } + ZoneId zoneId() { + return zoneId; + } + public boolean debug() { return debug; } @@ -170,8 +182,8 @@ public TimeZone timeZone() { return zoneId != null ? TimeZone.getTimeZone(zoneId) : null; } - public void timeZone(TimeZone timeZone) { - this.zoneId = timeZone != null ? timeZone.toZoneId() : null; + public boolean fieldMultiValueLeniency() { + return fieldMultiValueLeniency; } public static boolean canAccept(String url) { @@ -180,12 +192,11 @@ public static boolean canAccept(String url) { public DriverPropertyInfo[] driverPropertyInfo() { List info = new ArrayList<>(); - for (String option : OPTION_NAMES) { - String value = null; - DriverPropertyInfo prop = new DriverPropertyInfo(option, value); + for (String option : optionNames()) { + DriverPropertyInfo prop = new DriverPropertyInfo(option, null); info.add(prop); } return info.toArray(new DriverPropertyInfo[info.size()]); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConnection.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConnection.java index c682c5ac05c63..09096fbe405a0 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConnection.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcConnection.java @@ -45,9 +45,12 @@ class JdbcConnection implements Connection, JdbcWrapper { * If we remove it, we need to make sure no other types of Exceptions (runtime or otherwise) are thrown */ JdbcConnection(JdbcConfiguration connectionInfo) throws SQLException { - cfg = connectionInfo; - client = new JdbcHttpClient(connectionInfo); + this(connectionInfo, true); + } + JdbcConnection(JdbcConfiguration connectionInfo, boolean checkServer) throws SQLException { + cfg = connectionInfo; + client = new JdbcHttpClient(connectionInfo, checkServer); url = connectionInfo.connectionString(); userName = connectionInfo.authUser(); } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java index 5697453730455..4f646b8547693 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java @@ -10,15 +10,17 @@ import java.sql.Connection; import java.sql.DatabaseMetaData; +import java.sql.DriverPropertyInfo; import 
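// The JdbcConfiguration change above introduces a "field.multi.value.leniency" connection property that
// defaults to "true" and is parsed with Boolean::parseBoolean. A minimal sketch of reading such a
// property from the JDBC Properties object; the property name and default come from the diff, everything
// else is illustrative.
import java.util.Properties;

class LeniencyPropertyExample {
    static final String FIELD_MULTI_VALUE_LENIENCY = "field.multi.value.leniency";
    static final String FIELD_MULTI_VALUE_LENIENCY_DEFAULT = "true";

    static boolean fieldMultiValueLeniency(Properties props) {
        return Boolean.parseBoolean(
            props.getProperty(FIELD_MULTI_VALUE_LENIENCY, FIELD_MULTI_VALUE_LENIENCY_DEFAULT));
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty(FIELD_MULTI_VALUE_LENIENCY, "false");
        System.out.println(fieldMultiValueLeniency(props));            // false, explicitly set
        System.out.println(fieldMultiValueLeniency(new Properties())); // true, the default
    }
}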
java.sql.JDBCType; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.RowIdLifetime; import java.sql.SQLException; -import java.sql.SQLFeatureNotSupportedException; import java.util.ArrayList; import java.util.List; +import static java.sql.JDBCType.BIGINT; +import static java.sql.JDBCType.BOOLEAN; import static java.sql.JDBCType.INTEGER; import static java.sql.JDBCType.SMALLINT; import static org.elasticsearch.xpack.sql.client.StringUtils.EMPTY; @@ -31,6 +33,8 @@ */ class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { + private static final String WILDCARD = "%"; + private final JdbcConnection con; JdbcDatabaseMetaData(JdbcConnection con) { @@ -257,8 +261,7 @@ public boolean nullPlusNonNullIsNull() throws SQLException { @Override public boolean supportsConvert() throws SQLException { - //TODO: add Convert - return false; + return true; } @Override @@ -664,8 +667,7 @@ public boolean dataDefinitionIgnoredInTransactions() throws SQLException { // https://www.postgresql.org/docs/9.0/static/infoschema-routines.html @Override public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException { - return emptySet(con.cfg, - "ROUTINES", + return emptySet(con.cfg, "ROUTINES", "PROCEDURE_CAT", "PROCEDURE_SCHEM", "PROCEDURE_NAME", @@ -680,8 +682,7 @@ public ResultSet getProcedures(String catalog, String schemaPattern, String proc @Override public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException { - return emptySet(con.cfg, - "PARAMETERS", + return emptySet(con.cfg, "ROUTINES_COLUMNS", "PROCEDURE_CAT", "PROCEDURE_SCHEM", "PROCEDURE_NAME", @@ -714,19 +715,19 @@ private boolean isDefaultCatalog(String catalog) throws SQLException { // null means catalog info is irrelevant // % means return all catalogs // EMPTY means return those without a catalog - return catalog == null || catalog.equals(EMPTY) || catalog.equals("%") || catalog.equals(defaultCatalog()); + return catalog == null || catalog.equals(EMPTY) || catalog.equals(WILDCARD) || catalog.equals(defaultCatalog()); } private boolean isDefaultSchema(String schema) { // null means schema info is irrelevant // % means return all schemas` // EMPTY means return those without a schema - return schema == null || schema.equals(EMPTY) || schema.equals("%"); + return schema == null || schema.equals(EMPTY) || schema.equals(WILDCARD); } @Override public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { - String statement = "SYS TABLES CATALOG LIKE ? LIKE ?"; + String statement = "SYS TABLES CATALOG LIKE ? ESCAPE '\\' LIKE ? ESCAPE '\\' "; if (types != null && types.length > 0) { statement += " TYPE ?"; @@ -739,8 +740,8 @@ public ResultSet getTables(String catalog, String schemaPattern, String tableNam } PreparedStatement ps = con.prepareStatement(statement); - ps.setString(1, catalog != null ? catalog.trim() : "%"); - ps.setString(2, tableNamePattern != null ? tableNamePattern.trim() : "%"); + ps.setString(1, catalog != null ? catalog.trim() : WILDCARD); + ps.setString(2, tableNamePattern != null ? 
tableNamePattern.trim() : WILDCARD); if (types != null && types.length > 0) { for (int i = 0; i < types.length; i++) { @@ -753,88 +754,166 @@ public ResultSet getTables(String catalog, String schemaPattern, String tableNam @Override public ResultSet getSchemas() throws SQLException { - Object[][] data = { { EMPTY, defaultCatalog() } }; - return memorySet(con.cfg, columnInfo("SCHEMATA", - "TABLE_SCHEM", - "TABLE_CATALOG"), data); + return emptySet(con.cfg, "SCHEMATA", + "TABLE_SCHEM", + "TABLE_CATALOG"); } @Override public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { - List info = columnInfo("SCHEMATA", - "TABLE_SCHEM", - "TABLE_CATALOG"); - if (!isDefaultCatalog(catalog) || !isDefaultSchema(schemaPattern)) { - return emptySet(con.cfg, info); - } - Object[][] data = { { EMPTY, defaultCatalog() } }; - return memorySet(con.cfg, info, data); + return getSchemas(); } @Override public ResultSet getCatalogs() throws SQLException { // TABLE_CAT is the first column - Object[][] data = queryColumn(con, "SYS TABLES CATALOG LIKE '%'", 1); - return memorySet(con.cfg, columnInfo("", "TABLE_CAT"), data); + Object[][] data = queryColumn(con, "SYS TABLES CATALOG LIKE '%' LIKE ''", 1); + return memorySet(con.cfg, columnInfo("CATALOGS", "TABLE_CAT"), data); } @Override public ResultSet getTableTypes() throws SQLException { // TABLE_TYPE (4) - Object[][] data = queryColumn(con, "SYS TABLES TYPE '%'", 4); - return memorySet(con.cfg, columnInfo("", "TABLE_TYPE"), data); + Object[][] data = queryColumn(con, "SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", 4); + return memorySet(con.cfg, columnInfo("TABLE_TYPES", "TABLE_TYPE"), data); } + @Override public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { - PreparedStatement ps = con.prepareStatement("SYS COLUMNS CATALOG ? TABLE LIKE ? LIKE ?"); - // TODO: until passing null works, pass an empty string - ps.setString(1, catalog != null ? catalog.trim() : EMPTY); - ps.setString(2, tableNamePattern != null ? tableNamePattern.trim() : "%"); - ps.setString(3, columnNamePattern != null ? columnNamePattern.trim() : "%"); + PreparedStatement ps = con.prepareStatement("SYS COLUMNS CATALOG ? TABLE LIKE ? ESCAPE '\\' LIKE ? ESCAPE '\\'"); + // NB: catalog is not a pattern hence why null is send instead + ps.setString(1, catalog != null ? catalog.trim() : null); + ps.setString(2, tableNamePattern != null ? tableNamePattern.trim() : WILDCARD); + ps.setString(3, columnNamePattern != null ? 
columnNamePattern.trim() : WILDCARD); return ps.executeQuery(); } @Override public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException { - throw new SQLFeatureNotSupportedException("Privileges not supported"); + return emptySet(con.cfg, "", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + "COLUMN_NAME", + "GRANTOR", + "GRANTEE", + "PRIVILEGE", + "IS_GRANTABLE"); } @Override public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { - throw new SQLFeatureNotSupportedException("Privileges not supported"); + return emptySet(con.cfg, "", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + "GRANTOR", + "GRANTEE", + "PRIVILEGE", + "IS_GRANTABLE"); } @Override public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException { - throw new SQLFeatureNotSupportedException("Row identifiers not supported"); + return emptySet(con.cfg, "", + "SCOPE", SMALLINT, + "COLUMN_NAME", + "DATA_TYPE", INTEGER, + "TYPE_NAME", + "COLUMN_SIZE", INTEGER, + "BUFFER_LENGTH", INTEGER, + "DECIMAL_DIGITS", SMALLINT, + "PSEUDO_COLUMN", SMALLINT); } @Override public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { - throw new SQLFeatureNotSupportedException("Version column not supported yet"); + return emptySet(con.cfg, "", + "SCOPE", SMALLINT, + "COLUMN_NAME", + "DATA_TYPE", INTEGER, + "TYPE_NAME", + "COLUMN_SIZE", INTEGER, + "BUFFER_LENGTH", INTEGER, + "DECIMAL_DIGITS", SMALLINT, + "PSEUDO_COLUMN", SMALLINT); } @Override public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { - throw new SQLFeatureNotSupportedException("Primary keys not supported"); + return emptySet(con.cfg, "", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + "COLUMN_NAME", + "KEY_SEQ", SMALLINT, + "PK_NAME"); } @Override public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { - throw new SQLFeatureNotSupportedException("Imported keys not supported"); + return emptySet(con.cfg, "", + "PKTABLE_CAT", + "PKTABLE_SCHEM", + "PKTABLE_NAME", + "PKCOLUMN_NAME", + "FKTABLE_CAT", + "FKTABLE_SCHEM", + "FKTABLE_NAME", + "FKCOLUMN_NAME", + "KEY_SEQ", SMALLINT, + "UPDATE_RULE ", SMALLINT, + "DELETE_RULE ", SMALLINT, + "FK_NAME", + "PK_NAME ", + "DEFERRABILITY", SMALLINT, + "IS_NULLABLE" + ); } @Override public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { - throw new SQLFeatureNotSupportedException("Exported keys not supported"); + return emptySet(con.cfg, "", + "PKTABLE_CAT", + "PKTABLE_SCHEM", + "PKTABLE_NAME", + "PKCOLUMN_NAME", + "FKTABLE_CAT", + "FKTABLE_SCHEM", + "FKTABLE_NAME", + "FKCOLUMN_NAME", + "KEY_SEQ", SMALLINT, + "UPDATE_RULE ", SMALLINT, + "DELETE_RULE ", SMALLINT, + "FK_NAME", + "PK_NAME ", + "DEFERRABILITY", SMALLINT, + "IS_NULLABLE" + ); } @Override public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { - throw new SQLFeatureNotSupportedException("Cross reference not supported"); + return emptySet(con.cfg, "", + "PKTABLE_CAT", + "PKTABLE_SCHEM", + "PKTABLE_NAME", + "PKCOLUMN_NAME", + "FKTABLE_CAT", + "FKTABLE_SCHEM", + "FKTABLE_NAME", + "FKCOLUMN_NAME", + "KEY_SEQ", SMALLINT, + "UPDATE_RULE ", SMALLINT, + "DELETE_RULE ", SMALLINT, + "FK_NAME", + 
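// With the JdbcDatabaseMetaData changes in this file, metadata calls such as getPrimaryKeys() and
// getImportedKeys() return empty result sets instead of throwing SQLFeatureNotSupportedException, so
// generic JDBC tooling can probe them safely. A sketch of how a client would now consume them using the
// standard java.sql API; the URL and table name are placeholders and the driver is assumed to be on the
// classpath.
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;

class MetaDataProbeExample {
    public static void main(String[] args) throws SQLException {
        try (Connection con = DriverManager.getConnection("jdbc:es://localhost:9200")) {
            DatabaseMetaData meta = con.getMetaData();
            try (ResultSet keys = meta.getPrimaryKeys(null, null, "library")) {
                while (keys.next()) {                // loop body never runs: the result set is empty
                    System.out.println(keys.getString("COLUMN_NAME"));
                }
            }
        }
    }
}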
"PK_NAME ", + "DEFERRABILITY", SMALLINT, + "IS_NULLABLE" + ); } @Override @@ -844,7 +923,22 @@ public ResultSet getTypeInfo() throws SQLException { @Override public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { - throw new SQLFeatureNotSupportedException("Indicies not supported"); + return emptySet(con.cfg, "", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + "NON_UNIQUE", BOOLEAN, + "INDEX_QUALIFIER", + "INDEX_NAME", + "TYPE", SMALLINT, + "ORDINAL_POSITION", SMALLINT, + "COLUMN_NAME", + "ASC_OR_DESC", + "CARDINALITY", BIGINT, + "PAGES", BIGINT, + "FILTER_CONDITION", + "TYPE_NAME" + ); } @Override @@ -909,7 +1003,7 @@ public boolean supportsBatchUpdates() throws SQLException { @Override public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException { - return emptySet(con.cfg, + return emptySet(con.cfg, "", "USER_DEFINED_TYPES", "TYPE_CAT", "TYPE_SCHEM", @@ -947,7 +1041,7 @@ public boolean supportsGetGeneratedKeys() throws SQLException { @Override public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { - return emptySet(con.cfg, + return emptySet(con.cfg, "", "SUPER_TYPES", "TYPE_CAT", "TYPE_SCHEM", @@ -960,7 +1054,7 @@ public ResultSet getSuperTypes(String catalog, String schemaPattern, String type @Override public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { - return emptySet(con.cfg, "SUPER_TABLES", + return emptySet(con.cfg, "", "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", @@ -970,7 +1064,7 @@ public ResultSet getSuperTables(String catalog, String schemaPattern, String tab @Override public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) throws SQLException { - return emptySet(con.cfg, + return emptySet(con.cfg, "", "ATTRIBUTES", "TYPE_CAT", "TYPE_SCHEM", @@ -1057,12 +1151,27 @@ public boolean autoCommitFailureClosesAllResultSets() throws SQLException { @Override public ResultSet getClientInfoProperties() throws SQLException { - throw new SQLException("Client info not implemented yet"); + DriverPropertyInfo[] info = con.cfg.driverPropertyInfo(); + Object[][] data = new Object[info.length][]; + + for (int i = 0; i < data.length; i++) { + data[i] = new Object[4]; + data[i][0] = info[i].name; + data[i][1] = Integer.valueOf(-1); + data[i][2] = EMPTY; + data[i][3] = EMPTY; + } + + return memorySet(con.cfg, columnInfo("", + "NAME", + "MAX_LEN", INTEGER, + "DEFAULT_VALUE", + "DESCRIPTION"), data); } @Override public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException { - return emptySet(con.cfg, + return emptySet(con.cfg, "", "FUNCTIONS", "FUNCTION_CAT", "FUNCTION_SCHEM", @@ -1075,7 +1184,7 @@ public ResultSet getFunctions(String catalog, String schemaPattern, String funct @Override public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) throws SQLException { - return emptySet(con.cfg, + return emptySet(con.cfg, "", "FUNCTION_COLUMNS", "FUNCTION_CAT", "FUNCTION_SCHEM", @@ -1098,7 +1207,7 @@ public ResultSet getFunctionColumns(String catalog, String schemaPattern, String @Override public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { - return 
emptySet(con.cfg, + return emptySet(con.cfg, "", "PSEUDO_COLUMNS", "TABLE_CAT", "TABLE_SCHEM", @@ -1213,7 +1322,7 @@ public Object column(int column) { @Override public int batchSize() { - return data.length; + return ObjectUtils.isEmpty(data) ? 0 : data.length; } @Override diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java index f034f67f186e5..c0f2e6e46ea03 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java @@ -9,6 +9,7 @@ import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; +import java.time.LocalDate; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; @@ -26,9 +27,11 @@ * from {@code org.elasticsearch.xpack.sql.util.DateUtils} and {@code org.elasticsearch.xpack.sql.proto.StringUtils}. */ final class JdbcDateUtils { - - private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000; - + + private JdbcDateUtils() {} + + private static final LocalDate EPOCH = LocalDate.of(1970, 1, 1); + static final DateTimeFormatter ISO_WITH_MILLIS = new DateTimeFormatterBuilder() .parseCaseInsensitive() .append(ISO_LOCAL_DATE) @@ -42,24 +45,34 @@ final class JdbcDateUtils { .appendOffsetId() .toFormatter(Locale.ROOT); + private static ZonedDateTime asDateTime(String date) { + return ISO_WITH_MILLIS.parse(date, ZonedDateTime::from); + } + static long asMillisSinceEpoch(String date) { - return ISO_WITH_MILLIS.parse(date, ZonedDateTime::from).toInstant().toEpochMilli(); + return asDateTime(date).toInstant().toEpochMilli(); } - + static Date asDate(String date) { - return new Date(utcMillisRemoveTime(asMillisSinceEpoch(date))); + ZonedDateTime zdt = asDateTime(date); + return new Date(zdt.toLocalDate().atStartOfDay(zdt.getZone()).toInstant().toEpochMilli()); } - + static Time asTime(String date) { - return new Time(utcMillisRemoveDate(asMillisSinceEpoch(date))); + ZonedDateTime zdt = asDateTime(date); + return new Time(zdt.toLocalTime().atDate(EPOCH).atZone(zdt.getZone()).toInstant().toEpochMilli()); } - + + static Timestamp asTimestamp(long millisSinceEpoch) { + return new Timestamp(millisSinceEpoch); + } + static Timestamp asTimestamp(String date) { return new Timestamp(asMillisSinceEpoch(date)); } - + /* - * Handles the value received as parameter, as either String (a ZonedDateTime formatted in ISO 8601 standard with millis) - + * Handles the value received as parameter, as either String (a ZonedDateTime formatted in ISO 8601 standard with millis) - * date fields being returned formatted like this. Or a Long value, in case of Histograms. 
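     * For illustration only (an editorial sketch, not part of the change itself): callers such as TypeConverter pass
     * both conversions side by side, e.g.
     *   asDateTimeField(value, JdbcDateUtils::asTimestamp, Timestamp::new);
     * so a String like "2019-01-31T12:15:30.123Z" goes through the ISO parser, while a Long value (a histogram key)
     * is handed straight to the Timestamp constructor.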
*/ static R asDateTimeField(Object value, Function asDateTimeMethod, Function ctor) { @@ -69,12 +82,4 @@ static R asDateTimeField(Object value, Function asDateTimeMethod, return ctor.apply(((Number) value).longValue()); } } - - static long utcMillisRemoveTime(long l) { - return l - (l % DAY_IN_MILLIS); - } - - private static long utcMillisRemoveDate(long l) { - return l % DAY_IN_MILLIS; - } } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java index 73713f91231d6..5954d2b6c636f 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java @@ -12,10 +12,9 @@ import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.proto.MainResponse; import org.elasticsearch.xpack.sql.proto.Mode; -import org.elasticsearch.xpack.sql.proto.Protocol; +import org.elasticsearch.xpack.sql.proto.RequestInfo; import org.elasticsearch.xpack.sql.proto.SqlQueryRequest; import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; -import org.elasticsearch.xpack.sql.proto.RequestInfo; import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.sql.SQLException; @@ -38,8 +37,16 @@ class JdbcHttpClient { * If we remove it, we need to make sure no other types of Exceptions (runtime or otherwise) are thrown */ JdbcHttpClient(JdbcConfiguration conCfg) throws SQLException { + this(conCfg, true); + } + + JdbcHttpClient(JdbcConfiguration conCfg, boolean checkServer) throws SQLException { httpClient = new HttpClient(conCfg); this.conCfg = conCfg; + if (checkServer) { + this.serverInfo = fetchServerInfo(); + checkServerVersion(); + } } boolean ping(long timeoutInMs) throws SQLException { @@ -48,10 +55,14 @@ boolean ping(long timeoutInMs) throws SQLException { Cursor query(String sql, List params, RequestMeta meta) throws SQLException { int fetch = meta.fetchSize() > 0 ? 
meta.fetchSize() : conCfg.pageSize(); - SqlQueryRequest sqlRequest = new SqlQueryRequest(sql, params, null, Protocol.TIME_ZONE, + SqlQueryRequest sqlRequest = new SqlQueryRequest(sql, params, conCfg.zoneId(), fetch, - TimeValue.timeValueMillis(meta.timeoutInMs()), TimeValue.timeValueMillis(meta.queryTimeoutInMs()), - new RequestInfo(Mode.JDBC)); + TimeValue.timeValueMillis(meta.timeoutInMs()), + TimeValue.timeValueMillis(meta.queryTimeoutInMs()), + null, + null, + new RequestInfo(Mode.JDBC), + conCfg.fieldMultiValueLeniency()); SqlQueryResponse response = httpClient.query(sqlRequest); return new DefaultCursor(this, response.cursor(), toJdbcColumnInfo(response.columns()), response.rows(), meta); } @@ -81,7 +92,16 @@ InfoResponse serverInfo() throws SQLException { private InfoResponse fetchServerInfo() throws SQLException { MainResponse mainResponse = httpClient.serverInfo(); Version version = Version.fromString(mainResponse.getVersion()); - return new InfoResponse(mainResponse.getClusterName(), version.major, version.minor); + return new InfoResponse(mainResponse.getClusterName(), version.major, version.minor, version.revision); + } + + private void checkServerVersion() throws SQLException { + if (serverInfo.majorVersion != Version.CURRENT.major + || serverInfo.minorVersion != Version.CURRENT.minor + || serverInfo.revisionVersion != Version.CURRENT.revision) { + throw new SQLException("This version of the JDBC driver is only compatible with Elasticsearch version " + + Version.CURRENT.toString() + ", attempting to connect to a server version " + serverInfo.versionString()); + } } /** @@ -94,4 +114,4 @@ private List toJdbcColumnInfo(List columns) throws S } return cols; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java index f1bce51dd3464..14e8340a54424 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java @@ -35,7 +35,6 @@ import static java.lang.String.format; import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.asDateTimeField; import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.asMillisSinceEpoch; -import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.utcMillisRemoveTime; class JdbcResultSet implements ResultSet, JdbcWrapper { @@ -91,6 +90,10 @@ private int column(String columnName) throws SQLException { return index.intValue(); } + private EsType columnType(int columnIndex) { + return cursor.columns().get(columnIndex - 1).type; + } + void checkOpen() throws SQLException { if (isClosed()) { throw new SQLException("Closed result set"); @@ -175,17 +178,17 @@ public byte[] getBytes(int columnIndex) throws SQLException { @Override public Date getDate(int columnIndex) throws SQLException { - return getDate(columnIndex, null); + return asDate(columnIndex); } @Override public Time getTime(int columnIndex) throws SQLException { - return getTime(columnIndex, null); + return asTime(columnIndex); } @Override public Timestamp getTimestamp(int columnIndex) throws SQLException { - return getTimestamp(columnIndex, null); + return asTimeStamp(columnIndex); } @Override @@ -241,9 +244,9 @@ public Date getDate(String columnLabel) throws SQLException { return getDate(column(columnLabel)); } - private Long dateTime(int columnIndex) throws SQLException { + private Long 
dateTimeAsMillis(int columnIndex) throws SQLException { Object val = column(columnIndex); - EsType type = cursor.columns().get(columnIndex - 1).type; + EsType type = columnType(columnIndex); try { // TODO: the B6 appendix of the jdbc spec does mention CHAR, VARCHAR, LONGVARCHAR, DATE, TIMESTAMP as supported // jdbc types that should be handled by getDate and getTime methods. From all of those we support VARCHAR and @@ -258,7 +261,7 @@ private Long dateTime(int columnIndex) throws SQLException { return asDateTimeField(val, JdbcDateUtils::asMillisSinceEpoch, Function.identity()); } if (EsType.DATE == type) { - return utcMillisRemoveTime(asMillisSinceEpoch(val.toString())); + return asMillisSinceEpoch(val.toString()); } return val == null ? null : (Long) val; } catch (ClassCastException cce) { @@ -267,13 +270,68 @@ private Long dateTime(int columnIndex) throws SQLException { } } + private Date asDate(int columnIndex) throws SQLException { + Object val = column(columnIndex); + + if (val == null) { + return null; + } + + try { + return JdbcDateUtils.asDate(val.toString()); + } catch (Exception e) { + EsType type = columnType(columnIndex); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Date", val, type.getName()), e); + } + } + + private Time asTime(int columnIndex) throws SQLException { + Object val = column(columnIndex); + + if (val == null) { + return null; + } + + EsType type = columnType(columnIndex); + if (type == EsType.DATE) { + return new Time(0L); + } + + try { + return JdbcDateUtils.asTime(val.toString()); + } catch (Exception e) { + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Time", val, type.getName()), e); + } + } + + private Timestamp asTimeStamp(int columnIndex) throws SQLException { + Object val = column(columnIndex); + + if (val == null) { + return null; + } + + try { + if (val instanceof Number) { + return JdbcDateUtils.asTimestamp(((Number) val).longValue()); + } + return JdbcDateUtils.asTimestamp(val.toString()); + } catch (Exception e) { + EsType type = columnType(columnIndex); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Timestamp", val, type.getName()), e); + } + } + private Calendar safeCalendar(Calendar calendar) { return calendar == null ? 
defaultCalendar : calendar; } @Override public Date getDate(int columnIndex, Calendar cal) throws SQLException { - return TypeConverter.convertDate(dateTime(columnIndex), safeCalendar(cal)); + return TypeConverter.convertDate(dateTimeAsMillis(columnIndex), safeCalendar(cal)); } @Override @@ -283,7 +341,11 @@ public Date getDate(String columnLabel, Calendar cal) throws SQLException { @Override public Time getTime(int columnIndex, Calendar cal) throws SQLException { - return TypeConverter.convertTime(dateTime(columnIndex), safeCalendar(cal)); + EsType type = columnType(columnIndex); + if (type == EsType.DATE) { + return new Time(0L); + } + return TypeConverter.convertTime(dateTimeAsMillis(columnIndex), safeCalendar(cal)); } @Override @@ -293,7 +355,7 @@ public Time getTime(String columnLabel) throws SQLException { @Override public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - return TypeConverter.convertTimestamp(dateTime(columnIndex), safeCalendar(cal)); + return TypeConverter.convertTimestamp(dateTimeAsMillis(columnIndex), safeCalendar(cal)); } @Override @@ -337,7 +399,7 @@ private T convert(int columnIndex, Class type) throws SQLException { return null; } - EsType columnType = cursor.columns().get(columnIndex - 1).type; + EsType columnType = columnType(columnIndex); String typeString = type != null ? type.getSimpleName() : columnType.getName(); return TypeConverter.convert(val, columnType, type, typeString); @@ -1141,6 +1203,7 @@ public void updateNClob(String columnLabel, Reader reader) throws SQLException { @Override public String toString() { - return format(Locale.ROOT, "%s:row %d", getClass().getSimpleName(), rowNumber); + return format(Locale.ROOT, "%s:row %d:cursor size %d:%s", getClass().getSimpleName(), rowNumber, cursor.batchSize(), + cursor.columns()); } } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSetMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSetMetaData.java index 8a819143881b7..d860c6daaa1ab 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSetMetaData.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSetMetaData.java @@ -11,6 +11,7 @@ import java.util.Locale; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.client.StringUtils.EMPTY; class JdbcResultSetMetaData implements ResultSetMetaData, JdbcWrapper { @@ -70,7 +71,8 @@ public int getColumnDisplaySize(int column) throws SQLException { @Override public String getColumnLabel(int column) throws SQLException { - return column(column).label; + JdbcColumnInfo info = column(column); + return true == EMPTY.equals(info.label) ? info.name : info.label; } @Override diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcTestUtils.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcTestUtils.java new file mode 100644 index 0000000000000..a956ab3fdf5db --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcTestUtils.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc; + +import java.time.Clock; +import java.time.Duration; +import java.time.ZoneId; +import java.time.ZonedDateTime; + +final class JdbcTestUtils { + + private JdbcTestUtils() {} + + static ZonedDateTime nowWithMillisResolution(ZoneId zoneId) { + Clock millisResolutionClock = Clock.tick(Clock.system(zoneId), Duration.ofMillis(1)); + return ZonedDateTime.now(millisResolutionClock); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java index 469a2d37e5ef4..d08496b611e0c 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java @@ -232,6 +232,8 @@ static Object convert(Object v, EsType columnType, String typeString) throws SQL case INTERVAL_HOUR_TO_SECOND: case INTERVAL_MINUTE_TO_SECOND: return Duration.parse(v.toString()); + case IP: + return v.toString(); default: throw new SQLException("Unexpected column type [" + typeString + "]"); @@ -469,7 +471,7 @@ private static Double asDouble(Object val, EsType columnType, String typeString) } private static Date asDate(Object val, EsType columnType, String typeString) throws SQLException { - if (columnType == EsType.DATETIME) { + if (columnType == EsType.DATETIME || columnType == EsType.DATE) { return JdbcDateUtils.asDateTimeField(val, JdbcDateUtils::asDate, Date::new); } return failConversion(val, columnType, typeString, Date.class); @@ -479,11 +481,14 @@ private static Time asTime(Object val, EsType columnType, String typeString) thr if (columnType == EsType.DATETIME) { return JdbcDateUtils.asDateTimeField(val, JdbcDateUtils::asTime, Time::new); } + if (columnType == EsType.DATE) { + return new Time(0L); + } return failConversion(val, columnType, typeString, Time.class); } private static Timestamp asTimestamp(Object val, EsType columnType, String typeString) throws SQLException { - if (columnType == EsType.DATETIME) { + if (columnType == EsType.DATETIME || columnType == EsType.DATE) { return JdbcDateUtils.asDateTimeField(val, JdbcDateUtils::asTimestamp, Timestamp::new); } return failConversion(val, columnType, typeString, Timestamp.class); diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/ColumnInfoTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/ColumnInfoTests.java index fc4e926025ed5..1bd92a20e86f8 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/ColumnInfoTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/ColumnInfoTests.java @@ -6,8 +6,6 @@ package org.elasticsearch.xpack.sql.jdbc; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.sql.jdbc.EsType; -import org.elasticsearch.xpack.sql.jdbc.JdbcColumnInfo; import static org.elasticsearch.xpack.sql.client.StringUtils.EMPTY; diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationDataSourceTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationDataSourceTests.java new file mode 100644 index 0000000000000..d326c0e624d49 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationDataSourceTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.jdbc; + +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.http.MockResponse; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.sql.SQLException; +import java.util.Map; +import java.util.Properties; +import java.util.stream.Collectors; + +public class JdbcConfigurationDataSourceTests extends WebServerTestCase { + + public void testDataSourceConfigurationWithSSLInURL() throws SQLException, URISyntaxException, IOException { + webServer().enqueue(new MockResponse().setResponseCode(200).addHeader("Content-Type", "application/json").setBody( + XContentHelper.toXContent(createCurrentVersionMainResponse(), XContentType.JSON, false).utf8ToString())); + + Map urlPropMap = JdbcConfigurationTests.sslProperties(); + Properties allProps = new Properties(); + allProps.putAll(urlPropMap); + String sslUrlProps = urlPropMap.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining("&")); + + EsDataSource dataSource = new EsDataSource(); + String address = "jdbc:es://" + webServerAddress() + "/?" + sslUrlProps; + dataSource.setUrl(address); + JdbcConnection connection = null; + + try { + connection = (JdbcConnection) dataSource.getConnection(); + } catch (SQLException sqle) { + fail("Connection creation should have been successful. Error: " + sqle); + } + + assertEquals(address, connection.getURL()); + JdbcConfigurationTests.assertSslConfig(allProps, connection.cfg.sslConfig()); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java index dac9dbba61776..a8495fbf57117 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java @@ -5,11 +5,15 @@ */ package org.elasticsearch.xpack.sql.jdbc; +import org.elasticsearch.SpecialPermission; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.client.SslConfig; +import org.elasticsearch.xpack.sql.client.SuppressForbidden; import java.net.URI; import java.net.URISyntaxException; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.sql.DriverManager; import java.sql.SQLException; import java.util.HashMap; @@ -18,7 +22,11 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.sql.client.ConnectionConfiguration.CONNECT_TIMEOUT; +import static org.elasticsearch.xpack.sql.client.ConnectionConfiguration.NETWORK_TIMEOUT; +import static org.elasticsearch.xpack.sql.client.ConnectionConfiguration.PAGE_SIZE; import static org.elasticsearch.xpack.sql.client.ConnectionConfiguration.PAGE_TIMEOUT; +import static org.elasticsearch.xpack.sql.client.ConnectionConfiguration.PROPERTIES_VALIDATION; +import static org.elasticsearch.xpack.sql.client.ConnectionConfiguration.QUERY_TIMEOUT; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -69,7 +77,7 @@ public void testDebugOut() throws Exception { public void testTypeInParam() throws Exception { Exception e = 
expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://a:1/foo/bar/tar?debug=true&debug.out=jdbc.out")); - assertEquals("Unknown parameter [debug.out] ; did you mean [debug.output]", e.getMessage()); + assertEquals("Unknown parameter [debug.out]; did you mean [debug.output]", e.getMessage()); } public void testDebugOutWithSuffix() throws Exception { @@ -113,6 +121,66 @@ public void testHttpWithSSLDisabledFromPropertyAndEnabledFromProtocol() throws E Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://https://test?ssl=false")); assertEquals("Cannot enable SSL: HTTPS protocol being used in the URL and SSL disabled in properties", e.getMessage()); } + + public void testValidatePropertiesDefault() { + Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://test:9200?pagee.size=12")); + assertEquals("Unknown parameter [pagee.size]; did you mean [page.size]", e.getMessage()); + + e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://test:9200?foo=bar")); + assertEquals("Unknown parameter [foo]; did you mean [ssl]", e.getMessage()); + } + + public void testValidateProperties() { + Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://test:9200?pagee.size=12&validate.properties=true")); + assertEquals("Unknown parameter [pagee.size]; did you mean [page.size]", e.getMessage()); + + e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://test:9200?&validate.properties=true&something=some_value")); + assertEquals("Unknown parameter [something]; did you mean []", e.getMessage()); + + Properties properties = new Properties(); + properties.setProperty(PROPERTIES_VALIDATION, "true"); + e = expectThrows(JdbcSQLException.class, () -> JdbcConfiguration.create("jdbc:es://test:9200?something=some_value", properties, 0)); + assertEquals("Unknown parameter [something]; did you mean []", e.getMessage()); + } + + public void testNoPropertiesValidation() throws SQLException { + JdbcConfiguration ci = ci("jdbc:es://test:9200?pagee.size=12&validate.properties=false"); + assertEquals(false, ci.validateProperties()); + + // URL properties test + long queryTimeout = randomNonNegativeLong(); + long connectTimeout = randomNonNegativeLong(); + long networkTimeout = randomNonNegativeLong(); + long pageTimeout = randomNonNegativeLong(); + int pageSize = randomIntBetween(0, Integer.MAX_VALUE); + + ci = ci("jdbc:es://test:9200?validate.properties=false&something=some_value&query.timeout=" + queryTimeout + "&connect.timeout=" + + connectTimeout + "&network.timeout=" + networkTimeout + "&page.timeout=" + pageTimeout + "&page.size=" + pageSize); + assertEquals(false, ci.validateProperties()); + assertEquals(queryTimeout, ci.queryTimeout()); + assertEquals(connectTimeout, ci.connectTimeout()); + assertEquals(networkTimeout, ci.networkTimeout()); + assertEquals(pageTimeout, ci.pageTimeout()); + assertEquals(pageSize, ci.pageSize()); + + // Properties test + Properties properties = new Properties(); + properties.setProperty(PROPERTIES_VALIDATION, "false"); + properties.put(QUERY_TIMEOUT, Long.toString(queryTimeout)); + properties.put(PAGE_TIMEOUT, Long.toString(pageTimeout)); + properties.put(CONNECT_TIMEOUT, Long.toString(connectTimeout)); + properties.put(NETWORK_TIMEOUT, Long.toString(networkTimeout)); + properties.put(PAGE_SIZE, Integer.toString(pageSize)); + + // also putting validate.properties in URL to be overriden by the properties value + ci = JdbcConfiguration.create("jdbc:es://test:9200?validate.properties=true&something=some_value", properties, 0); + 
assertEquals(false, ci.validateProperties()); + assertEquals(queryTimeout, ci.queryTimeout()); + assertEquals(connectTimeout, ci.connectTimeout()); + assertEquals(networkTimeout, ci.networkTimeout()); + assertEquals(pageTimeout, ci.pageTimeout()); + assertEquals(pageSize, ci.pageSize()); + } public void testTimoutOverride() throws Exception { Properties properties = new Properties(); @@ -188,13 +256,20 @@ public void testSSLPropertiesOverride() throws Exception { assertSslConfig(props, JdbcConfiguration.create("jdbc:es://test?" + sslUrlProps.toString(), props, 0).sslConfig()); } + @SuppressForbidden(reason = "JDBC drivers allows logging to Sys.out") public void testDriverConfigurationWithSSLInURL() { Map urlPropMap = sslProperties(); - - Properties allProps = new Properties(); - allProps.putAll(urlPropMap); String sslUrlProps = urlPropMap.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining("&")); + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new SpecialPermission()); + } + AccessController.doPrivileged((PrivilegedAction) () -> { + DriverManager.setLogWriter(new java.io.PrintWriter(System.out)); + return null; + }); + try { DriverManager.getDriver("jdbc:es://test?" + sslUrlProps); } catch (SQLException sqle) { @@ -202,28 +277,6 @@ public void testDriverConfigurationWithSSLInURL() { } } - public void testDataSourceConfigurationWithSSLInURL() throws SQLException, URISyntaxException { - Map urlPropMap = sslProperties(); - - Properties allProps = new Properties(); - allProps.putAll(urlPropMap); - String sslUrlProps = urlPropMap.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining("&")); - - EsDataSource dataSource = new EsDataSource(); - String address = "jdbc:es://test?" + sslUrlProps; - dataSource.setUrl(address); - JdbcConnection connection = null; - - try { - connection = (JdbcConnection) dataSource.getConnection(); - } catch (SQLException sqle) { - fail("Connection creation should have been successful. Error: " + sqle); - } - - assertEquals(address, connection.getURL()); - assertSslConfig(allProps, connection.cfg.sslConfig()); - } - public void testTyposInSslConfigInUrl(){ assertJdbcSqlExceptionFromUrl("ssl.protocl", "ssl.protocol"); assertJdbcSqlExceptionFromUrl("sssl", "ssl"); @@ -246,7 +299,7 @@ public void testTyposInSslConfigInProperties() { assertJdbcSqlExceptionFromProperties("ssl.ruststore.type", "ssl.truststore.type"); } - private Map sslProperties() { + static Map sslProperties() { Map sslPropertiesMap = new HashMap<>(8); // always using "false" so that the SSLContext doesn't actually start verifying the keystore and trustore // locations, as we don't have file permissions to access them. 
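        // (editorial note, for context: sslProperties() and assertSslConfig() are made static in this change so that
        // the new JdbcConfigurationDataSourceTests shown earlier can reuse them for the DataSource variant of the
        // SSL-in-URL test)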
@@ -262,7 +315,7 @@ private Map sslProperties() { return sslPropertiesMap; } - private void assertSslConfig(Properties allProperties, SslConfig sslConfig) throws URISyntaxException { + static void assertSslConfig(Properties allProperties, SslConfig sslConfig) throws URISyntaxException { // because SslConfig doesn't expose its internal properties (and it shouldn't), // we compare a newly created SslConfig with the one from the JdbcConfiguration with the equals() method SslConfig mockSslConfig = new SslConfig(allProperties, new URI("http://test:9200/")); @@ -284,6 +337,6 @@ private void assertJdbcSqlExceptionFromProperties(String wrongSetting, String co private void assertJdbcSqlException(String wrongSetting, String correctSetting, String url, Properties props) { JdbcSQLException ex = expectThrows(JdbcSQLException.class, () -> JdbcConfiguration.create(url, props, 0)); - assertEquals("Unknown parameter [" + wrongSetting + "] ; did you mean [" + correctSetting + "]", ex.getMessage()); + assertEquals("Unknown parameter [" + wrongSetting + "]; did you mean [" + correctSetting + "]", ex.getMessage()); } } diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaDataTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaDataTests.java index e45c743cf390a..2ce98f8446a04 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaDataTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaDataTests.java @@ -6,17 +6,124 @@ package org.elasticsearch.xpack.sql.jdbc; +import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.sql.jdbc.JdbcDatabaseMetaData; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Properties; public class JdbcDatabaseMetaDataTests extends ESTestCase { - private JdbcDatabaseMetaData md = new JdbcDatabaseMetaData(null); + private JdbcDatabaseMetaData md = null; + + { + try { + md = new JdbcDatabaseMetaData( + new JdbcConnection(JdbcConfiguration.create("jdbc:es://localhost:9200/", new Properties(), 10), false)); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } public void testSeparators() throws Exception { assertEquals(":", md.getCatalogSeparator()); assertEquals("\"", md.getIdentifierQuoteString()); assertEquals("\\", md.getSearchStringEscape()); - } -} + + public void testGetProcedures() throws Exception { + testEmptySet(() -> md.getProcedures(null, null, null)); + } + + public void testGetProcedureColumns() throws Exception { + testEmptySet(() -> md.getProcedureColumns(null, null, null, null)); + } + + public void testGetColumnPrivileges() throws Exception { + testEmptySet(() -> md.getColumnPrivileges(null, null, null, null)); + } + + public void testGetTablePrivileges() throws Exception { + testEmptySet(() -> md.getTablePrivileges(null, null, null)); + } + + public void testGetBestRowIdentifier() throws Exception { + testEmptySet(() -> md.getBestRowIdentifier(null, null, null, 0, false)); + } + + public void testGetVersionColumns() throws Exception { + testEmptySet(() -> md.getVersionColumns(null, null, null)); + } + + public void testGetPrimaryKeys() throws Exception { + testEmptySet(() -> md.getPrimaryKeys(null, null, null)); + } + + public void testGetImportedKeys() throws Exception { + testEmptySet(() -> md.getImportedKeys(null, null, null)); + } + + public void testGetExportedKeys() throws 
Exception { + testEmptySet(() -> md.getExportedKeys(null, null, null)); + } + + public void testGetCrossReference() throws Exception { + testEmptySet(() -> md.getCrossReference(null, null, null, null, null, null)); + } + + public void testGetIndexInfo() throws Exception { + testEmptySet(() -> md.getIndexInfo(null, null, null, false, false)); + } + + public void testGetUDTs() throws Exception { + testEmptySet(() -> md.getUDTs(null, null, null, null)); + } + + public void testGetSuperTypes() throws Exception { + testEmptySet(() -> md.getSuperTypes(null, null, null)); + } + + public void testGetSuperTables() throws Exception { + testEmptySet(() -> md.getSuperTables(null, null, null)); + } + + public void testGetAttributes() throws Exception { + testEmptySet(() -> md.getAttributes(null, null, null, null)); + } + + public void testGetFunctions() throws Exception { + testEmptySet(() -> md.getFunctions(null, null, null)); + } + + public void testGetFunctionColumns() throws Exception { + testEmptySet(() -> md.getFunctionColumns(null, null, null, null)); + } + + public void testGetPseudoColumns() throws Exception { + testEmptySet(() -> md.getPseudoColumns(null, null, null, null)); + } + + public void testGetSchemas() throws Exception { + testEmptySet(() -> md.getSchemas()); + testEmptySet(() -> md.getSchemas(null, null)); + } + + private static void testEmptySet(CheckedSupplier supplier) throws SQLException { + try (ResultSet result = supplier.get()) { + assertNotNull(result); + assertFalse(result.next()); + } + } + + public void testGetClientInfoProperties() throws Exception { + try (ResultSet result = md.getClientInfoProperties()) { + assertNotNull(result); + assertTrue(result.next()); + assertNotNull(result.getString(1)); + assertEquals(-1, result.getInt(2)); + assertEquals("", result.getString(3)); + assertEquals("", result.getString(4)); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSetMetaDataTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSetMetaDataTests.java new file mode 100644 index 0000000000000..6c3b270b73140 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSetMetaDataTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.jdbc; + +import org.elasticsearch.test.ESTestCase; + +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.xpack.sql.client.StringUtils.EMPTY; + +public class JdbcResultSetMetaDataTests extends ESTestCase { + + private final List columns = Arrays.asList( + new JdbcColumnInfo("test_keyword", EsType.KEYWORD, EMPTY, EMPTY, EMPTY, EMPTY, 0), + new JdbcColumnInfo("test_integer", EsType.INTEGER, EMPTY, EMPTY, EMPTY, EMPTY, 11), + new JdbcColumnInfo("test_double", EsType.DOUBLE, EMPTY, EMPTY, EMPTY, EMPTY, 25), + new JdbcColumnInfo("test_long", EsType.LONG, "test_table", "test", "schema", "custom_label", 20) + ); + private final JdbcResultSetMetaData metaData = new JdbcResultSetMetaData(null, columns); + + public void testColumnsProperties() throws SQLException { + int maxColumnIndex = columns.size(); + assertEquals(false, metaData.isAutoIncrement(randomIntBetween(1, maxColumnIndex))); + assertEquals(true, metaData.isCaseSensitive(randomIntBetween(1, maxColumnIndex))); + assertEquals(true, metaData.isSearchable(randomIntBetween(1, maxColumnIndex))); + assertEquals(false, metaData.isCurrency(randomIntBetween(1, maxColumnIndex))); + assertEquals(ResultSetMetaData.columnNullableUnknown, metaData.isNullable(randomIntBetween(1, maxColumnIndex))); + assertEquals(false, metaData.isSigned(1)); + assertEquals(true, metaData.isSigned(2)); + assertEquals(true, metaData.isSigned(3)); + assertEquals(true, metaData.isSigned(4)); + } + + public void testColumnNamesAndLabels() throws SQLException { + assertEquals("test_keyword", metaData.getColumnName(1)); + assertEquals("test_integer", metaData.getColumnName(2)); + assertEquals("test_double", metaData.getColumnName(3)); + assertEquals("test_long", metaData.getColumnName(4)); + + assertEquals("test_keyword", metaData.getColumnLabel(1)); + assertEquals("test_integer", metaData.getColumnLabel(2)); + assertEquals("test_double", metaData.getColumnLabel(3)); + assertEquals("custom_label", metaData.getColumnLabel(4)); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/SqlQueryParameterAnalyzerTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/SqlQueryParameterAnalyzerTests.java index 019073b1b3b39..50623907c93c3 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/SqlQueryParameterAnalyzerTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/SqlQueryParameterAnalyzerTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.jdbc; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.sql.jdbc.SqlQueryParameterAnalyzer; import java.sql.SQLException; @@ -54,7 +53,7 @@ public void testUnclosedMultilineComment() { assertEquals("Cannot parse given sql; unclosed /* comment", exception.getMessage()); } - public void testUnclosedSingleQuoteStrign() { + public void testUnclosedSingleQuoteString() { SQLException exception = expectThrows(SQLException.class, () -> SqlQueryParameterAnalyzer.parametersCount("SELECT ' '' '' ")); assertEquals("Cannot parse given sql; unclosed string", exception.getMessage()); } diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/TypeConverterTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/TypeConverterTests.java index 2e33f4e130741..206d12bafd80c 100644 --- 
a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/TypeConverterTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/TypeConverterTests.java @@ -10,16 +10,19 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTime; -import org.joda.time.ReadableDateTime; +import java.sql.Date; import java.sql.Timestamp; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import static org.elasticsearch.xpack.sql.jdbc.JdbcTestUtils.nowWithMillisResolution; import static org.hamcrest.Matchers.instanceOf; public class TypeConverterTests extends ESTestCase { + private static final ZoneId UTC = ZoneId.of("Z"); public void testFloatAsNative() throws Exception { assertThat(convertAsNative(42.0f, EsType.FLOAT), instanceOf(Float.class)); @@ -40,9 +43,22 @@ public void testDoubleAsNative() throws Exception { } public void testTimestampAsNative() throws Exception { - DateTime now = DateTime.now(); - assertThat(convertAsNative(now, EsType.DATETIME), instanceOf(Timestamp.class)); - assertEquals(now.getMillis(), ((Timestamp) convertAsNative(now, EsType.DATETIME)).getTime()); + ZonedDateTime now = nowWithMillisResolution(UTC); + Object nativeObject = convertAsNative(now, EsType.DATETIME); + assertThat(nativeObject, instanceOf(Timestamp.class)); + assertEquals(now.toInstant().toEpochMilli(), ((Timestamp) nativeObject).getTime()); + } + + public void testDateAsNative() throws Exception { + ZonedDateTime now = nowWithMillisResolution(UTC); + Object nativeObject = convertAsNative(now, EsType.DATE); + assertThat(nativeObject, instanceOf(Date.class)); + assertEquals(now.toLocalDate().atStartOfDay(UTC).toInstant().toEpochMilli(), ((Date) nativeObject).getTime()); + + now = nowWithMillisResolution(ZoneId.of("Etc/GMT-10")); + nativeObject = convertAsNative(now, EsType.DATE); + assertThat(nativeObject, instanceOf(Date.class)); + assertEquals(now.toLocalDate().atStartOfDay(ZoneId.of("Etc/GMT-10")).toInstant().toEpochMilli(), ((Date) nativeObject).getTime()); } private Object convertAsNative(Object value, EsType type) throws Exception { @@ -50,11 +66,7 @@ private Object convertAsNative(Object value, EsType type) throws Exception { XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); builder.field("value"); - if (value instanceof ReadableDateTime) { - builder.value(((ReadableDateTime) value).getMillis()); - } else { - builder.value(value); - } + builder.value(value); builder.endObject(); builder.close(); Object copy = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2().get("value"); diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java new file mode 100644 index 0000000000000..d4ce531cd5f0f --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionParityTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.jdbc; + +import org.elasticsearch.Version; +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.http.MockResponse; + +import java.io.IOException; +import java.sql.SQLException; + +/** + * Test class for JDBC-ES server versions checks. + * + * It's using a {@code MockWebServer} to be able to create a response just like the one an ES instance + * would create for a request to "/", where the ES version used is configurable. + */ +public class VersionParityTests extends WebServerTestCase { + + public void testExceptionThrownOnIncompatibleVersions() throws IOException, SQLException { + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.CURRENT)); + prepareRequest(version); + + String url = JdbcConfiguration.URL_PREFIX + webServer().getHostName() + ":" + webServer().getPort(); + SQLException ex = expectThrows(SQLException.class, () -> new JdbcHttpClient(JdbcConfiguration.create(url, null, 0))); + assertEquals("This version of the JDBC driver is only compatible with Elasticsearch version " + + org.elasticsearch.xpack.sql.client.Version.CURRENT.toString() + + ", attempting to connect to a server version " + version.toString(), ex.getMessage()); + } + + public void testNoExceptionThrownForCompatibleVersions() throws IOException { + prepareRequest(null); + + String url = JdbcConfiguration.URL_PREFIX + webServerAddress(); + try { + new JdbcHttpClient(JdbcConfiguration.create(url, null, 0)); + } catch (SQLException sqle) { + fail("JDBC driver version and Elasticsearch server version should be compatible. Error: " + sqle); + } + } + + void prepareRequest(Version version) throws IOException { + MainResponse response = version == null ? createCurrentVersionMainResponse() : createMainResponse(version); + webServer().enqueue(new MockResponse().setResponseCode(200).addHeader("Content-Type", "application/json").setBody( + XContentHelper.toXContent(response, XContentType.JSON, false).utf8ToString())); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/WebServerTestCase.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/WebServerTestCase.java new file mode 100644 index 0000000000000..50f27f9ecf39a --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/WebServerTestCase.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.jdbc; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockWebServer; +import org.junit.After; +import org.junit.Before; + +import java.util.Date; + +/** + * Base class for unit tests that need a web server for basic tests. 
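 *
 * Illustration only (mirroring VersionParityTests and JdbcConfigurationDataSourceTests earlier in this change):
 * a typical subclass enqueues a "/" response built from createCurrentVersionMainResponse() and points the driver
 * at webServerAddress(), e.g.
 *
 *   webServer().enqueue(new MockResponse().setResponseCode(200)
 *       .addHeader("Content-Type", "application/json")
 *       .setBody(XContentHelper.toXContent(createCurrentVersionMainResponse(), XContentType.JSON, false).utf8ToString()));
 *   String url = JdbcConfiguration.URL_PREFIX + webServerAddress();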
+ */ +public abstract class WebServerTestCase extends ESTestCase { + + private MockWebServer webServer = new MockWebServer(); + + @Before + public void init() throws Exception { + webServer.start(); + } + + @After + public void cleanup() { + webServer.close(); + } + + public MockWebServer webServer() { + return webServer; + } + + MainResponse createCurrentVersionMainResponse() { + return createMainResponse(Version.CURRENT); + } + + MainResponse createMainResponse(Version version) { + String clusterUuid = randomAlphaOfLength(10); + ClusterName clusterName = new ClusterName(randomAlphaOfLength(10)); + String nodeName = randomAlphaOfLength(10); + final String date = new Date(randomNonNegativeLong()).toString(); + Build build = new Build( + Build.Flavor.UNKNOWN, Build.Type.UNKNOWN, randomAlphaOfLength(8), date, randomBoolean(), + version.toString() + ); + return new MainResponse(nodeName, version, clusterName, clusterUuid , build); + } + + String webServerAddress() { + return webServer.getHostName() + ":" + webServer.getPort(); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/test/resources/plugin-security.policy b/x-pack/plugin/sql/jdbc/src/test/resources/plugin-security.policy index 5f16c1579b0be..577795ffb7842 100644 --- a/x-pack/plugin/sql/jdbc/src/test/resources/plugin-security.policy +++ b/x-pack/plugin/sql/jdbc/src/test/resources/plugin-security.policy @@ -1,4 +1,6 @@ grant { // Required for testing the Driver registration permission java.sql.SQLPermission "deregisterDriver"; + // Required for debug logging purposes + permission java.sql.SQLPermission "setLog"; }; diff --git a/x-pack/plugin/sql/qa/build.gradle b/x-pack/plugin/sql/qa/build.gradle index abbbd6e9663f3..24f5438091eef 100644 --- a/x-pack/plugin/sql/qa/build.gradle +++ b/x-pack/plugin/sql/qa/build.gradle @@ -16,12 +16,21 @@ dependencies { // CLI testing dependencies compile project(path: xpackModule('sql:sql-cli'), configuration: 'nodeps') - compile "org.jline:jline:3.8.2" + + // select just the parts of JLine that are needed + compile("org.jline:jline-terminal-jna:${jlineVersion}") { + exclude group: "net.java.dev.jna" + } + compile "org.jline:jline-terminal:${jlineVersion}" + compile "org.jline:jline-reader:${jlineVersion}" + compile "org.jline:jline-style:${jlineVersion}" + + testRuntime "org.elasticsearch:jna:${versions.jna}" } /* disable unit tests because these are all integration tests used * other qa projects. 
*/ -unitTest.enabled = false +test.enabled = false dependencyLicenses.enabled = false dependenciesInfo.enabled = false @@ -31,49 +40,6 @@ forbiddenApisMain { replaceSignatureFiles 'es-all-signatures', 'es-test-signatures' } -thirdPartyAudit.ignoreMissingClasses ( - // jLine's optional dependencies - 'org.apache.sshd.client.SshClient', - 'org.apache.sshd.client.auth.keyboard.UserInteraction', - 'org.apache.sshd.client.channel.ChannelShell', - 'org.apache.sshd.client.channel.ClientChannel', - 'org.apache.sshd.client.channel.ClientChannelEvent', - 'org.apache.sshd.client.future.AuthFuture', - 'org.apache.sshd.client.future.ConnectFuture', - 'org.apache.sshd.client.future.OpenFuture', - 'org.apache.sshd.client.session.ClientSession', - 'org.apache.sshd.common.Factory', - 'org.apache.sshd.common.channel.PtyMode', - 'org.apache.sshd.common.config.keys.FilePasswordProvider', - 'org.apache.sshd.common.util.io.NoCloseInputStream', - 'org.apache.sshd.common.util.io.NoCloseOutputStream', - 'org.apache.sshd.server.Command', - 'org.apache.sshd.server.Environment', - 'org.apache.sshd.server.ExitCallback', - 'org.apache.sshd.server.SessionAware', - 'org.apache.sshd.server.Signal', - 'org.apache.sshd.server.SshServer', - 'org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider', - 'org.apache.sshd.server.scp.ScpCommandFactory$Builder', - 'org.apache.sshd.server.session.ServerSession', - 'org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory$Builder', - 'org.fusesource.jansi.Ansi', - 'org.fusesource.jansi.internal.CLibrary$Termios', - 'org.fusesource.jansi.internal.CLibrary$WinSize', - 'org.fusesource.jansi.internal.CLibrary', - 'org.fusesource.jansi.internal.Kernel32$CHAR_INFO', - 'org.fusesource.jansi.internal.Kernel32$CONSOLE_SCREEN_BUFFER_INFO', - 'org.fusesource.jansi.internal.Kernel32$COORD', - 'org.fusesource.jansi.internal.Kernel32$FOCUS_EVENT_RECORD', - 'org.fusesource.jansi.internal.Kernel32$INPUT_RECORD', - 'org.fusesource.jansi.internal.Kernel32$KEY_EVENT_RECORD', - 'org.fusesource.jansi.internal.Kernel32$MOUSE_EVENT_RECORD', - 'org.fusesource.jansi.internal.Kernel32$SMALL_RECT', - 'org.fusesource.jansi.internal.Kernel32', - 'org.fusesource.jansi.internal.WindowsSupport', - 'org.mozilla.universalchardet.UniversalDetector', -) - subprojects { apply plugin: 'elasticsearch.standalone-rest-test' dependencies { @@ -95,7 +61,7 @@ subprojects { // TODO check if needed - testRuntime("org.antlr:antlr4-runtime:4.5.3") { + testRuntime("org.antlr:antlr4-runtime:${antlrVersion}") { transitive = false } @@ -104,7 +70,15 @@ subprojects { testRuntime (xpackProject('plugin:sql:sql-action')) { transitive = false } - testRuntime "org.jline:jline:3.8.2" + + testRuntime("org.jline:jline-terminal-jna:${jlineVersion}") { + exclude group: "net.java.dev.jna" + } + testRuntime "org.jline:jline-terminal:${jlineVersion}" + testRuntime "org.jline:jline-reader:${jlineVersion}" + testRuntime "org.jline:jline-style:${jlineVersion}" + + testRuntime "org.elasticsearch:jna:${versions.jna}" } if (project.name != 'security') { diff --git a/x-pack/plugin/sql/qa/security/build.gradle b/x-pack/plugin/sql/qa/security/build.gradle index d9bffd393641d..a0e6e82ed4d67 100644 --- a/x-pack/plugin/sql/qa/security/build.gradle +++ b/x-pack/plugin/sql/qa/security/build.gradle @@ -7,21 +7,24 @@ Project mainProject = project group = "${group}.x-pack.qa.sql.security" // Tests are pushed down to subprojects and will be checked there. 
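// (editorial note, for context: the subprojects block below now reuses the parent project's compiled test classes and
//  resources, via output.addClassesDir and processTestResources, instead of recompiling the shared test sources in
//  every subproject)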
-testingConventions.enabled = false +testingConventions.enabled = false subprojects { // Use resources from the parent project in subprojects sourceSets { test { - java { - srcDirs = ["${mainProject.projectDir}/src/test/java"] - } - resources { - srcDirs = ["${mainProject.projectDir}/src/test/resources"] + mainProject.sourceSets.test.output.classesDirs.each { dir -> + output.addClassesDir { dir } + output.builtBy(mainProject.tasks.testClasses) } + runtimeClasspath += mainProject.sourceSets.test.output } } + processTestResources { + from mainProject.file('src/test/resources') + } + dependencies { testCompile "org.elasticsearch.plugin:x-pack-core:${version}" } @@ -41,8 +44,11 @@ subprojects { } integTestRunner { - systemProperty 'tests.audit.logfile', + def today = new Date().format('yyyy-MM-dd') + nonInputProperties.systemProperty 'tests.audit.logfile', "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_audit.json" + nonInputProperties.systemProperty 'tests.audit.yesterday.logfile', + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_audit-${today}.json" } runqa { diff --git a/x-pack/plugin/sql/qa/security/roles.yml b/x-pack/plugin/sql/qa/security/roles.yml index 337d7c7f9c7c1..141314e23f024 100644 --- a/x-pack/plugin/sql/qa/security/roles.yml +++ b/x-pack/plugin/sql/qa/security/roles.yml @@ -18,6 +18,10 @@ cli_or_drivers_minimal: privileges: [read, "indices:admin/get"] # end::cli_drivers +read_nothing: + cluster: + - "cluster:monitor/main" + read_something_else: cluster: - "cluster:monitor/main" diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java index d47b06289a8b3..a911e7d4854ae 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java @@ -230,16 +230,16 @@ public void expectUnknownColumn(String user, String sql, String column) throws E @Override public void checkNoMonitorMain(String user) throws Exception { - // Most SQL actually works fine without monitor/main - expectMatchesAdmin("SELECT * FROM test", user, "SELECT * FROM test"); - expectMatchesAdmin("SHOW TABLES LIKE 'test'", user, "SHOW TABLES LIKE 'test'"); - expectMatchesAdmin("DESCRIBE test", user, "DESCRIBE test"); - - // But there are a few things that don't work - try (Connection es = es(userProperties(user))) { - expectUnauthorized("cluster:monitor/main", user, () -> es.getMetaData().getDatabaseMajorVersion()); - expectUnauthorized("cluster:monitor/main", user, () -> es.getMetaData().getDatabaseMinorVersion()); - } + // Without monitor/main the JDBC driver - ES server version comparison doesn't take place, which fails everything else + expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user))); + expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).getMetaData().getDatabaseMajorVersion()); + expectUnauthorized("cluster:monitor/main", user, () -> es(userProperties(user)).getMetaData().getDatabaseMinorVersion()); + expectUnauthorized("cluster:monitor/main", user, + () -> es(userProperties(user)).createStatement().executeQuery("SELECT * FROM test")); + expectUnauthorized("cluster:monitor/main", user, + () -> es(userProperties(user)).createStatement().executeQuery("SHOW TABLES LIKE 'test'")); 
+ expectUnauthorized("cluster:monitor/main", user, + () -> es(userProperties(user)).createStatement().executeQuery("DESCRIBE test")); } private void expectUnauthorized(String action, String user, ThrowingRunnable r) { @@ -292,7 +292,7 @@ public void testMetaDataGetColumnsWorksAsFullAccess() throws Exception { expectActionMatchesAdmin( con -> con.getMetaData().getColumns(null, "%", "%t", "%"), "full_access", - con -> con.getMetaData().getColumns(null, "%", "%", "%")); + con -> con.getMetaData().getColumns(null, "%", "%t", "%")); } public void testMetaDataGetColumnsWithNoAccess() throws Exception { diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java index 94f6952bd6d8e..313d0cdb5cf7f 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; import org.hamcrest.Matcher; import org.hamcrest.Matchers; import org.junit.AfterClass; @@ -84,6 +85,7 @@ protected interface Actions { * {@code plugin-security.policy}. So we may as well have gradle set the property. */ private static final Path AUDIT_LOG_FILE = lookupAuditLog(); + private static final Path ROLLED_OVER_AUDIT_LOG_FILE = lookupRolledOverAuditLog(); @SuppressForbidden(reason="security doesn't work with mock filesystem") private static Path lookupAuditLog() { @@ -95,6 +97,16 @@ private static Path lookupAuditLog() { } return Paths.get(auditLogFileString); } + + @SuppressForbidden(reason="security doesn't work with mock filesystem") + private static Path lookupRolledOverAuditLog() { + String auditLogFileString = System.getProperty("tests.audit.yesterday.logfile"); + if (null == auditLogFileString) { + throw new IllegalStateException("tests.audit.yesterday.logfile must be set to run this test. It should be automatically " + + "set by gradle."); + } + return Paths.get(auditLogFileString); + } private static boolean oneTimeSetup = false; private static boolean auditFailure = false; @@ -107,7 +119,12 @@ private static Path lookupAuditLog() { /** * How much of the audit log was written before the test started. */ - private long auditLogWrittenBeforeTestStart; + private static long auditLogWrittenBeforeTestStart; + + /** + * If the audit log file rolled over. This is a rare case possible only at midnight. 
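     * Once flipped to true it stays true for the remainder of the suite, so the rolled over file is only read the
     * first time it is noticed (see the "once the audit file rolled over, it will stay like this" comments below).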
+ */ + private static boolean auditFileRolledOver = false; public SqlSecurityTestCase(Actions actions) { this.actions = actions; @@ -164,13 +181,23 @@ public void setInitialAuditLogOffset() { return null; } if (false == Files.isRegularFile(AUDIT_LOG_FILE)) { - throw new IllegalStateException("expected tests.audit.logfile [" + AUDIT_LOG_FILE + "]to be a plain file but wasn't"); + throw new IllegalStateException("expected tests.audit.logfile [" + AUDIT_LOG_FILE + "] to be a plain file but wasn't"); } try { auditLogWrittenBeforeTestStart = Files.size(AUDIT_LOG_FILE); } catch (IOException e) { throw new RuntimeException(e); } + + // The log file can roll over without being caught by assertLogs() method: in those tests where exceptions are being handled + // and no audit logs being read (and, thus, assertLogs() is not called) - for example testNoMonitorMain() method: there are no + // calls to auditLogs(), and the method could run while the audit file is rolled over. + // If this happens, next call to auditLogs() will make the tests read from the rolled over file using the main audit file + // offset, which will most likely not going to work since the offset will happen somewhere in the middle of a json line. + if (auditFileRolledOver == false && Files.exists(ROLLED_OVER_AUDIT_LOG_FILE)) { + // once the audit file rolled over, it will stay like this + auditFileRolledOver = true; + } return null; }); } @@ -551,56 +578,92 @@ public void assertLogs() throws Exception { assertFalse("Previous test had an audit-related failure. All subsequent audit related assertions are bogus because we can't " + "guarantee that we fully cleaned up after the last test.", auditFailure); try { + // use a second variable since the `assertBusy()` block can be executed multiple times and the + // static auditFileRolledOver value can change and mess up subsequent calls of this code block + boolean localAuditFileRolledOver = auditFileRolledOver; assertBusy(() -> { SecurityManager sm = System.getSecurityManager(); if (sm != null) { sm.checkPermission(new SpecialPermission()); } - BufferedReader logReader = AccessController.doPrivileged((PrivilegedAction) () -> { + + BufferedReader[] logReaders = new BufferedReader[2]; + AccessController.doPrivileged((PrivilegedAction) () -> { try { - return Files.newBufferedReader(AUDIT_LOG_FILE, StandardCharsets.UTF_8); + // the audit log file rolled over during the test + // and we need to consume the rest of the rolled over file plus the new audit log file + if (localAuditFileRolledOver == false && Files.exists(ROLLED_OVER_AUDIT_LOG_FILE)) { + // once the audit file rolled over, it will stay like this + auditFileRolledOver = true; + // the order in the array matters, as the readers will be used in that order + logReaders[0] = Files.newBufferedReader(ROLLED_OVER_AUDIT_LOG_FILE, StandardCharsets.UTF_8); + } + logReaders[1] = Files.newBufferedReader(AUDIT_LOG_FILE, StandardCharsets.UTF_8); + return null; } catch (IOException e) { throw new RuntimeException(e); } }); - logReader.skip(auditLogWrittenBeforeTestStart); + + // The "index" is used as a way of reading from both rolled over file and current audit file in order: rolled over file + // first, then the audit log file. Very rarely we will read from the rolled over file: when the test happened to run + // at midnight and the audit file rolled over during the test. 
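                // In short: index starts at 0 only when a rolled over file has just been detected, otherwise at 1;
                // each reader is drained with readLine() until it returns null, then index is advanced, and the loop
                // ends once the current audit file (index 1) is exhausted.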
+ int index; + if (logReaders[0] != null) { + logReaders[0].skip(auditLogWrittenBeforeTestStart); + // start with the rolled over file first + index = 0; + } else { + // the current audit log file reader should always be non-null + logReaders[1].skip(auditLogWrittenBeforeTestStart); + // start with the current audit logging file + index = 1; + } List> logs = new ArrayList<>(); String line; - while ((line = logReader.readLine()) != null) { - try { - final Map log = XContentHelper.convertToMap(JsonXContent.jsonXContent, line, false); - if (false == ("access_denied".equals(log.get("event.action")) - || "access_granted".equals(log.get("event.action")))) { - continue; - } - assertThat(log.containsKey("action"), is(true)); - if (false == (SQL_ACTION_NAME.equals(log.get("action")) - || GetIndexAction.NAME.equals(log.get("action")) - || FieldCapabilitiesAction.NAME.equals(log.get("action")))) { - // TODO we may want to extend this and the assertions to SearchAction.NAME as well - continue; + while (index < 2) { + line = logReaders[index].readLine(); + // when the end of the file is reached, either stop or move to the next reader + if (line == null) { + if (++index == 2) { + break; } - assertThat(log.containsKey("user.name"), is(true)); - List indices = new ArrayList<>(); - if (log.containsKey("indices")) { - indices = (ArrayList) log.get("indices"); - if ("test_admin".equals(log.get("user.name"))) { - /* - * Sometimes we accidentally sneak access to the security tables. This is fine, - * SQL drops them from the interface. So we might have access to them, but we - * don't show them. - */ - indices.remove(".security"); - indices.remove(".security-6"); + } + else { + try { + final Map log = XContentHelper.convertToMap(JsonXContent.jsonXContent, line, false); + if (false == ("access_denied".equals(log.get("event.action")) + || "access_granted".equals(log.get("event.action")))) { + continue; + } + assertThat(log.containsKey("action"), is(true)); + if (false == (SQL_ACTION_NAME.equals(log.get("action")) + || GetIndexAction.NAME.equals(log.get("action")) + || FieldCapabilitiesAction.NAME.equals(log.get("action")))) { + // TODO we may want to extend this and the assertions to SearchAction.NAME as well + continue; + } + assertThat(log.containsKey("user.name"), is(true)); + List indices = new ArrayList<>(); + if (log.containsKey("indices")) { + indices = (ArrayList) log.get("indices"); + if ("test_admin".equals(log.get("user.name"))) { + /* + * Sometimes we accidentally sneak access to the security tables. This is fine, + * SQL drops them from the interface. So we might have access to them, but we + * don't show them. 
+ */ + indices.removeAll(RestrictedIndicesNames.RESTRICTED_NAMES); + } } + // Use a sorted list for indices for consistent error reporting + Collections.sort(indices); + log.put("indices", indices); + logs.add(log); + } catch (final ElasticsearchParseException e) { + throw new IllegalArgumentException("Unrecognized log: " + line, e); } - // Use a sorted list for indices for consistent error reporting - Collections.sort(indices); - log.put("indices", indices); - logs.add(log); - } catch (final ElasticsearchParseException e) { - throw new IllegalArgumentException("Unrecognized log: " + line, e); } } List> allLogs = new ArrayList<>(logs); diff --git a/x-pack/plugin/sql/qa/security/src/test/resources/plugin-security.policy b/x-pack/plugin/sql/qa/security/src/test/resources/plugin-security.policy index d013547b9fd5f..434fdee0a8d20 100644 --- a/x-pack/plugin/sql/qa/security/src/test/resources/plugin-security.policy +++ b/x-pack/plugin/sql/qa/security/src/test/resources/plugin-security.policy @@ -1,6 +1,7 @@ grant { // Needed to read the audit log file permission java.io.FilePermission "${tests.audit.logfile}", "read"; + permission java.io.FilePermission "${tests.audit.yesterday.logfile}", "read"; //// Required by ssl subproject: // Required for the net client to setup ssl rather than use global ssl. diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java index 5a8f757209ff7..102491a95ddb7 100644 --- a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java @@ -153,6 +153,7 @@ public void testExplainWithCount() throws IOException { assertThat(readLine(), startsWith(" }")); assertThat(readLine(), startsWith(" }")); assertThat(readLine(), startsWith(" ]")); + assertThat(readLine(), startsWith(" \"track_total_hits\" : 2147483647")); assertThat(readLine(), startsWith("}]")); assertEquals("", readLine()); } diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java index 66ac2e2c7df24..135b3ed57223f 100644 --- a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcCsvSpecIT.java @@ -5,10 +5,26 @@ */ package org.elasticsearch.xpack.sql.qa.single_node; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.elasticsearch.xpack.sql.qa.jdbc.CsvSpecTestCase; import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.specParser; + public class JdbcCsvSpecIT extends CsvSpecTestCase { + + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + List list = new ArrayList<>(); + list.addAll(CsvSpecTestCase.readScriptSpec()); + return readScriptSpec("/single-node-only/command-sys.csv-spec", specParser()); + } + public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { 
super(fileName, groupName, testName, lineNumber, testCase); } @@ -16,7 +32,7 @@ public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer @Override protected int fetchSize() { // using a smaller fetchSize for nested documents' tests to uncover bugs - // similar with https://github.com/elastic/elasticsearch/issues/35176 quicker + // similar to https://github.com/elastic/elasticsearch/issues/35176 quicker return fileName.startsWith("nested") && randomBoolean() ? randomIntBetween(1,5) : super.fetchSize(); } } diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java index f89f801d28203..6cd53d22a1735 100644 --- a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java @@ -69,7 +69,7 @@ protected void assertResults(ResultSet expected, ResultSet elastic) throws SQLEx // uncomment this to printout the result set and create new CSV tests // //JdbcTestUtils.logLikeCLI(elastic, log); - JdbcAssert.assertResultSets(expected, elastic, log, true, false); + JdbcAssert.assertResultSets(expected, elastic, log, true, true); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/IndexUpgradeCheckVersion.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcResultSetMetaDataIT.java similarity index 55% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/IndexUpgradeCheckVersion.java rename to x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcResultSetMetaDataIT.java index e09f73a688e57..85b85f2b0a6d5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/IndexUpgradeCheckVersion.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcResultSetMetaDataIT.java @@ -3,11 +3,11 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.upgrade; -public final class IndexUpgradeCheckVersion { - public static final int UPRADE_VERSION = 6; +package org.elasticsearch.xpack.sql.qa.single_node; - private IndexUpgradeCheckVersion() {} +import org.elasticsearch.xpack.sql.qa.jdbc.ResultSetMetaDataTestCase; + +public class JdbcResultSetMetaDataIT extends ResultSetMetaDataTestCase { } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/ErrorsTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/ErrorsTestCase.java index 5e3b034d75708..c5ae7f63ad06d 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/ErrorsTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/ErrorsTestCase.java @@ -21,4 +21,5 @@ public interface ErrorsTestCase { void testSelectGroupByScore() throws Exception; void testSelectScoreSubField() throws Exception; void testSelectScoreInScalar() throws Exception; + void testHardLimitForSortOnAggregate() throws Exception; } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java index e346bfc649b23..547cf73131be4 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java @@ -67,11 +67,16 @@ public void testTextualType() throws IOException { public void testDateTimes() throws IOException { assertQuery("SELECT CAST('2019-01-14T12:29:25.000Z' AS DATETIME)", "CAST('2019-01-14T12:29:25.000Z' AS DATETIME)", - "datetime", "2019-01-14T12:29:25.000Z", 24); + "datetime", "2019-01-14T12:29:25.000Z", 29); assertQuery("SELECT CAST(-26853765751000 AS DATETIME)", "CAST(-26853765751000 AS DATETIME)", - "datetime", "1119-01-15T12:37:29.000Z", 24); + "datetime", "1119-01-15T12:37:29.000Z", 29); assertQuery("SELECT CAST(CAST('-26853765751000' AS BIGINT) AS DATETIME)", "CAST(CAST('-26853765751000' AS BIGINT) AS DATETIME)", - "datetime", "1119-01-15T12:37:29.000Z", 24); + "datetime", "1119-01-15T12:37:29.000Z", 29); + + assertQuery("SELECT CAST('2019-01-14' AS DATE)", "CAST('2019-01-14' AS DATE)", + "date", "2019-01-14T00:00:00.000Z", 29); + assertQuery("SELECT CAST(-26853765751000 AS DATE)", "CAST(-26853765751000 AS DATE)", + "date", "1119-01-15T00:00:00.000Z", 29); } public void testIPs() throws IOException { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java index e7a73cd12d524..cf221bbc14012 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/CliIntegrationTestCase.java @@ -56,8 +56,8 @@ protected SecurityConfig securityConfig() { return null; } - protected void index(String index, CheckedConsumer body) throws IOException { - Request request = new Request("PUT", "/" + index + "/_doc/1"); + protected void index(String index, int docId, CheckedConsumer body) throws IOException { + Request request = new Request("PUT", "/" + index + "/_doc/" + docId); request.addParameter("refresh", "true"); XContentBuilder builder = JsonXContent.contentBuilder().startObject(); body.accept(builder); @@ -66,6 +66,10 @@ protected void 
index(String index, CheckedConsumer client().performRequest(request); } + protected void index(String index, CheckedConsumer body) throws IOException { + index(index, 1, body); + } + public String command(String command) throws IOException { return cli.command(command); } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ErrorsTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ErrorsTestCase.java index ca251a31844c0..a3ad325d0acec 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ErrorsTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/ErrorsTestCase.java @@ -97,8 +97,15 @@ public void testSelectScoreInScalar() throws Exception { assertEquals("line 1:12: [SCORE()] cannot be an argument to a function" + END, readLine()); } + @Override + public void testHardLimitForSortOnAggregate() throws Exception { + index("test", body -> body.field("a", 1).field("b", 2)); + String commandResult = command("SELECT max(a) max FROM test GROUP BY b ORDER BY max LIMIT 10000"); + assertEquals(START + "Bad request [[3;33;22mThe maximum LIMIT for aggregate sorting is [512], received [10000]" + END, + commandResult); + } + public static void assertFoundOneProblem(String commandResult) { assertEquals(START + "Bad request [[3;33;22mFound 1 problem(s)", commandResult); } - } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/FetchSizeTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/FetchSizeTestCase.java index 84f74bcbac137..02de2dff4f7d6 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/FetchSizeTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/cli/FetchSizeTestCase.java @@ -49,4 +49,32 @@ public void testInvalidFetchSize() throws IOException { assertEquals(ErrorsTestCase.START + "Invalid fetch size [[3;33;22m" + Long.MAX_VALUE + ErrorsTestCase.END, command("fetch size = " + Long.MAX_VALUE)); } + + // Test for issue: https://github.com/elastic/elasticsearch/issues/42851 + // Even though fetch size and limit are smaller than the noRows, all buckets + // should be processed to achieve the global ordering of the aggregate function. 
+ public void testOrderingOnAggregate() throws IOException { + Request request = new Request("PUT", "/test/_bulk"); + request.addParameter("refresh", "true"); + StringBuilder bulk = new StringBuilder(); + for (int i = 1; i <= 100; i++) { + bulk.append("{\"index\":{}}\n"); + bulk.append("{\"a\":").append(i).append(", \"b\" : ").append(i).append("}\n"); + } + request.setJsonEntity(bulk.toString()); + client().performRequest(request); + + assertEquals("[?1l>[?1000l[?2004lfetch size set to [90m4[0m", command("fetch size = 4")); + assertEquals("[?1l>[?1000l[?2004lfetch separator set to \"[90m -- fetch sep -- [0m\"", + command("fetch separator = \" -- fetch sep -- \"")); + assertThat(command("SELECT max(b) FROM test GROUP BY a ORDER BY max(b) DESC LIMIT 20"), containsString("max(b)")); + assertThat(readLine(), containsString("----------")); + for (int i = 100; i > 80; i--) { + if (i < 100 && i % 4 == 0) { + assertThat(readLine(), containsString(" -- fetch sep -- ")); + } + assertThat(readLine(), containsString(Integer.toString(i))); + } + assertEquals("", readLine()); + } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java index 47e0e9c8f90df..81516154bf57a 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java @@ -31,19 +31,24 @@ public abstract class CsvSpecTestCase extends SpecBaseIntegrationTestCase { public static List readScriptSpec() throws Exception { Parser parser = specParser(); List tests = new ArrayList<>(); - tests.addAll(readScriptSpec("/select.csv-spec", parser)); - tests.addAll(readScriptSpec("/command.csv-spec", parser)); - tests.addAll(readScriptSpec("/fulltext.csv-spec", parser)); tests.addAll(readScriptSpec("/agg.csv-spec", parser)); + tests.addAll(readScriptSpec("/alias.csv-spec", parser)); + tests.addAll(readScriptSpec("/arithmetic.csv-spec", parser)); tests.addAll(readScriptSpec("/columns.csv-spec", parser)); + tests.addAll(readScriptSpec("/command.csv-spec", parser)); tests.addAll(readScriptSpec("/date.csv-spec", parser)); tests.addAll(readScriptSpec("/datetime.csv-spec", parser)); - tests.addAll(readScriptSpec("/alias.csv-spec", parser)); - tests.addAll(readScriptSpec("/null.csv-spec", parser)); - tests.addAll(readScriptSpec("/nested.csv-spec", parser)); + tests.addAll(readScriptSpec("/datetime-interval.csv-spec", parser)); + tests.addAll(readScriptSpec("/field-alias.csv-spec", parser)); + tests.addAll(readScriptSpec("/filter.csv-spec", parser)); + tests.addAll(readScriptSpec("/fulltext.csv-spec", parser)); tests.addAll(readScriptSpec("/functions.csv-spec", parser)); + tests.addAll(readScriptSpec("/ip.csv-spec", parser)); tests.addAll(readScriptSpec("/math.csv-spec", parser)); - tests.addAll(readScriptSpec("/field-alias.csv-spec", parser)); + tests.addAll(readScriptSpec("/null.csv-spec", parser)); + tests.addAll(readScriptSpec("/nested.csv-spec", parser)); + tests.addAll(readScriptSpec("/select.csv-spec", parser)); + return tests; } @@ -66,6 +71,6 @@ protected final void doTest() throws Throwable { @Override protected void assertResults(ResultSet expected, ResultSet elastic) throws SQLException { Logger log = logEsResultSet() ? 
logger : null; - JdbcAssert.assertResultSets(expected, elastic, log, false, false); + JdbcAssert.assertResultSets(expected, elastic, log, false, true); } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java index 8cc8cf6e04044..6376bd13308d6 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvTestUtils.java @@ -155,6 +155,8 @@ private static String resolveColumnType(String type) { return "timestamp"; case "bt": return "byte"; + case "sh": + return "short"; default: return type; } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugCsvSpec.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugCsvSpec.java deleted file mode 100644 index d5a633e5ea388..0000000000000 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DebugCsvSpec.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.sql.qa.jdbc; - -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.List; - -import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.csvConnection; -import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.executeCsvQuery; -import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.specParser; - -@TestLogging("org.elasticsearch.xpack.sql:TRACE") -public abstract class DebugCsvSpec extends SpecBaseIntegrationTestCase { - private final CsvTestCase testCase; - - @ParametersFactory(shuffle = false, argumentFormatting = SqlSpecTestCase.PARAM_FORMATTING) - public static List readScriptSpec() throws Exception { - Parser parser = specParser(); - return readScriptSpec("/debug.csv-spec", parser); - } - - public DebugCsvSpec(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { - super(fileName, groupName, testName, lineNumber); - this.testCase = testCase; - } - - @Override - protected void assertResults(ResultSet expected, ResultSet elastic) throws SQLException { - Logger log = logEsResultSet() ? 
logger : null; - - // - // uncomment this to printout the result set and create new CSV tests - // - JdbcTestUtils.logResultSetMetadata(elastic, log); - JdbcTestUtils.logResultSetData(elastic, log); - //JdbcAssert.assertResultSets(expected, elastic, log); - } - - @Override - protected boolean logEsResultSet() { - return true; - } - - @Override - protected final void doTest() throws Throwable { - try (Connection csv = csvConnection(testCase); Connection es = esJdbc()) { - // pass the testName as table for debugging purposes (in case the underlying reader is missing) - ResultSet expected = executeCsvQuery(csv, testName); - ResultSet elasticResults = executeJdbcQuery(es, testCase.query); - assertResults(expected, elasticResults); - } - } -} \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ErrorsTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ErrorsTestCase.java index be3ba3d096ae2..6f12963634fdb 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ErrorsTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ErrorsTestCase.java @@ -116,4 +116,14 @@ public void testSelectScoreInScalar() throws Exception { assertThat(e.getMessage(), startsWith("Found 1 problem(s)\nline 1:12: [SCORE()] cannot be an argument to a function")); } } + + @Override + public void testHardLimitForSortOnAggregate() throws Exception { + index("test", body -> body.field("a", 1).field("b", 2)); + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> + c.prepareStatement("SELECT max(a) max FROM test GROUP BY b ORDER BY max LIMIT 10000").executeQuery()); + assertEquals("The maximum LIMIT for aggregate sorting is [512], received [10000]", e.getMessage()); + } + } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java index 7832f7d35d7a0..5a5ac373cd4e4 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcIntegrationTestCase.java @@ -50,9 +50,33 @@ public static String elasticsearchAddress() { } public Connection esJdbc() throws SQLException { - return randomBoolean() ? 
useDriverManager() : useDataSource(); + return esJdbc(connectionProperties()); } + public Connection esJdbc(Properties props) throws SQLException { + return createConnection(props); + } + + protected Connection createConnection(Properties connectionProperties) throws SQLException { + String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); + String address = "jdbc:es://" + elasticsearchAddress; + Connection connection = null; + if (randomBoolean()) { + connection = DriverManager.getConnection(address, connectionProperties); + } else { + EsDataSource dataSource = new EsDataSource(); + dataSource.setUrl(address); + dataSource.setProperties(connectionProperties); + connection = dataSource.getConnection(); + } + + assertNotNull("The timezone should be specified", connectionProperties.getProperty("timezone")); + return connection; + } + + // + // methods below are used inside the documentation only + // protected Connection useDriverManager() throws SQLException { String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); // tag::connect-dm @@ -93,6 +117,12 @@ public static void index(String index, String documentId, CheckedConsumer 1) { - sb.append(" | "); - } - sb.append(trimOrPad(column.append(rs.getString(i)))); - } - log.info(sb); + log.info(rowAsString(rs, columns)); } } - public static String resultSetCurrentData(ResultSet rs) throws SQLException { + static String resultSetCurrentData(ResultSet rs) throws SQLException { ResultSetMetaData metaData = rs.getMetaData(); - StringBuilder column = new StringBuilder(); - - int columns = metaData.getColumnCount(); + return rowAsString(rs, metaData.getColumnCount()); + } + private static String rowAsString(ResultSet rs, int columns) throws SQLException { StringBuilder sb = new StringBuilder(); + StringBuilder column = new StringBuilder(); for (int i = 1; i <= columns; i++) { column.setLength(0); if (i > 1) { @@ -137,7 +131,18 @@ public static void logLikeCLI(ResultSet rs, Logger logger) throws SQLException { logger.info("\n" + formatter.formatWithHeader(cols, data)); } - public static String of(long millis) { - return StringUtils.toString(ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), UTC)); + static String of(long millis, String zoneId) { + return StringUtils.toString(ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(zoneId))); + } + + static Date asDate(long millis, ZoneId zoneId) { + return new java.sql.Date( + ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), zoneId) + .toLocalDate().atStartOfDay(zoneId).toInstant().toEpochMilli()); + } + + static Time asTime(long millis, ZoneId zoneId) { + return new Time(ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), zoneId) + .toLocalTime().atDate(JdbcTestUtils.EPOCH).atZone(zoneId).toInstant().toEpochMilli()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetMetaDataTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetMetaDataTestCase.java new file mode 100644 index 0000000000000..b8ab74bd33f5c --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetMetaDataTestCase.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.qa.jdbc; + +import org.elasticsearch.common.CheckedConsumer; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; + +public class ResultSetMetaDataTestCase extends JdbcIntegrationTestCase { + + private final String[] fieldsNames = new String[] {"test_byte", "test_integer", "test_long", "test_short", + "test_double", "test_float", "test_keyword", "test_boolean", "test_date"}; + + public void testValidGetObjectCalls() throws Exception { + ResultSetTestCase.createIndex("test"); + ResultSetTestCase.updateMapping("test", builder -> { + for(String field : fieldsNames) { + builder.startObject(field).field("type", field.substring(5)).endObject(); + } + }); + + String q = "SELECT test_byte, test_integer, test_long, test_short, test_double, test_float, test_keyword, " + + "test_boolean, test_date FROM test"; + doWithQuery(q, (r) -> assertColumnNamesAndLabels(r.getMetaData(), fieldsNames)); + + q = "SELECT test_byte AS b, test_integer AS i, test_long AS l, test_short AS s, test_double AS d, test_float AS f, " + + "test_keyword AS k, test_boolean AS bool, test_date AS dt FROM test"; + doWithQuery(q, (r) -> assertColumnNamesAndLabels(r.getMetaData(), new String[] {"b", "i", "l", "s", "d", "f", "k", "bool", "dt"})); + } + + private void doWithQuery(String query, CheckedConsumer consumer) throws SQLException { + try (Connection connection = esJdbc()) { + try (PreparedStatement statement = connection.prepareStatement(query)) { + try (ResultSet results = statement.executeQuery()) { + assertEquals(fieldsNames.length, results.getMetaData().getColumnCount()); + consumer.accept(results); + } + } + } + } + + private void assertColumnNamesAndLabels(ResultSetMetaData metaData, String[] names) throws SQLException { + for(int i = 0; i < fieldsNames.length; i++) { + assertEquals(names[i], metaData.getColumnName(i + 1)); + assertEquals(names[i], metaData.getColumnLabel(i + 1)); + } + } +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java index 2550026c153a5..b8cd81e39f545 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java @@ -9,12 +9,13 @@ import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.xpack.sql.jdbc.EsDataSource; import org.elasticsearch.xpack.sql.jdbc.EsType; +import org.junit.Before; import java.io.IOException; import java.io.InputStream; @@ -22,7 +23,6 @@ import java.sql.Blob; import java.sql.Clob; import java.sql.Connection; -import java.sql.DriverManager; import java.sql.NClob; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -32,6 +32,8 @@ import java.sql.SQLType; import java.sql.Timestamp; import java.sql.Types; +import java.time.Instant; +import java.time.ZoneId; import java.util.Arrays; import java.util.Calendar; import java.util.Date; @@ -58,6 +60,8 @@ import 
static java.util.Calendar.SECOND; import static java.util.Calendar.YEAR; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.JDBC_TIMEZONE; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.asDate; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.asTime; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.of; public class ResultSetTestCase extends JdbcIntegrationTestCase { @@ -79,7 +83,73 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase { dateTimeTestingFields.put(new Tuple("test_float", 1f), EsType.FLOAT); dateTimeTestingFields.put(new Tuple("test_keyword", "true"), EsType.KEYWORD); } + + private String timeZoneId; + + @Before + public void chooseRandomTimeZone() { + this.timeZoneId = randomKnownTimeZone(); + } + + public void testMultiValueFieldWithMultiValueLeniencyEnabled() throws Exception { + createTestDataForMultiValueTests(); + + doWithQuery(() -> esWithLeniency(true), "SELECT int, keyword FROM test", (results) -> { + results.next(); + Object number = results.getObject(1); + Object string = results.getObject(2); + assertEquals(-10, number); + assertEquals("-10", string); + assertFalse(results.next()); + }); + } + + public void testMultiValueFieldWithMultiValueLeniencyDisabled() throws Exception { + createTestDataForMultiValueTests(); + + SQLException expected = expectThrows(SQLException.class, + () -> doWithQuery(() -> esWithLeniency(false), "SELECT int, keyword FROM test", (results) -> { + })); + assertTrue(expected.getMessage().contains("Arrays (returned by [int]) are not supported")); + + // default has multi value disabled + expected = expectThrows(SQLException.class, + () -> doWithQuery(() -> esJdbc(), "SELECT int, keyword FROM test", (results) -> { + })); + } + + public void testMultiValueFields_InsideObjects_WithMultiValueLeniencyEnabled() throws Exception { + createTestDataForMultiValuesInObjectsTests(); + + doWithQuery(() -> esWithLeniency(true), "SELECT object.intsubfield, object.textsubfield, object.textsubfield.keyword FROM test", + (results) -> { + results.next(); + Object number = results.getObject(1); + Object text = results.getObject(2); + Object keyword = results.getObject(3); + assertEquals(-25, number); + assertEquals("xyz", text); + assertEquals("-25", keyword); + assertFalse(results.next()); + }); + } + public void testMultiValueFields_InsideObjects_WithMultiValueLeniencyDisabled() throws Exception { + createTestDataForMultiValuesInObjectsTests(); + + SQLException expected = expectThrows(SQLException.class, + () -> doWithQuery(() -> esWithLeniency(false), "SELECT object.intsubfield, object.textsubfield, object.textsubfield.keyword" + + " FROM test", (results) -> { + })); + assertTrue(expected.getMessage().contains("Arrays (returned by [object.intsubfield]) are not supported")); + + // default has multi value disabled + expected = expectThrows(SQLException.class, + () -> doWithQuery(() -> esJdbc(), "SELECT object.intsubfield, object.textsubfield, object.textsubfield.keyword", + (results) -> { + })); + } + // Byte values testing public void testGettingValidByteWithoutCasting() throws Exception { byte random1 = randomByte(); @@ -201,10 +271,10 @@ public void testGettingInvalidByte() throws Exception { sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getByte("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Byte]", of(randomDate)), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type 
[DATETIME] to [Byte]", asDateString(randomDate)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Byte.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Byte]", of(randomDate)), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Byte]", asDateString(randomDate)), sqle.getMessage()); }); } @@ -324,10 +394,10 @@ public void testGettingInvalidShort() throws Exception { sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getShort("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Short]", of(randomDate)), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Short]", asDateString(randomDate)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Short.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Short]", of(randomDate)), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Short]", asDateString(randomDate)), sqle.getMessage()); }); } @@ -439,11 +509,11 @@ public void testGettingInvalidInteger() throws Exception { sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getInt("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Integer]", of(randomDate)), - sqle.getMessage()); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Integer]", + asDateString(randomDate)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Integer.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Integer]", of(randomDate)), - sqle.getMessage()); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Integer]", + asDateString(randomDate)), sqle.getMessage()); }); } @@ -541,10 +611,10 @@ public void testGettingInvalidLong() throws Exception { sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getLong("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Long]", of(randomDate)), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Long]", asDateString(randomDate)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Long.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Long]", of(randomDate)), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Long]", asDateString(randomDate)), sqle.getMessage()); }); } @@ -624,10 +694,10 @@ public void testGettingInvalidDouble() throws Exception { sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getDouble("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Double]", of(randomDate)), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Double]", asDateString(randomDate)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Double.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Double]", 
of(randomDate)), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Double]", asDateString(randomDate)), sqle.getMessage()); }); } @@ -707,10 +777,10 @@ public void testGettingInvalidFloat() throws Exception { sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getFloat("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Float]", of(randomDate)), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Float]", asDateString(randomDate)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Float.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Float]", of(randomDate)), + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Float]", asDateString(randomDate)), sqle.getMessage()); }); } @@ -768,8 +838,8 @@ public void testGettingBooleanValues() throws Exception { assertEquals("Expected: but was: for field " + fld, true, results.getObject(fld, Boolean.class)); } SQLException sqle = expectThrows(SQLException.class, () -> results.getBoolean("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Boolean]", of(randomDate1)), - sqle.getMessage()); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Boolean]", + asDateString(randomDate1)), sqle.getMessage()); results.next(); assertEquals(false, results.getBoolean("test_boolean")); @@ -778,12 +848,12 @@ public void testGettingBooleanValues() throws Exception { assertEquals("Expected: but was: for field " + fld, false, results.getObject(fld, Boolean.class)); } sqle = expectThrows(SQLException.class, () -> results.getBoolean("test_date")); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Boolean]", of(randomDate2)), - sqle.getMessage()); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Boolean]", + asDateString(randomDate2)), sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Boolean.class)); - assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Boolean]", of(randomDate2)), - sqle.getMessage()); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATETIME] to [Boolean]", + asDateString(randomDate2)), sqle.getMessage()); results.next(); for(String fld : fieldsNames.stream() @@ -808,26 +878,18 @@ public void testGettingDateWithoutCalendar() throws Exception { Long randomLongDate = randomNonNegativeLong(); indexSimpleDocumentWithTrueValues(randomLongDate); - String timeZoneId = randomKnownTimeZone(); - Calendar connCalendar = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId), Locale.ROOT); - - doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + doWithQuery(SELECT_ALL_FIELDS, (results) -> { results.next(); - connCalendar.setTimeInMillis(randomLongDate); - connCalendar.set(HOUR_OF_DAY, 0); - connCalendar.set(MINUTE, 0); - connCalendar.set(SECOND, 0); - connCalendar.set(MILLISECOND, 0); - - assertEquals(results.getDate("test_date"), new java.sql.Date(connCalendar.getTimeInMillis())); - assertEquals(results.getDate(9), new java.sql.Date(connCalendar.getTimeInMillis())); - assertEquals(results.getObject("test_date", java.sql.Date.class), - new java.sql.Date(randomLongDate - 
(randomLongDate % 86400000L))); - assertEquals(results.getObject(9, java.sql.Date.class), - new java.sql.Date(randomLongDate - (randomLongDate % 86400000L))); + + java.sql.Date expectedDate = asDate(randomLongDate, getZoneFromOffset(randomLongDate)); + + assertEquals(expectedDate, results.getDate("test_date")); + assertEquals(expectedDate, results.getDate(9)); + assertEquals(expectedDate, results.getObject("test_date", java.sql.Date.class)); + assertEquals(expectedDate, results.getObject(9, java.sql.Date.class)); // bulk validation for all fields which are not of type date - validateErrorsForDateTimeTestsWithoutCalendar(results::getDate); + validateErrorsForDateTestsWithoutCalendar(results::getDate); }); } @@ -844,11 +906,10 @@ public void testGettingDateWithCalendar() throws Exception { builder.timeField("test_date", null); }); - String timeZoneId = randomKnownTimeZone(); String anotherTZId = randomValueOtherThan(timeZoneId, () -> randomKnownTimeZone()); Calendar c = Calendar.getInstance(TimeZone.getTimeZone(anotherTZId), Locale.ROOT); - doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + doWithQuery(SELECT_ALL_FIELDS, (results) -> { results.next(); c.setTimeInMillis(randomLongDate); c.set(HOUR_OF_DAY, 0); @@ -876,29 +937,21 @@ public void testGettingTimeWithoutCalendar() throws Exception { }); Long randomLongDate = randomNonNegativeLong(); indexSimpleDocumentWithTrueValues(randomLongDate); - - String timeZoneId = randomKnownTimeZone(); - Calendar c = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId), Locale.ROOT); - - doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + + doWithQuery(SELECT_ALL_FIELDS, (results) -> { results.next(); - c.setTimeInMillis(randomLongDate); - c.set(ERA, GregorianCalendar.AD); - c.set(YEAR, 1970); - c.set(MONTH, 0); - c.set(DAY_OF_MONTH, 1); - - assertEquals(results.getTime("test_date"), new java.sql.Time(c.getTimeInMillis())); - assertEquals(results.getTime(9), new java.sql.Time(c.getTimeInMillis())); - assertEquals(results.getObject("test_date", java.sql.Time.class), - new java.sql.Time(randomLongDate % 86400000L)); - assertEquals(results.getObject(9, java.sql.Time.class), - new java.sql.Time(randomLongDate % 86400000L)); - - validateErrorsForDateTimeTestsWithoutCalendar(results::getTime); + + java.sql.Time expectedTime = asTime(randomLongDate, getZoneFromOffset(randomLongDate)); + + assertEquals(expectedTime, results.getTime("test_date")); + assertEquals(expectedTime, results.getTime(9)); + assertEquals(expectedTime, results.getObject("test_date", java.sql.Time.class)); + assertEquals(expectedTime, results.getObject(9, java.sql.Time.class)); + + validateErrorsForTimeTestsWithoutCalendar(results::getTime); }); } - + public void testGettingTimeWithCalendar() throws Exception { createIndex("test"); updateMappingForNumericValuesTests("test"); @@ -912,11 +965,10 @@ public void testGettingTimeWithCalendar() throws Exception { builder.timeField("test_date", null); }); - String timeZoneId = randomKnownTimeZone(); String anotherTZId = randomValueOtherThan(timeZoneId, () -> randomKnownTimeZone()); Calendar c = Calendar.getInstance(TimeZone.getTimeZone(anotherTZId), Locale.ROOT); - doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + doWithQuery(SELECT_ALL_FIELDS, (results) -> { results.next(); c.setTimeInMillis(randomLongDate); c.set(ERA, GregorianCalendar.AD); @@ -989,11 +1041,10 @@ public void testGettingTimestampWithCalendar() throws Exception { builder.timeField("test_date", null); }); - String timeZoneId 
= randomKnownTimeZone(); String anotherTZId = randomValueOtherThan(timeZoneId, () -> randomKnownTimeZone()); Calendar c = Calendar.getInstance(TimeZone.getTimeZone(anotherTZId), Locale.ROOT); - doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + doWithQuery(SELECT_ALL_FIELDS, (results) -> { results.next(); c.setTimeInMillis(randomLongDate); @@ -1006,7 +1057,100 @@ public void testGettingTimestampWithCalendar() throws Exception { assertNull(results.getTimestamp("test_date")); }); } - + + public void testScalarOnDates() throws Exception { + createIndex("test"); + updateMapping("test", builder -> builder.startObject("test_date").field("type", "date").endObject()); + + // 2018-03-12 17:00:00 UTC + Long dateInMillis = 1520874000000L; + index("test", "1", builder -> builder.field("test_date", dateInMillis)); + + // UTC +10 hours + String timeZoneId1 = "Etc/GMT-10"; + Calendar connCalendar1 = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId1), Locale.ROOT); + + doWithQueryAndTimezone("SELECT test_date, DAY_OF_MONTH(test_date) as day FROM test", timeZoneId1, results -> { + results.next(); + connCalendar1.setTimeInMillis(dateInMillis); + connCalendar1.set(HOUR_OF_DAY, 0); + connCalendar1.set(MINUTE, 0); + connCalendar1.set(SECOND, 0); + connCalendar1.set(MILLISECOND, 0); + + java.sql.Date expectedDate = new java.sql.Date(connCalendar1.getTimeInMillis()); + assertEquals(expectedDate, results.getDate("test_date")); + assertEquals(expectedDate, results.getDate(1)); + assertEquals(expectedDate, results.getObject("test_date", java.sql.Date.class)); + assertEquals(expectedDate, results.getObject(1, java.sql.Date.class)); + + // +1 day + assertEquals(13, results.getInt("day")); + }); + + delete("test", "1"); + + // 2018-03-12 05:00:00 UTC + Long dateInMillis2 = 1520830800000L; + index("test", "1", builder -> builder.field("test_date", dateInMillis2)); + + // UTC -10 hours + String timeZoneId2 = "Etc/GMT+10"; + Calendar connCalendar2 = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId2), Locale.ROOT); + + + doWithQueryAndTimezone("SELECT test_date, DAY_OF_MONTH(test_date) as day FROM test", timeZoneId2, results -> { + results.next(); + connCalendar2.setTimeInMillis(dateInMillis2); + connCalendar2.set(HOUR_OF_DAY, 0); + connCalendar2.set(MINUTE, 0); + connCalendar2.set(SECOND, 0); + connCalendar2.set(MILLISECOND, 0); + + java.sql.Date expectedDate = new java.sql.Date(connCalendar2.getTimeInMillis()); + assertEquals(expectedDate, results.getDate("test_date")); + assertEquals(expectedDate, results.getDate(1)); + assertEquals(expectedDate, results.getObject("test_date", java.sql.Date.class)); + assertEquals(expectedDate, results.getObject(1, java.sql.Date.class)); + + // -1 day + assertEquals(11, results.getInt("day")); + }); + } + + public void testGetDateType() throws Exception { + createIndex("test"); + updateMapping("test", builder -> builder.startObject("test_date").field("type", "date").endObject()); + + // 2018-03-12 17:00:00 UTC + Long timeInMillis = 1520874000123L; + index("test", "1", builder -> builder.field("test_date", timeInMillis)); + + // UTC +10 hours + String timeZoneId1 = "Etc/GMT-10"; + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId1), Locale.ROOT); + + doWithQueryAndTimezone("SELECT CAST(test_date AS DATE) as date FROM test", timeZoneId1, results -> { + results.next(); + c.setTimeInMillis(timeInMillis); + c.set(HOUR_OF_DAY, 0); + c.set(MINUTE, 0); + c.set(SECOND, 0); + c.set(MILLISECOND, 0); + + java.sql.Date expectedDate = new 
java.sql.Date(c.getTimeInMillis()); + assertEquals(expectedDate, results.getDate("date")); + assertEquals(expectedDate, results.getObject("date", java.sql.Date.class)); + + java.sql.Time expectedTime = new java.sql.Time(0L); + assertEquals(expectedTime, results.getTime("date")); + assertEquals(expectedTime, results.getObject("date", java.sql.Time.class)); + + java.sql.Timestamp expectedTimestamp = new java.sql.Timestamp(c.getTimeInMillis()); + assertEquals(expectedTimestamp, results.getTimestamp("date")); + assertEquals(expectedTimestamp, results.getObject("date", java.sql.Timestamp.class)); + }); + } public void testValidGetObjectCalls() throws Exception { createIndex("test"); updateMappingForNumericValuesTests("test"); @@ -1072,7 +1216,7 @@ public void testValidGetObjectCalls() throws Exception { /* * Checks StackOverflowError fix for https://github.com/elastic/elasticsearch/pull/31735 */ - public void testNoInfiniteRecursiveGetObjectCalls() throws SQLException, IOException { + public void testNoInfiniteRecursiveGetObjectCalls() throws Exception { index("library", "1", builder -> { builder.field("name", "Don Quixote"); builder.field("page_count", 1072); @@ -1243,17 +1387,16 @@ public void testUnsupportedUpdateMethods() throws IOException, SQLException { } private void doWithQuery(String query, CheckedConsumer consumer) throws SQLException { - try (Connection connection = esJdbc()) { - try (PreparedStatement statement = connection.prepareStatement(query)) { - try (ResultSet results = statement.executeQuery()) { - consumer.accept(results); - } - } - } + doWithQuery(() -> esJdbc(timeZoneId), query, consumer); } private void doWithQueryAndTimezone(String query, String tz, CheckedConsumer consumer) throws SQLException { - try (Connection connection = esJdbc(tz)) { + doWithQuery(() -> esJdbc(tz), query, consumer); + } + + private void doWithQuery(CheckedSupplier con, String query, CheckedConsumer consumer) + throws SQLException { + try (Connection connection = con.get()) { try (PreparedStatement statement = connection.prepareStatement(query)) { try (ResultSet results = statement.executeQuery()) { consumer.accept(results); @@ -1262,7 +1405,7 @@ private void doWithQueryAndTimezone(String query, String tz, CheckedConsumer body) throws Exception { + protected static void updateMapping(String index, CheckedConsumer body) throws Exception { Request request = new Request("PUT", "/" + index + "/_mapping"); XContentBuilder updateMapping = JsonXContent.contentBuilder().startObject(); updateMapping.startObject("properties"); @@ -1290,12 +1433,71 @@ private void updateMapping(String index, CheckedConsumer { + builder.startObject("int").field("type", "integer").endObject(); + builder.startObject("keyword").field("type", "keyword").endObject(); + }); + + Integer[] values = randomArray(3, 15, s -> new Integer[s], () -> Integer.valueOf(randomInt(50))); + // add the minimal value in the middle yet the test will pick it up since the results are sorted + values[2] = Integer.valueOf(-10); + + String[] stringValues = new String[values.length]; + for (int i = 0; i < values.length; i++) { + stringValues[i] = String.valueOf(values[i]); + } + + index("test", "1", builder -> { + builder.array("int", (Object[]) values); + builder.array("keyword", stringValues); + }); + } + + private void createTestDataForMultiValuesInObjectsTests() throws Exception { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("object") + .startObject("properties") + .startObject("intsubfield").field("type", 
"integer").endObject() + .startObject("textsubfield") + .field("type", "text") + .startObject("fields").startObject("keyword").field("type", "keyword").endObject().endObject() + .endObject() + .endObject() + .endObject(); + builder.startObject("keyword").field("type", "keyword").endObject(); + }); + + Integer[] values = randomArray(3, 15, s -> new Integer[s], () -> Integer.valueOf(randomInt(50))); + // add the minimal value in the middle yet the test will pick it up since the results are sorted + values[2] = Integer.valueOf(-25); + + String[] stringValues = new String[values.length]; + for (int i = 0; i < values.length; i++) { + stringValues[i] = String.valueOf(values[i]); + } + stringValues[0] = "xyz"; + + index("test", "1", builder -> { + builder.startArray("object"); + for (int i = 0; i < values.length; i++) { + builder.startObject() + .field("intsubfield", values[i]) + .field("textsubfield", stringValues[i]) + .endObject(); + } + builder.endArray(); + }); + } + + private void createTestDataForByteValueTests(byte random1, byte random2, byte random3) throws Exception { createIndex("test"); updateMapping("test", builder -> { builder.startObject("test_byte").field("type", "byte").endObject(); @@ -1313,7 +1515,7 @@ private void createTestDataForByteValueTests(byte random1, byte random2, byte ra }); } - private void createTestDataForShortValueTests(short random1, short random2, short random3) throws Exception, IOException { + private void createTestDataForShortValueTests(short random1, short random2, short random3) throws Exception { createIndex("test"); updateMapping("test", builder -> { builder.startObject("test_short").field("type", "short").endObject(); @@ -1331,7 +1533,7 @@ private void createTestDataForShortValueTests(short random1, short random2, shor }); } - private void createTestDataForIntegerValueTests(int random1, int random2, int random3) throws Exception, IOException { + private void createTestDataForIntegerValueTests(int random1, int random2, int random3) throws Exception { createIndex("test"); updateMapping("test", builder -> { builder.startObject("test_integer").field("type", "integer").endObject(); @@ -1349,7 +1551,7 @@ private void createTestDataForIntegerValueTests(int random1, int random2, int ra }); } - private void createTestDataForLongValueTests(long random1, long random2, long random3) throws Exception, IOException { + private void createTestDataForLongValueTests(long random1, long random2, long random3) throws Exception { createIndex("test"); updateMapping("test", builder -> { builder.startObject("test_long").field("type", "long").endObject(); @@ -1367,7 +1569,7 @@ private void createTestDataForLongValueTests(long random1, long random2, long ra }); } - private void createTestDataForDoubleValueTests(double random1, double random2, double random3) throws Exception, IOException { + private void createTestDataForDoubleValueTests(double random1, double random2, double random3) throws Exception { createIndex("test"); updateMapping("test", builder -> { builder.startObject("test_double").field("type", "double").endObject(); @@ -1385,7 +1587,7 @@ private void createTestDataForDoubleValueTests(double random1, double random2, d }); } - private void createTestDataForFloatValueTests(float random1, float random2, float random3) throws Exception, IOException { + private void createTestDataForFloatValueTests(float random1, float random2, float random3) throws Exception { createIndex("test"); updateMapping("test", builder -> { builder.startObject("test_float").field("type", 
"float").endObject(); @@ -1421,7 +1623,7 @@ private void indexSimpleDocumentWithTrueValues(Long randomLongDate) throws IOExc * Creates test data for all numeric get* methods. All values random and different from the other numeric fields already generated. * It returns a map containing the field name and its randomly generated value to be later used in checking the returned values. */ - private Map createTestDataForNumericValueTypes(Supplier randomGenerator) throws Exception, IOException { + private Map createTestDataForNumericValueTypes(Supplier randomGenerator) throws Exception { Map map = new HashMap<>(); createIndex("test"); updateMappingForNumericValuesTests("test"); @@ -1460,7 +1662,7 @@ private Map createTestDataForNumericValueTypes(Supplier r return map; } - private void updateMappingForNumericValuesTests(String indexName) throws Exception { + private static void updateMappingForNumericValuesTests(String indexName) throws Exception { updateMapping(indexName, builder -> { for(String field : fieldsNames) { builder.startObject(field).field("type", field.substring(5)).endObject(); @@ -1477,15 +1679,25 @@ private void assertThrowsWritesUnsupportedForUpdate(ThrowingRunnable r) { assertThrowsUnsupportedAndExpectErrorMessage(r, "Writes not supported"); } - private void validateErrorsForDateTimeTestsWithoutCalendar(CheckedFunction method) { + private void validateErrorsForDateTestsWithoutCalendar(CheckedFunction method) { SQLException sqle; for (Entry, SQLType> field : dateTimeTestingFields.entrySet()) { sqle = expectThrows(SQLException.class, () -> method.apply(field.getKey().v1())); assertEquals( - format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Date", field.getKey().v2(), field.getValue()), sqle.getMessage()); } } + + private void validateErrorsForTimeTestsWithoutCalendar(CheckedFunction method) { + SQLException sqle; + for (Entry, SQLType> field : dateTimeTestingFields.entrySet()) { + sqle = expectThrows(SQLException.class, () -> method.apply(field.getKey().v1())); + assertEquals( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Time", + field.getKey().v2(), field.getValue()), sqle.getMessage()); + } + } private void validateErrorsForDateTimeTestsWithCalendar(Calendar c, CheckedBiFunction method) { SQLException sqle; @@ -1515,31 +1727,27 @@ private Double getMaxLongPlusOne() { } private Connection esJdbc(String timeZoneId) throws SQLException { - return randomBoolean() ? 
useDriverManager(timeZoneId) : useDataSource(timeZoneId); - } - - private Connection useDriverManager(String timeZoneId) throws SQLException { - String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); - String address = "jdbc:es://" + elasticsearchAddress; Properties connectionProperties = connectionProperties(); connectionProperties.put(JDBC_TIMEZONE, timeZoneId); - Connection connection = DriverManager.getConnection(address, connectionProperties); - + Connection connection = esJdbc(connectionProperties); assertNotNull("The timezone should be specified", connectionProperties.getProperty(JDBC_TIMEZONE)); return connection; } - private Connection useDataSource(String timeZoneId) throws SQLException { - String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); - EsDataSource dataSource = new EsDataSource(); - String address = "jdbc:es://" + elasticsearchAddress; - dataSource.setUrl(address); + private Connection esWithLeniency(boolean multiValueLeniency) throws SQLException { + String property = "field.multi.value.leniency"; Properties connectionProperties = connectionProperties(); - connectionProperties.put(JDBC_TIMEZONE, timeZoneId); - dataSource.setProperties(connectionProperties); - Connection connection = dataSource.getConnection(); - - assertNotNull("The timezone should be specified", connectionProperties.getProperty(JDBC_TIMEZONE)); + connectionProperties.setProperty(property, Boolean.toString(multiValueLeniency)); + Connection connection = esJdbc(connectionProperties); + assertNotNull("The leniency should be specified", connectionProperties.getProperty(property)); return connection; } + + private String asDateString(long millis) { + return of(millis, timeZoneId); + } + + private ZoneId getZoneFromOffset(Long randomLongDate) { + return ZoneId.of(ZoneId.of(timeZoneId).getRules().getOffset(Instant.ofEpochMilli(randomLongDate)).toString()); + } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java index e69d229b6f170..4efbbaddc04cb 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java @@ -19,6 +19,7 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.net.URL; +import java.net.URLConnection; import java.nio.charset.StandardCharsets; import java.sql.Connection; import java.sql.ResultSet; @@ -107,7 +108,7 @@ protected ResultSet executeJdbcQuery(Connection con, String query) throws SQLExc } protected int fetchSize() { - return between(1, 500); + return between(1, 150); } // TODO: use UTC for now until deciding on a strategy for handling date extraction @@ -195,6 +196,9 @@ public interface Parser { @SuppressForbidden(reason = "test reads from jar") public static InputStream readFromJarUrl(URL source) throws IOException { - return source.openStream(); + URLConnection con = source.openConnection(); + // do not cache files (to avoid keeping file handles around) + con.setUseCaches(false); + return con.getInputStream(); } } \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SqlSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SqlSpecTestCase.java index e8ba7eb30b048..4ad27d5618c76 100644 ---
a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SqlSpecTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SqlSpecTestCase.java @@ -75,6 +75,13 @@ public SqlSpecTestCase(String fileName, String groupName, String testName, Integ this.query = query; } + @Override + protected int fetchSize() { + // using a smaller fetchSize for nested documents' tests to uncover bugs + // similar to https://github.com/elastic/elasticsearch/issues/42581 + return randomIntBetween(1, 20); + } + @Override protected final void doTest() throws Throwable { // we skip the tests in case of these locales because ES-SQL is Locale-insensitive for now diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index 6f070491df168..cb2edfc520893 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -281,6 +281,13 @@ public void testSelectScoreInScalar() throws Exception { containsString("line 1:12: [SCORE()] cannot be an argument to a function")); } + @Override + public void testHardLimitForSortOnAggregate() throws Exception { + index("{\"a\": 1, \"b\": 2}"); + expectBadRequest(() -> runSql(randomMode(), "SELECT max(a) max FROM test GROUP BY b ORDER BY max LIMIT 10000"), + containsString("The maximum LIMIT for aggregate sorting is [512], received [10000]")); + } + protected void expectBadRequest(CheckedSupplier, Exception> code, Matcher errorMessageMatcher) { try { Map result = code.get(); @@ -533,46 +540,7 @@ public void testBasicQueryText() throws IOException { } public void testNextPageText() throws IOException { - int size = 20; - String[] docs = new String[size]; - for (int i = 0; i < size; i++) { - docs[i] = "{\"text\":\"text" + i + "\", \"number\":" + i + "}\n"; - } - index(docs); - - String request = "{\"query\":\"SELECT text, number, number + 5 AS sum FROM test ORDER BY number\", \"fetch_size\":2}"; - - String cursor = null; - for (int i = 0; i < 20; i += 2) { - Tuple response; - if (i == 0) { - response = runSqlAsText(StringUtils.EMPTY, new StringEntity(request, ContentType.APPLICATION_JSON), "text/plain"); - } else { - response = runSqlAsText(StringUtils.EMPTY, new StringEntity("{\"cursor\":\"" + cursor + "\"}", - ContentType.APPLICATION_JSON), "text/plain"); - } - - StringBuilder expected = new StringBuilder(); - if (i == 0) { - expected.append(" text | number | sum \n"); - expected.append("---------------+---------------+---------------\n"); - } - expected.append(String.format(Locale.ROOT, "%-15s|%-15d|%-15d\n", "text" + i, i, i + 5)); - expected.append(String.format(Locale.ROOT, "%-15s|%-15d|%-15d\n", "text" + (i + 1), i + 1, i + 6)); - cursor = response.v2(); - assertEquals(expected.toString(), response.v1()); - assertNotNull(cursor); - } - Map expected = new HashMap<>(); - expected.put("rows", emptyList()); - assertResponse(expected, runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), - StringUtils.EMPTY)); - - Map response = runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), - "/close"); - assertEquals(true, response.get("succeeded")); - - assertEquals(0, getNumberOfSearchContexts("test")); + executeQueryWithNextPage("text/plain", " text | number | sum \n", "%-15s|%-15d|%-15d\n"); } 
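For context on the pagination flow that `executeQueryWithNextPage` (defined just below) exercises: the SQL REST endpoint pages results by replaying the `cursor` it hands back, for the text-based formats (`txt`, `csv`, `tsv`) that cursor travels in the `Cursor` response header rather than in the body, and closing it releases the server-side search context, which is why the test finishes by asserting zero search contexts on `test`. A minimal standalone sketch of the same round trip, assuming a plain low-level `RestClient` and the same `test` index with `text`/`number` fields used above; the class and method names are hypothetical, not part of this change:

import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public final class SqlTextPaginationSketch {
    // Fetch a text-format SQL result two rows at a time, following the Cursor header until it is no longer returned.
    static void printAllPages(RestClient client) throws Exception {
        Request first = new Request("POST", "/_sql?format=txt");
        first.setJsonEntity("{\"query\":\"SELECT text, number FROM test ORDER BY number\",\"fetch_size\":2}");
        Response response = client.performRequest(first);
        System.out.print(EntityUtils.toString(response.getEntity()));
        String cursor = response.getHeader("Cursor");
        while (cursor != null) {
            Request next = new Request("POST", "/_sql?format=txt");
            next.setJsonEntity("{\"cursor\":\"" + cursor + "\"}");
            response = client.performRequest(next);
            System.out.print(EntityUtils.toString(response.getEntity()));
            cursor = response.getHeader("Cursor");
        }
        // An abandoned cursor can be released explicitly with POST /_sql/close and a {"cursor":"..."} body.
    }
}

The sketch mirrors the test's fixed fetch_size of 2 so that each page holds exactly two rows, making it easy to see where one page ends and the next begins.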
// CSV/TSV tests @@ -614,6 +582,10 @@ public void testQueryWithoutHeaderInCSV() throws IOException { Tuple response = runSqlAsText(query, "text/csv; header=absent"); assertEquals(expected, response.v1()); } + + public void testNextPageCSV() throws IOException { + executeQueryWithNextPage("text/csv; header=present", "text,number,sum\r\n", "%s,%d,%d\r\n"); + } public void testQueryInTSV() throws IOException { index("{\"name\":" + toJson("first") + ", \"number\" : 1 }", @@ -632,6 +604,55 @@ public void testQueryInTSV() throws IOException { response = runSqlAsTextFormat(query, "tsv"); assertEquals(expected, response.v1()); } + + public void testNextPageTSV() throws IOException { + executeQueryWithNextPage("text/tab-separated-values", "text\tnumber\tsum\n", "%s\t%d\t%d\n"); + } + + private void executeQueryWithNextPage(String format, String expectedHeader, String expectedLineFormat) throws IOException { + int size = 20; + String[] docs = new String[size]; + for (int i = 0; i < size; i++) { + docs[i] = "{\"text\":\"text" + i + "\", \"number\":" + i + "}\n"; + } + index(docs); + + String request = "{\"query\":\"SELECT text, number, number + 5 AS sum FROM test ORDER BY number\", \"fetch_size\":2}"; + + String cursor = null; + for (int i = 0; i < 20; i += 2) { + Tuple response; + if (i == 0) { + response = runSqlAsText(StringUtils.EMPTY, new StringEntity(request, ContentType.APPLICATION_JSON), format); + } else { + response = runSqlAsText(StringUtils.EMPTY, new StringEntity("{\"cursor\":\"" + cursor + "\"}", + ContentType.APPLICATION_JSON), format); + } + + StringBuilder expected = new StringBuilder(); + if (i == 0) { + expected.append(expectedHeader); + if ("text/plain".equals(format)) { + expected.append("---------------+---------------+---------------\n"); + } + } + expected.append(String.format(Locale.ROOT, expectedLineFormat, "text" + i, i, i + 5)); + expected.append(String.format(Locale.ROOT, expectedLineFormat, "text" + (i + 1), i + 1, i + 6)); + cursor = response.v2(); + assertEquals(expected.toString(), response.v1()); + assertNotNull(cursor); + } + Map expected = new HashMap<>(); + expected.put("rows", emptyList()); + assertResponse(expected, runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), + StringUtils.EMPTY)); + + Map response = runSql(new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), + "/close"); + assertEquals(true, response.get("succeeded")); + + assertEquals(0, getNumberOfSearchContexts("test")); + } private Tuple runSqlAsText(String sql, String accept) throws IOException { return runSqlAsText(StringUtils.EMPTY, new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON), accept); diff --git a/x-pack/plugin/sql/qa/src/main/resources/agg-ordering.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/agg-ordering.sql-spec index 3edbe94057a91..9a193d76b3166 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/agg-ordering.sql-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/agg-ordering.sql-spec @@ -29,9 +29,24 @@ SELECT MAX(salary) AS max, MIN(salary) AS min FROM test_emp HAVING MIN(salary) > aggWithoutAlias SELECT MAX(salary) AS max FROM test_emp GROUP BY gender ORDER BY MAX(salary); +aggWithoutAliasWithLimit +SELECT MAX(salary) AS max FROM test_emp GROUP BY gender ORDER BY MAX(salary) LIMIT 3; + +aggWithoutAliasWithLimitDesc +SELECT MAX(salary) AS max FROM test_emp GROUP BY gender ORDER BY MAX(salary) DESC LIMIT 3; + aggWithAlias SELECT MAX(salary) AS m FROM test_emp GROUP BY gender ORDER BY m;
+aggOrderByCountWithLimit +SELECT MAX(salary) AS max, COUNT(*) AS c FROM test_emp GROUP BY gender ORDER BY c LIMIT 3; + +aggOrderByCountWithLimitDescAndGrouping +SELECT gender, COUNT(*) AS c FROM test_emp GROUP BY gender ORDER BY c DESC LIMIT 5; + +aggOrderByCountWithLimitDesc +SELECT MAX(salary) AS max, COUNT(*) AS c FROM test_emp GROUP BY gender ORDER BY c DESC LIMIT 3; + multipleAggsThatGetRewrittenWithoutAlias SELECT MAX(salary) AS max, MIN(salary) AS min FROM test_emp GROUP BY gender ORDER BY MAX(salary); @@ -56,12 +71,21 @@ SELECT MIN(salary) AS min, COUNT(*) AS c FROM test_emp GROUP BY gender HAVING c aggNotSpecifiedInTheAggregateAndGroupWithHaving SELECT gender, MIN(salary) AS min, COUNT(*) AS c FROM test_emp GROUP BY gender HAVING c > 1 ORDER BY MAX(salary), gender; +aggNotSpecifiedInTheAggregateAndGroupWithHavingWithLimit +SELECT gender, MIN(salary) AS min, COUNT(*) AS c FROM test_emp GROUP BY gender HAVING c > 1 ORDER BY MAX(salary), c LIMIT 5; + +aggNotSpecifiedInTheAggregateAndGroupWithHavingWithLimitAndDirection +SELECT gender, MIN(salary) AS min, COUNT(*) AS c FROM test_emp GROUP BY gender HAVING c > 1 ORDER BY MAX(salary) ASC, c DESC LIMIT 5; + groupAndAggNotSpecifiedInTheAggregateWithHaving SELECT gender, MIN(salary) AS min, COUNT(*) AS c FROM test_emp GROUP BY gender HAVING c > 1 ORDER BY gender, MAX(salary); multipleAggsThatGetRewrittenWithAliasOnAMediumGroupBy SELECT languages, MAX(salary) AS max, MIN(salary) AS min FROM test_emp GROUP BY languages ORDER BY max; +multipleAggsThatGetRewrittenWithAliasOnAMediumGroupByWithLimit +SELECT languages, MAX(salary) AS max, MIN(salary) AS min FROM test_emp GROUP BY languages ORDER BY max DESC LIMIT 5; + multipleAggsThatGetRewrittenWithAliasOnALargeGroupBy SELECT emp_no, MAX(salary) AS max, MIN(salary) AS min FROM test_emp GROUP BY emp_no ORDER BY max; @@ -75,13 +99,40 @@ aggNotSpecifiedWithHavingOnLargeGroupBy SELECT MAX(salary) AS max FROM test_emp GROUP BY emp_no HAVING AVG(salary) > 1000 ORDER BY MIN(salary); aggWithTieBreakerDescAsc -SELECT emp_no, MIN(languages) AS min FROM test_emp GROUP BY emp_no ORDER BY MIN(languages) DESC NULLS FIRST, emp_no ASC; +SELECT emp_no, MIN(languages) AS min FROM test_emp GROUP BY emp_no ORDER BY MIN(languages) DESC NULLS FIRST, emp_no ASC LIMIT 50; aggWithTieBreakerDescDesc -SELECT emp_no, MIN(languages) AS min FROM test_emp GROUP BY emp_no ORDER BY MIN(languages) DESC NULLS FIRST, emp_no DESC; +SELECT emp_no, MIN(languages) AS min FROM test_emp GROUP BY emp_no ORDER BY MIN(languages) DESC NULLS FIRST, emp_no DESC LIMIT 50; aggWithTieBreakerAscDesc -SELECT emp_no, MIN(languages) AS min FROM test_emp GROUP BY emp_no ORDER BY MAX(languages) ASC NULLS FIRST, emp_no DESC; +SELECT emp_no, MIN(languages) AS min FROM test_emp GROUP BY emp_no ORDER BY MAX(languages) ASC NULLS FIRST, emp_no DESC LIMIT 50; aggWithMixOfOrdinals SELECT gender AS g, MAX(salary) AS m FROM test_emp GROUP BY gender ORDER BY 2 DESC LIMIT 3; + +multipleGroupingsAndOrderingByGroups_1 +SELECT gender AS g, first_name, last_name AS l FROM test_emp GROUP BY first_name, g, last_name ORDER BY gender, first_name, last_name; + +multipleGroupingsAndOrderingByGroups_2 +SELECT gender AS g, first_name, last_name AS l FROM test_emp GROUP BY first_name, g, last_name ORDER BY gender DESC, first_name DESC, last_name ASC; + +multipleGroupingsAndOrderingByGroups_3 +SELECT gender AS g, first_name AS f, last_name AS l FROM test_emp GROUP BY f, g, l ORDER BY l, g, f; + +multipleGroupingsAndOrderingByGroups_4 +SELECT gender AS g, first_name, last_name 
FROM test_emp GROUP BY g, last_name, first_name ORDER BY gender, first_name, last_name; + +multipleGroupingsAndOrderingByGroups_5 +SELECT gender AS g, first_name, last_name AS l FROM test_emp GROUP BY first_name, g, last_name ORDER BY gender DESC, first_name, last_name; + +multipleGroupingsAndOrderingByGroups_6 +SELECT gender AS g, first_name, last_name AS l FROM test_emp GROUP BY first_name, g, last_name ORDER BY gender, first_name DESC, last_name; + +multipleGroupingsAndOrderingByGroups_7 +SELECT gender AS g, first_name AS f, last_name AS l FROM test_emp GROUP BY f, gender, l ORDER BY l, g DESC, f DESC; + +multipleGroupingsAndOrderingByGroups_8 +SELECT gender AS g, first_name, last_name FROM test_emp GROUP BY g, last_name, first_name ORDER BY gender ASC, first_name DESC, last_name ASC; + +multipleGroupingsAndOrderingByGroupsWithFunctions +SELECT first_name f, last_name l, gender g, CONCAT(first_name, last_name) c FROM test_emp GROUP BY gender, l, f, c ORDER BY gender, c DESC, first_name, last_name ASC; diff --git a/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec index 7f039d9c4951e..fb185282f79af 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/agg.csv-spec @@ -85,8 +85,8 @@ SELECT SUM(salary) FROM test_emp; aggregateWithCastPruned SELECT CAST(SUM(salary) AS INTEGER) FROM test_emp; - SUM(salary) -------------- + CAST(SUM(salary) AS INTEGER) +----------------------------- 4824855 ; @@ -200,6 +200,23 @@ ROUND(SQRT(CAST(EXP(languages) AS SMALLINT)),2):d| COUNT(*):l null |10 ; +groupByRoundWithTwoParams +SELECT ROUND(YEAR("birth_date"), -2) FROM test_emp GROUP BY ROUND(YEAR("birth_date"), -2); + +ROUND(YEAR("birth_date"), -2) +----------------------------- +null +2000 +; + +groupByTruncateWithTwoParams +SELECT TRUNCATE(YEAR("birth_date"), -2) FROM test_emp GROUP BY TRUNCATE(YEAR("birth_date"), -2); + +TRUNCATE(YEAR("birth_date"), -2) +-------------------------------- +null +1900 +; // // Grouping functions @@ -397,6 +414,26 @@ SELECT COUNT(ALL last_name)=COUNT(ALL first_name) AS areEqual, COUNT(ALL first_n false |90 |100 ; +topHitsAsMinAndMax +schema::min:s|max:s|first:s|last:s +SELECT MIN(first_name) as min, MAX(first_name) as max, FIRST(first_name) as first, LAST(first_name) as last FROM test_emp; + + min | max | first | last +---------------+---------------+--------------+---------- + Alejandro | Zvonko | Alejandro | Zvonko +; + +topHitsAsMinAndMaxAndGroupBy +schema::gender:s|min:s|max:s|first:s|last:s +SELECT gender, MIN(first_name) as min, MAX(first_name) as max, FIRST(first_name) as first, LAST(first_name) as last FROM test_emp GROUP BY gender ORDER BY gender; + + gender | min | max | first | last +---------------+---------------+--------------+---------------+---------- +null | Berni | Patricio | Berni | Patricio +F | Alejandro | Xinglin | Alejandro | Xinglin +M | Amabile | Zvonko | Amabile | Zvonko +; + topHitsWithOneArgAndGroupBy schema::gender:s|first:s|last:s SELECT gender, FIRST(first_name) as first, LAST(first_name) as last FROM test_emp GROUP BY gender ORDER BY gender; diff --git a/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec index 7406ea488308d..e24297f7fa9b3 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec @@ -50,6 +50,28 @@ SELECT emp_no * 2 AS e FROM test_emp GROUP BY e ORDER BY e; groupByModScalar SELECT (emp_no % 
3) + 1 AS e FROM test_emp GROUP BY e ORDER BY e; +// group by nested functions with no alias +groupByTruncate +SELECT CAST(TRUNCATE(EXTRACT(YEAR FROM "birth_date")) AS INTEGER) FROM test_emp GROUP BY CAST(TRUNCATE(EXTRACT(YEAR FROM "birth_date")) AS INTEGER) ORDER BY CAST(TRUNCATE(EXTRACT(YEAR FROM "birth_date")) AS INTEGER); +groupByRound +SELECT CAST(ROUND(EXTRACT(YEAR FROM "birth_date")) AS INTEGER) FROM test_emp GROUP BY CAST(ROUND(EXTRACT(YEAR FROM "birth_date")) AS INTEGER) ORDER BY CAST(ROUND(EXTRACT(YEAR FROM "birth_date")) AS INTEGER); +groupByAtan2 +SELECT ATAN2(YEAR("birth_date"), 5) FROM test_emp GROUP BY ATAN2(YEAR("birth_date"), 5) ORDER BY ATAN2(YEAR("birth_date"), 5); +groupByPower +SELECT POWER(YEAR("birth_date"), 2) FROM test_emp GROUP BY POWER(YEAR("birth_date"), 2) ORDER BY POWER(YEAR("birth_date"), 2); +groupByPowerWithCast +SELECT CAST(POWER(YEAR("birth_date"), 2) AS DOUBLE) FROM test_emp GROUP BY CAST(POWER(YEAR("birth_date"), 2) AS DOUBLE) ORDER BY CAST(POWER(YEAR("birth_date"), 2) AS DOUBLE); +groupByConcat +SELECT LEFT(CONCAT("first_name", "last_name"), 3) FROM test_emp GROUP BY LEFT(CONCAT("first_name", "last_name"), 3) ORDER BY LEFT(CONCAT("first_name", "last_name"), 3) LIMIT 15; +groupByLocateWithTwoParams +SELECT LOCATE('a', CONCAT("first_name", "last_name")) FROM test_emp GROUP BY LOCATE('a', CONCAT("first_name", "last_name")) ORDER BY LOCATE('a', CONCAT("first_name", "last_name")); +groupByLocateWithThreeParams +SELECT LOCATE('a', CONCAT("first_name", "last_name"), 3) FROM test_emp GROUP BY LOCATE('a', CONCAT("first_name", "last_name"), 3) ORDER BY LOCATE('a', CONCAT("first_name", "last_name"), 3); +groupByRoundAndTruncateWithTwoParams +SELECT ROUND(SIN(TRUNCATE("salary", 2)), 2) FROM "test_emp" GROUP BY ROUND(SIN(TRUNCATE("salary", 2)), 2) ORDER BY ROUND(SIN(TRUNCATE("salary", 2)), 2) LIMIT 5; +groupByRoundAndTruncateWithOneParam +SELECT ROUND(SIN(TRUNCATE("languages"))) FROM "test_emp" GROUP BY ROUND(SIN(TRUNCATE("languages"))) ORDER BY ROUND(SIN(TRUNCATE("languages"))) LIMIT 5; + // multiple group by groupByMultiOnText SELECT gender g, languages l FROM "test_emp" GROUP BY gender, languages ORDER BY gender ASC, languages ASC; @@ -89,6 +111,8 @@ SELECT (emp_no % 3) + 1 AS e, (languages % 3) + 1 AS l FROM test_emp GROUP BY e, // COUNT aggCountImplicit +SELECT COUNT(*) FROM test_emp; +aggCountImplicitAlias SELECT COUNT(*) AS count FROM test_emp; aggCountImplicitWithCast SELECT CAST(COUNT(*) AS INT) c FROM "test_emp"; @@ -109,6 +133,8 @@ SELECT gender g, CAST(COUNT(*) AS INT) c FROM "test_emp" WHERE emp_no < 10020 GR aggCountWithAlias SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g ORDER BY gender; countDistinct +SELECT COUNT(DISTINCT "hire_date") FROM test_emp; +countDistinctAlias SELECT COUNT(DISTINCT hire_date) AS count FROM test_emp; countDistinctAndCountSimpleWithAlias SELECT COUNT(*) cnt, COUNT(DISTINCT first_name) as names, gender FROM test_emp GROUP BY gender ORDER BY gender; @@ -149,10 +175,7 @@ aggCountOnColumnAndMultipleHaving SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND c < 70 ORDER BY gender ; aggCountOnColumnAndMultipleHavingEquals SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND c = 63 ORDER BY gender ; -// -// Count(column) = Column(*) which is a bug -// https://github.com/elastic/elasticsearch/issues/34549 -// + aggCountOnColumnAndMultipleHavingWithLimit SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND c < 70 ORDER BY gender LIMIT 1; 
aggCountOnColumnAndHavingBetween-Ignore @@ -250,6 +273,8 @@ aggMinWithAlias SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g ORDER BY gender; aggMinOnDateTime SELECT gender, MIN(birth_date) m FROM "test_emp" GROUP BY gender ORDER BY gender; +aggMinOnDateTimeCastAsDate +SELECT gender, YEAR(CAST(MIN(birth_date) AS DATE)) m FROM "test_emp" GROUP BY gender ORDER BY gender; // Conditional MIN aggMinWithHaving @@ -306,6 +331,8 @@ aggMaxWithAlias SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g ORDER BY gender; aggMaxOnDateTime SELECT gender, MAX(birth_date) m FROM "test_emp" GROUP BY gender ORDER BY gender; +aggMaxOnDateTimeCastAsDate +SELECT gender, YEAR(CAST(MAX(birth_date) AS DATE)) m FROM "test_emp" GROUP BY gender ORDER BY gender; aggAvgAndMaxWithLikeFilter SELECT CAST(AVG(salary) AS LONG) AS avg, CAST(SUM(salary) AS LONG) AS s FROM "test_emp" WHERE first_name LIKE 'G%'; diff --git a/x-pack/plugin/sql/qa/src/main/resources/arithmetic.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/arithmetic.csv-spec index 4d8a9fc3fc2cf..e117866b5e9d6 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/arithmetic.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/arithmetic.csv-spec @@ -5,9 +5,14 @@ // the standard behavior here is to return the constant for each element // the weird thing is that an actual query needs to be ran arithmeticWithFrom -SELECT 5 - 2 x FROM test_emp; +SELECT 5 - 2 x FROM test_emp LIMIT 5; -x + x:i +--------------- +3 +3 +3 +3 3 ; diff --git a/x-pack/plugin/sql/qa/src/main/resources/case-functions.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/case-functions.sql-spec index 9024497554983..d3a92b5eeecce 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/case-functions.sql-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/case-functions.sql-spec @@ -16,4 +16,10 @@ ucaseInline1 SELECT UCASE('ElAsTiC') upper; ucaseInline3 -SELECT UCASE(' elastic ') upper; \ No newline at end of file +SELECT UCASE(' elastic ') upper; + +multipleGroupingsAndOrderingByGroupsWithFunctions_1 +SELECT first_name f, last_name l, gender g, CONCAT(UCASE(first_name), LCASE(last_name)) c FROM test_emp GROUP BY gender, l, f, c ORDER BY c DESC, first_name, l ASC, g; + +multipleGroupingsAndOrderingByGroupsWithFunctions_2 +SELECT first_name f, last_name l, LCASE(gender) g, CONCAT(UCASE(first_name), LCASE(last_name)) c FROM test_emp GROUP BY f, LCASE(gender), l, c ORDER BY c DESC, first_name, l ASC, g; diff --git a/x-pack/plugin/sql/qa/src/main/resources/command-sys.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/command-sys.csv-spec deleted file mode 100644 index 2ed05aadb6c45..0000000000000 --- a/x-pack/plugin/sql/qa/src/main/resources/command-sys.csv-spec +++ /dev/null @@ -1,11 +0,0 @@ -// -// Sys Commands -// - -sysTableTypes -SYS TABLE TYPES; - - TABLE_TYPE:s -BASE TABLE -VIEW -; diff --git a/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec index 35db16541babf..cd4f7f02f0704 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/date.csv-spec @@ -3,7 +3,7 @@ // currentDateKeywordWithDivision -SELECT YEAR(CURRENT_TIMESTAMP) / 1000 AS result; +SELECT YEAR(CURRENT_DATE) / 1000 AS result; result --------------- @@ -11,7 +11,7 @@ SELECT YEAR(CURRENT_TIMESTAMP) / 1000 AS result; ; currentDateFunctionNoArgsWithDivision -SELECT YEAR(CURRENT_TIMESTAMP()) / 1000 AS result; +SELECT YEAR(CURRENT_DATE()) / 1000 AS result; result --------------- @@ -36,20 +36,20 @@ SELECT 
TRUNCATE(YEAR(TODAY() - INTERVAL 50 YEARS) / 1000) AS result; currentDateFilter -SELECT first_name FROM test_emp WHERE hire_date > CURRENT_DATE() - INTERVAL 25 YEARS ORDER BY first_name ASC LIMIT 10; +SELECT first_name FROM test_emp WHERE hire_date > CURRENT_DATE() - INTERVAL 35 YEARS ORDER BY first_name ASC LIMIT 10; first_name ----------------- -Kazuhito -Kenroku -Lillian -Mayumi -Mingsen -Sailaja -Saniya -Shahaf -Suzette -Tuval +Alejandro +Amabile +Anneke +Anoosh +Arumugam +Basil +Berhard +Berni +Bezalel +Bojan ; currentDateFilterScript diff --git a/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec index 8d9a65d1b85b6..bfb28775bc3b6 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/datetime-interval.csv-spec @@ -182,6 +182,26 @@ SELECT -2 * INTERVAL '1 23:45' DAY TO MINUTES AS result; -3 23:30:00.0 ; +intervalHoursMultiply +SELECT 4 * -INTERVAL '2' HOURS AS result1, -5 * -INTERVAL '3' HOURS AS result2; + result1 | result2 +---------------+-------------- +-0 08:00:00.0 | +0 15:00:00.0 +; + +intervalAndFieldMultiply +schema::languages:byte|result:string +SELECT languages, CAST (languages * INTERVAL '1 10:30' DAY TO MINUTES AS string) AS result FROM test_emp ORDER BY emp_no LIMIT 5; + + languages | result +---------------+--------------------------------------------- +2 | +2 21:00:00.0 +5 | +7 04:30:00.0 +4 | +5 18:00:00.0 +5 | +7 04:30:00.0 +1 | +1 10:30:00.0 +; + dateMinusInterval SELECT CAST('2018-05-13T12:34:56' AS DATETIME) - INTERVAL '2-8' YEAR TO MONTH AS result; diff --git a/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec index 50df0f7dfe67e..367b5d0ddfdcf 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec @@ -346,6 +346,22 @@ SELECT YEAR(NOW() - INTERVAL 2 YEARS) / 1000 AS result; 2 ; +dateAndIntervalPaginated +SELECT YEAR(birth_date - INTERVAL 2 YEARS) / 1000 AS result FROM test_emp ORDER BY birth_date LIMIT 10; + + result +--------------- +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +; currentTimestampFilter SELECT first_name FROM test_emp WHERE hire_date > NOW() - INTERVAL 100 YEARS ORDER BY first_name ASC LIMIT 10; @@ -381,3 +397,11 @@ Bezalel Bojan ; + +// datetime in aggregations +doubleCastOfDateInAggs +SELECT CAST (CAST (birth_date AS VARCHAR) AS TIMESTAMP) a FROM test_emp WHERE YEAR(birth_date) = 1965 GROUP BY a; + a:ts +--------------- +1965-01-03T00:00:00Z +; \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec index bb572ecca9d1a..1728afe43b86b 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec @@ -600,8 +600,8 @@ M |57 ; groupByAndAggExpression -// tag::groupByAndAggExpression schema::g:s|salary:i +// tag::groupByAndAggExpression SELECT gender AS g, ROUND((MIN(salary) / 100)) AS salary FROM emp GROUP BY gender; g | salary @@ -897,9 +897,113 @@ Frank Herbert |Dune |604 |1965-06-01T00:00:00Z Alastair Reynolds|Revelation Space |585 |2000-03-15T00:00:00Z James S.A. 
Corey |Leviathan Wakes |561 |2011-06-02T00:00:00Z +// end::orderByBasic +; +orderByGroup +schema::g:s|c:i +// tag::orderByGroup +SELECT gender AS g, COUNT(*) AS c FROM emp GROUP BY gender ORDER BY g DESC; -// end::orderByBasic + g | c +---------------+--------------- +M |57 +F |33 +null |10 + +// end::orderByGroup +; + +orderByAgg +schema::g:s|salary:i +// tag::orderByAgg +SELECT gender AS g, MIN(salary) AS salary FROM emp GROUP BY gender ORDER BY salary DESC; + + g | salary +---------------+--------------- +F |25976 +M |25945 +null |25324 + +// end::orderByAgg +; + +simpleMatch +// tag::simpleMatch +SELECT author, name FROM library WHERE MATCH(author, 'frank'); + + author | name +---------------+------------------- +Frank Herbert |Dune +Frank Herbert |Dune Messiah +Frank Herbert |Children of Dune +Frank Herbert |God Emperor of Dune + +// end::simpleMatch +; + +multiFieldsMatch +// tag::multiFieldsMatch +SELECT author, name, SCORE() FROM library WHERE MATCH('author^2,name^5', 'frank dune'); + + author | name | SCORE() +---------------+-------------------+--------------- +Frank Herbert |Dune |11.443176 +Frank Herbert |Dune Messiah |9.446629 +Frank Herbert |Children of Dune |8.043278 +Frank Herbert |God Emperor of Dune|7.0029488 + +// end::multiFieldsMatch +; + +optionalParamsForMatch +// tag::optionalParamsForMatch +SELECT author, name, SCORE() FROM library WHERE MATCH(name, 'to the star', 'operator=or;cutoff_frequency=0.2'); + + author | name | SCORE() +-----------------+------------------------------------+--------------- +Peter F. Hamilton|Pandora's Star |3.0997515 +Douglas Adams |The Hitchhiker's Guide to the Galaxy|3.1756816 + +// end::optionalParamsForMatch +; + +simpleQueryQuery +// tag::simpleQueryQuery +SELECT author, name, SCORE() FROM library WHERE QUERY('name:dune'); + + author | name | SCORE() +---------------+-------------------+--------------- +Frank Herbert |Dune |2.2886353 +Frank Herbert |Dune Messiah |1.8893257 +Frank Herbert |Children of Dune |1.6086556 +Frank Herbert |God Emperor of Dune|1.4005898 +// end::simpleQueryQuery +; + +advancedQueryQuery +// tag::advancedQueryQuery +SELECT author, name, page_count, SCORE() FROM library WHERE QUERY('_exists_:"author" AND page_count:>200 AND (name:/star.*/ OR name:duna~)'); + + author | name | page_count | SCORE() +------------------+-------------------+---------------+--------------- +Frank Herbert |Dune |604 |3.7164764 +Frank Herbert |Dune Messiah |331 |3.4169943 +Frank Herbert |Children of Dune |408 |3.2064917 +Frank Herbert |God Emperor of Dune|454 |3.0504425 +Peter F. Hamilton |Pandora's Star |768 |3.0 +Robert A. 
Heinlein|Starship Troopers |335 |3.0 +// end::advancedQueryQuery +; + +optionalParameterQuery +// tag::optionalParameterQuery +SELECT author, name, SCORE() FROM library WHERE QUERY('dune god', 'default_operator=and;default_field=name'); + + author | name | SCORE() +---------------+-------------------+--------------- +Frank Herbert |God Emperor of Dune|3.6984892 +// end::optionalParameterQuery ; orderByScore @@ -2234,18 +2338,28 @@ SELECT WEEK(CAST('1988-01-05T09:22:10Z' AS TIMESTAMP)) AS week, ISOWEEK(CAST('19 currentDate-Ignore -// tag::curDate -SELECT CURRENT_TIMESTAMP AS result; +// tag::currentDate +SELECT CURRENT_DATE AS result; result ------------------------ 2018-12-12 -// end::curDate +// end::currentDate ; currentDateFunction-Ignore +// tag::currentDateFunction +SELECT CURRENT_DATE() AS result; + + result +------------------------ +2018-12-12 +// end::currentDateFunction +; + +curDateFunction-Ignore // tag::curDateFunction -SELECT CURRENT_TIMESTAMP() AS result; +SELECT CURDATE() AS result; result ------------------------ @@ -2265,15 +2379,15 @@ SELECT TODAY() AS result; filterToday // tag::filterToday -SELECT first_name FROM emp WHERE hire_date > TODAY() - INTERVAL 25 YEARS ORDER BY first_name ASC LIMIT 5; +SELECT first_name FROM emp WHERE hire_date > TODAY() - INTERVAL 35 YEARS ORDER BY first_name ASC LIMIT 5; first_name ------------ -Kazuhito -Kenroku -Lillian -Mayumi -Mingsen +Alejandro +Amabile +Anneke +Anoosh +Arumugam // end::filterToday ; @@ -2338,23 +2452,47 @@ Arumugam //////////// limitationSubSelect // tag::limitationSubSelect -SELECT * FROM (SELECT first_name, last_name FROM emp WHERE last_name NOT LIKE '%a%') WHERE first_name LIKE 'A%'; +SELECT * FROM (SELECT first_name, last_name FROM emp WHERE last_name NOT LIKE '%a%') WHERE first_name LIKE 'A%' ORDER BY 1; first_name | last_name ---------------+--------------- -Anneke |Preusig -Anoosh |Peyn -Arumugam |Ossenbruggen + Alejandro |McAlpine + Anneke |Preusig + Anoosh |Peyn + Arumugam |Ossenbruggen // end::limitationSubSelect ; -limitationSubSelect +limitationSubSelectRewritten // tag::limitationSubSelectRewritten -SELECT first_name, last_name FROM emp WHERE last_name NOT LIKE '%a%' AND first_name LIKE 'A%'; +SELECT first_name, last_name FROM emp WHERE last_name NOT LIKE '%a%' AND first_name LIKE 'A%' ORDER BY 1; // end::limitationSubSelectRewritten first_name | last_name ---------------+--------------- -Anneke |Preusig -Anoosh |Peyn -Arumugam |Ossenbruggen + Alejandro |McAlpine + Anneke |Preusig + Anoosh |Peyn + Arumugam |Ossenbruggen ; + +simpleLikeOperator +// tag::simpleLike +SELECT author, name FROM library WHERE name LIKE 'Dune%'; + + author | name +---------------+--------------- +Frank Herbert |Dune +Frank Herbert |Dune Messiah +// end::simpleLike +; + +simpleRLikeOperator +// tag::simpleRLike +SELECT author, name FROM library WHERE name RLIKE 'Child.* Dune'; + + author | name +---------------+---------------- +Frank Herbert |Children of Dune +// end::simpleRLike +; + diff --git a/x-pack/plugin/sql/qa/src/main/resources/filter.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/filter.csv-spec new file mode 100644 index 0000000000000..e60460a63e568 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/filter.csv-spec @@ -0,0 +1,119 @@ +// +// Filter +// + +whereFieldWithRLikeMatch +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND last_name RLIKE 'S.*'; + + l:s +--------------- +Simmel +; + +whereFieldWithNotRLikeMatch +SELECT last_name, first_name FROM "test_emp" WHERE emp_no < 10020 AND 
first_name NOT RLIKE 'Ma.*' ORDER BY first_name LIMIT 5; + + last_name:s | first_name:s +---------------+--------------- +Preusig |Anneke +Genin |Berni +Simmel |Bezalel +Koblick |Chirstian +Bouloucos |Cristinel +; + +whereFieldWithRLikeMatchNot +SELECT last_name AS L, emp_no FROM "test_emp" WHERE NOT (emp_no < 10003 AND L NOT RLIKE 'K.*') ORDER BY emp_no LIMIT 5; + + L:s | emp_no:i +---------------+--------------- +Bamford |10003 +Koblick |10004 +Maliniak |10005 +Preusig |10006 +Zielinski |10007 +; + +whereFieldOnMatchWithAndAndOr +SELECT last_name l, gender g FROM "test_emp" WHERE (last_name RLIKE 'K.*' OR gender = 'F') AND emp_no < 10050 ORDER BY last_name; + + l:s | g:s +---------------+--------------- +Casley |F +Kalloufi |M +Koblick |M +Lenart |F +Meriste |F +Montemayor |F +Peac |F +Pettey |F +Preusig |F +Reistad |F +Reistad |F +Simmel |F +Stamatiou |F +Tramer |F +Zielinski |F +; + +whereFieldWithRLikeAndGroupByOrderBy +SELECT last_name l, gender g, COUNT(*) c, MAX(salary) AS sal FROM "test_emp" WHERE emp_no < 10050 AND (last_name RLIKE 'B.*' OR gender = 'F') GROUP BY g, l ORDER BY sal; + + l:s | g:s | c:l | sal:i +---------------+---------------+---------------+--------------- +Berztiss |M |1 |28336 +Stamatiou |F |1 |30404 +Brender |M |1 |36051 +Meriste |F |1 |37112 +Tramer |F |1 |37853 +Casley |F |1 |39728 +Montemayor |F |1 |47896 +Bridgland |null |1 |48942 +Simmel |F |1 |56371 +Lenart |F |1 |56415 +Bouloucos |null |1 |58715 +Preusig |F |1 |60335 +Bamford |M |1 |61805 +Pettey |F |1 |64675 +Peac |F |1 |66174 +Reistad |F |2 |73851 +Zielinski |F |1 |74572 +; + +whereFieldWithRLikeAndNotRLike +SELECT COUNT(*), last_name AS f FROM test_emp WHERE last_name RLIKE '.*o.*' AND last_name NOT RLIKE '.*f.*' GROUP BY f HAVING COUNT(*) > 1; + + COUNT(*):l | f:s +---------------+--------------- +2 |Lortz +; + +whereInlineRLike +SELECT emp_no FROM test_emp WHERE 'aaabbb' RLIKE 'aa+b+' AND 'aaabbb' NOT RLIKE 'a++c+' AND emp_no < 10080 ORDER BY emp_no DESC LIMIT 5; + + emp_no:i +--------------- +10079 +10078 +10077 +10076 +10075 +; + +whereInlineRLikeAndCount_1 +SELECT COUNT(*), TRUNCATE(emp_no, -2) t FROM test_emp WHERE 'aaabbb' RLIKE '.....?.?' AND 'aaabbb' NOT RLIKE 'aa?bb?' 
GROUP BY TRUNCATE(emp_no, -2) ORDER BY t ASC; + + COUNT(*):l | t:i +---------------+--------------- +99 |10000 +1 |10100 +; + +whereInlineRLikeAndCount_2 +SELECT COUNT(*), TRUNCATE(emp_no, -2) t FROM test_emp WHERE 'aaabbb' RLIKE 'a{2,}b{2,}' AND 'aaabbb' NOT RLIKE 'a{4,6}b{4,6}' GROUP BY TRUNCATE(emp_no, -2) ORDER BY t ASC; + + COUNT(*):l | t:i +---------------+--------------- +99 |10000 +1 |10100 +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/filter.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/filter.sql-spec index af81b060ebd3f..d156472697995 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/filter.sql-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/filter.sql-spec @@ -51,6 +51,8 @@ whereFieldWithLikeMatch SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND last_name LIKE 'K%'; whereFieldWithNotLikeMatch SELECT last_name l FROM "test_emp" WHERE emp_no < 10020 AND first_name NOT LIKE 'Ma%'; +whereFieldWithInlineLikeMatch +SELECT emp_no FROM "test_emp" WHERE 'aaabbb' LIKE 'aa%b%' AND 'aaabbb' NOT LIKE 'a%%c%' AND emp_no < 10080 ORDER BY emp_no DESC LIMIT 5; whereFieldWithOrderNot SELECT last_name l FROM "test_emp" WHERE NOT emp_no < 10003 ORDER BY emp_no LIMIT 5; @@ -64,6 +66,9 @@ SELECT last_name l FROM "test_emp" WHERE NOT (emp_no < 10003 AND last_name NOT L whereFieldOnMatchWithAndAndOr SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND (gender = 'M' AND NOT FALSE OR last_name LIKE 'K%') ORDER BY emp_no; +whereFieldWithLikeAndNotLike +SELECT COUNT(*), last_name AS f FROM test_emp WHERE last_name LIKE '%o%' AND last_name NOT LIKE '%f%' GROUP BY f HAVING COUNT(*) > 1; + // TODO: (NOT) RLIKE in particular and more NOT queries in general whereIsNull diff --git a/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec index 07df14d99e36b..99aa07ec91f4d 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec @@ -30,6 +30,60 @@ SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE QUERY('Man*', ' 10096 |Jayson |M |Mandell ; +matchWithFuzziness +SELECT first_name, SCORE() FROM test_emp WHERE MATCH(first_name, 'geo', 'fuzziness=6'); + + first_name:s | SCORE():f +----------------+--------------- +Gino |1.3684646 +Gao |2.7369292 +; + +matchWithFuzzinessAuto +SELECT first_name, SCORE() FROM test_emp WHERE MATCH(first_name, 'geo', 'fuzziness=AUTO:1,7;fuzzy_rewrite=scoring_boolean'); + + first_name:s | SCORE():f +----------------+--------------- +Gao |2.7369292 +; + +multiMatchWithFuzzinessAuto +SELECT first_name, last_name, SCORE() FROM test_emp WHERE MATCH('first_name^3,last_name^5', 'geo hir', 'fuzziness=AUTO:1,5;operator=or') ORDER BY first_name; + + first_name:s | last_name:s | SCORE():f +----------------+-----------------+--------------- +Gao |Dolinsky |8.210788 +Shir |McClurg |8.210788 +; + +multiMatchWithFuzziness +SELECT first_name, last_name, SCORE() FROM test_emp WHERE MATCH('first_name^3,last_name^5', 'geo hir', 'fuzziness=5;operator=or') ORDER BY first_name; + + first_name:s | last_name:s | SCORE():f +----------------+-----------------+--------------- +Gao |Dolinsky |8.210788 +Gino |Leonhardt |4.105394 +Shir |McClurg |8.210788 +Uri |Lenart |4.105394 +; + +queryWithFuzziness +SELECT first_name, SCORE() FROM test_emp WHERE QUERY('geo~', 'fuzziness=5;default_field=first_name'); + + first_name:s | SCORE():f +----------------+--------------- +Gino |1.3684646 +Gao |2.7369292 +; + +queryWithFuzzinessAuto 
+SELECT first_name, SCORE() FROM test_emp WHERE QUERY('geo~', 'fuzziness=AUTO:1,5;default_field=first_name'); + + first_name:s | SCORE():f +----------------+--------------- +Gao |2.7369292 +; + matchQuery SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez'); diff --git a/x-pack/plugin/sql/qa/src/main/resources/null.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/null.csv-spec index 19541cf5d9f32..610217b233314 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/null.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/null.csv-spec @@ -61,6 +61,13 @@ c:i ; coalesceMixed +SELECT COALESCE(null, 123, null, 321); + +COALESCE(null, 123, null, 321):i +123 +; + +coalesceMixedWithAlias SELECT COALESCE(null, 123, null, 321) AS c; c:i diff --git a/x-pack/plugin/sql/qa/src/main/resources/select.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/select.sql-spec index 82bbbf12e961e..9f9731efcc5b9 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/select.sql-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/select.sql-spec @@ -108,9 +108,7 @@ selectMathPIFromIndexWithWhereEvaluatingToTrue SELECT PI() AS pi FROM test_emp WHERE ROUND(PI(),2)=3.14; selectMathPIFromIndexWithWhereEvaluatingToTrueAndWithLimit SELECT PI() AS pi FROM test_emp WHERE ROUND(PI(),2)=3.14 LIMIT 3; -// AwaitsFix https://github.com/elastic/elasticsearch/issues/35980 -selectMathPIFromIndexWithWhereEvaluatingToFalse-Ignore +selectMathPIFromIndexWithWhereEvaluatingToFalse SELECT PI() AS pi FROM test_emp WHERE PI()=5; -// AwaitsFix https://github.com/elastic/elasticsearch/issues/35980 -selectMathPIFromIndexWithWhereEvaluatingToFalseAndWithLimit-Ignore +selectMathPIFromIndexWithWhereEvaluatingToFalseAndWithLimit SELECT PI() AS pi FROM test_emp WHERE PI()=5 LIMIT 3; diff --git a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql index f61d48af4ff37..8db4913b775ff 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql +++ b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql @@ -30,10 +30,10 @@ FROM DUAL UNION ALL SELECT null, 'test1', 'name.keyword', 12, 'KEYWORD', 0, 2147483647, null, null, 1, -- columnNullable - null, null, 12, 0, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO' + null, null, 12, 0, 2147483647, 2, 'YES', null, null, null, null, 'NO', 'NO' FROM DUAL UNION ALL -SELECT null, 'test2', 'date', 93, 'DATETIME', 24, 8, null, null, +SELECT null, 'test2', 'date', 93, 'DATETIME', 29, 8, null, null, 1, -- columnNullable null, null, 9, 3, null, 1, 'YES', null, null, null, null, 'NO', 'NO' FROM DUAL diff --git a/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec new file mode 100644 index 0000000000000..8a5d53a0fe2f2 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/single-node-only/command-sys.csv-spec @@ -0,0 +1,120 @@ +// +// Sys Commands +// + +sysColumnsWithTableLikeWithEscape +SYS COLUMNS TABLE LIKE 'test\_emp' ESCAPE '\'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s 
|SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name |12 |TEXT |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name.keyword|12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name |12 |TEXT |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name.keyword |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |11 |YES |null |null |null |null |NO |NO +; + +sysColumnsWithTableLikeNoEscape +SYS COLUMNS TABLE LIKE 'test_emp'; + +// since there's no escaping test_emp means test*emp which matches also test_alias_emp +// however as there's no way to filter the matching indices, we can't exclude the field + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |birth_date 
|93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |extra.info.gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |extra_gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name |12 |TEXT |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name.keyword|12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name |12 |TEXT |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name.keyword |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +; + +sysColumnsWithCatalogAndLike +SYS COLUMNS CATALOG 'x-pack_plugin_sql_qa_single-node_integTestCluster' TABLE LIKE 'test\_emp\_copy' ESCAPE '\'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+-------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|emp_no |4 |INTEGER |11 |4 
|null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|extra.info.gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|extra_gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|first_name |12 |TEXT |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|first_name.keyword|12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|last_name |12 |TEXT |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|last_name.keyword |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy|salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +; + +sysColumnsOnAliasWithTableLike +SYS COLUMNS TABLE LIKE 'test\_alias' ESCAPE '\'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |extra.info.gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null 
|null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |extra_gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |first_name |12 |TEXT |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |first_name.keyword|12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |last_name |12 |TEXT |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |last_name.keyword |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_alias |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +; + +sysColumnsAllTables +SYS COLUMNS TABLE LIKE '%'; + + TABLE_CAT:s | TABLE_SCHEM:s| TABLE_NAME:s | COLUMN_NAME:s | DATA_TYPE:i | TYPE_NAME:s | COLUMN_SIZE:i| BUFFER_LENGTH:i|DECIMAL_DIGITS:i|NUM_PREC_RADIX:i | NULLABLE:i| REMARKS:s | COLUMN_DEF:s |SQL_DATA_TYPE:i|SQL_DATETIME_SUB:i|CHAR_OCTET_LENGTH:i|ORDINAL_POSITION:i|IS_NULLABLE:s|SCOPE_CATALOG:s|SCOPE_SCHEMA:s|SCOPE_TABLE:s|SOURCE_DATA_TYPE:sh|IS_AUTOINCREMENT:s|IS_GENERATEDCOLUMN:s +---------------+---------------+---------------+--------------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+---------------+----------------+-----------------+----------------+---------------+---------------+---------------+---------------+----------------+----------------+------------------ +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |@timestamp |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |bytes_in |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |2 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |bytes_out |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |client_ip |12 |IP |0 |39 |null |null |1 |null |null |12 |0 |null |4 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |client_port |4 
|INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |5 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |dest_ip |12 |IP |0 |39 |null |null |1 |null |null |12 |0 |null |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |id |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |logs |status |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name |12 |TEXT |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |4 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |first_name.keyword|12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |5 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |6 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name |12 |TEXT |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |last_name.keyword |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |birth_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |1 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |emp_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |3 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |extra_gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |7 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |extra_no |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |8 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |first_name |12 |TEXT |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |9 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |first_name.keyword|12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |10 
|YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |gender |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |11 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |hire_date |93 |DATETIME |29 |8 |null |null |1 |null |null |9 |3 |null |12 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |languages |-6 |BYTE |5 |1 |null |10 |1 |null |null |-6 |0 |null |13 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |last_name |12 |TEXT |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |14 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |last_name.keyword |12 |KEYWORD |0 |2147483647 |null |null |1 |null |null |12 |0 |2147483647 |15 |YES |null |null |null |null |NO |NO +x-pack_plugin_sql_qa_single-node_integTestCluster |null |test_emp_copy |salary |4 |INTEGER |11 |4 |null |10 |1 |null |null |4 |0 |null |16 |YES |null |null |null |null |NO |NO +; \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 deleted file mode 100644 index 7a4b68f8d857d..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e3a95ff3cbd96e2c05b90932c20ca6374cdcdbe9 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0.jar.sha1 new file mode 100644 index 0000000000000..2619abce7979a --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0.jar.sha1 @@ -0,0 +1 @@ +407c555efb2d3253f51a676cc2089a5d29a3b7b7 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java index aaa8c56323d55..50a1ac2397116 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java @@ -57,7 +57,7 @@ public AbstractSqlQueryRequest() { } public AbstractSqlQueryRequest(String query, List params, QueryBuilder filter, ZoneId zoneId, - int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout, RequestInfo requestInfo) { + int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout, RequestInfo requestInfo) { super(requestInfo); this.query = query; this.params = params; diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java index 60c7b66352ce1..0a81505ffbadc 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.action; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.ParseField; import 
org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -14,6 +15,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.proto.Protocol; import org.elasticsearch.xpack.sql.proto.RequestInfo; import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; @@ -29,21 +31,27 @@ */ public class SqlQueryRequest extends AbstractSqlQueryRequest { private static final ObjectParser PARSER = objectParser(SqlQueryRequest::new); + static final ParseField FIELD_MULTI_VALUE_LENIENCY = new ParseField("field_multi_value_leniency"); + static { PARSER.declareString(SqlQueryRequest::cursor, CURSOR); + PARSER.declareBoolean(SqlQueryRequest::fieldMultiValueLeniency, FIELD_MULTI_VALUE_LENIENCY); } private String cursor = ""; + private boolean fieldMultiValueLeniency = Protocol.FIELD_MULTI_VALUE_LENIENCY; public SqlQueryRequest() { super(); } public SqlQueryRequest(String query, List params, QueryBuilder filter, ZoneId zoneId, - int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout, String cursor, RequestInfo requestInfo) { + int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout, + String cursor, RequestInfo requestInfo, boolean fieldMultiValueLeniency) { super(query, params, filter, zoneId, fetchSize, requestTimeout, pageTimeout, requestInfo); this.cursor = cursor; + this.fieldMultiValueLeniency = fieldMultiValueLeniency; } @Override @@ -75,15 +83,27 @@ public SqlQueryRequest cursor(String cursor) { return this; } + + public SqlQueryRequest fieldMultiValueLeniency(boolean leniency) { + this.fieldMultiValueLeniency = leniency; + return this; + } + + public boolean fieldMultiValueLeniency() { + return fieldMultiValueLeniency; + } + public SqlQueryRequest(StreamInput in) throws IOException { super(in); cursor = in.readString(); + fieldMultiValueLeniency = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(cursor); + out.writeBoolean(fieldMultiValueLeniency); } @Override @@ -93,7 +113,9 @@ public int hashCode() { @Override public boolean equals(Object obj) { - return super.equals(obj) && Objects.equals(cursor, ((SqlQueryRequest) obj).cursor); + return super.equals(obj) + && Objects.equals(cursor, ((SqlQueryRequest) obj).cursor) + && fieldMultiValueLeniency == ((SqlQueryRequest) obj).fieldMultiValueLeniency; } @Override @@ -105,7 +127,8 @@ public String getDescription() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { // This is needed just to test round-trip compatibility with proto.SqlQueryRequest return new org.elasticsearch.xpack.sql.proto.SqlQueryRequest(query(), params(), zoneId(), fetchSize(), requestTimeout(), - pageTimeout(), filter(), cursor(), requestInfo()).toXContent(builder, params); + pageTimeout(), filter(), cursor(), requestInfo(), fieldMultiValueLeniency()) + .toXContent(builder, params); } public static SqlQueryRequest fromXContent(XContentParser parser) { diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestBuilder.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestBuilder.java index 5443f09c5eb81..66fd5478292b4 100644 --- 
a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestBuilder.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestBuilder.java @@ -25,14 +25,15 @@ public class SqlQueryRequestBuilder extends ActionRequestBuilder params, QueryBuilder filter, ZoneId zoneId, int fetchSize, TimeValue requestTimeout, - TimeValue pageTimeout, String nextPageInfo, RequestInfo requestInfo) { - super(client, action, new SqlQueryRequest(query, params, filter, zoneId, fetchSize, requestTimeout, pageTimeout, nextPageInfo, - requestInfo)); + TimeValue pageTimeout, String nextPageInfo, RequestInfo requestInfo, + boolean multiValueFieldLeniency) { + super(client, action, new SqlQueryRequest(query, params, filter, zoneId, fetchSize, requestTimeout, pageTimeout, + nextPageInfo, requestInfo, multiValueFieldLeniency)); } public SqlQueryRequestBuilder query(String query) { @@ -79,4 +80,9 @@ public SqlQueryRequestBuilder fetchSize(int fetchSize) { request.fetchSize(fetchSize); return this; } + + public SqlQueryRequestBuilder multiValueFieldLeniency(boolean lenient) { + request.fieldMultiValueLeniency(lenient); + return this; + } } \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateRequest.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateRequest.java index 5b9f21105c7f9..b60e90d5ae879 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateRequest.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlTranslateRequest.java @@ -64,8 +64,12 @@ public static SqlTranslateRequest fromXContent(XContentParser parser) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { // This is needed just to test parsing of SqlTranslateRequest, so we can reuse SqlQuerySerialization - return new SqlQueryRequest(query(), params(), zoneId(), fetchSize(), requestTimeout(), - pageTimeout(), filter(), null, requestInfo()).toXContent(builder, params); + // This is needed just to test parsing of SqlTranslateRequest, so we can reuse SqlQuerySerialization + return new SqlQueryRequest(query(), params(), zoneId(), fetchSize(), requestTimeout(), pageTimeout(), + filter(), + null, + requestInfo(), + false).toXContent(builder, params); } } diff --git a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestTests.java b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestTests.java index 8b8d7ede7a6ce..9104525ca911f 100644 --- a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestTests.java +++ b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlQueryRequestTests.java @@ -55,8 +55,8 @@ protected NamedXContentRegistry xContentRegistry() { protected SqlQueryRequest createTestInstance() { return new SqlQueryRequest(randomAlphaOfLength(10), randomParameters(), SqlTestUtils.randomFilterOrNull(random()), randomZone(), between(1, Integer.MAX_VALUE), - randomTV(), randomTV(), randomAlphaOfLength(10), requestInfo - ); + randomTV(), randomTV(), randomAlphaOfLength(10), requestInfo, + randomBoolean() ); } private RequestInfo randomRequestInfo() { @@ -112,8 +112,8 @@ protected SqlQueryRequest mutateInstance(SqlQueryRequest instance) { request -> 
request.cursor(randomValueOtherThan(request.cursor(), SqlQueryResponseTests::randomStringCursor)) ); SqlQueryRequest newRequest = new SqlQueryRequest(instance.query(), instance.params(), instance.filter(), - instance.zoneId(), instance.fetchSize(), instance.requestTimeout(), instance.pageTimeout(), instance.cursor(), - instance.requestInfo()); + instance.zoneId(), instance.fetchSize(), instance.requestTimeout(), instance.pageTimeout(), + instance.cursor(), instance.requestInfo(), instance.fieldMultiValueLeniency()); mutator.accept(newRequest); return newRequest; } diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle index be7fe189cf033..6f57ea279c5ab 100644 --- a/x-pack/plugin/sql/sql-cli/build.gradle +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -14,14 +14,20 @@ archivesBaseName = 'elasticsearch-sql-cli' description = 'Command line interface to Elasticsearch that speaks SQL' dependencies { - compile "org.jline:jline:3.8.2" + + // select just the parts of JLine that are needed + compile "org.jline:jline-terminal:${jlineVersion}" + compile("org.jline:jline-terminal-jna:${jlineVersion}") { + exclude group: "net.java.dev.jna" + } + compile "org.jline:jline-reader:${jlineVersion}" + compile "org.jline:jline-style:${jlineVersion}" + compile xpackProject('plugin:sql:sql-client') - compile xpackProject('plugin:sql:sql-action') + compile xpackProject('plugin:sql:sql-action') compile "org.elasticsearch:elasticsearch-cli:${version}" - runtime "org.fusesource.jansi:jansi:1.16" runtime "org.elasticsearch:jna:${versions.jna}" - testCompile "org.elasticsearch.test:framework:${version}" } @@ -32,6 +38,7 @@ dependencyLicenses { mapping from: /lucene-.*/, to: 'lucene' mapping from: /sql-action.*/, to: 'elasticsearch' mapping from: /sql-client.*/, to: 'elasticsearch' + mapping from: /jline-.*/, to: 'jline' ignoreSha 'elasticsearch-cli' ignoreSha 'elasticsearch-core' ignoreSha 'elasticsearch' @@ -41,7 +48,7 @@ dependencyLicenses { /* * Bundle all dependencies into the main jar and mark it as executable it - * can be easilly shipped around and used. + * can be easily shipped around and used. 
*/ jar { from({ @@ -78,37 +85,6 @@ forbiddenApisMain { signaturesFiles += files('src/forbidden/cli-signatures.txt') } -thirdPartyAudit.ignoreMissingClasses ( - // jLine's optional dependencies - 'org.apache.sshd.client.SshClient', - 'org.apache.sshd.client.auth.keyboard.UserInteraction', - 'org.apache.sshd.client.channel.ChannelShell', - 'org.apache.sshd.client.channel.ClientChannel', - 'org.apache.sshd.client.channel.ClientChannelEvent', - 'org.apache.sshd.client.future.AuthFuture', - 'org.apache.sshd.client.future.ConnectFuture', - 'org.apache.sshd.client.future.OpenFuture', - 'org.apache.sshd.client.session.ClientSession', - 'org.apache.sshd.common.Factory', - 'org.apache.sshd.common.channel.PtyMode', - 'org.apache.sshd.common.config.keys.FilePasswordProvider', - 'org.apache.sshd.common.util.io.NoCloseInputStream', - 'org.apache.sshd.common.util.io.NoCloseOutputStream', - 'org.apache.sshd.server.Command', - 'org.apache.sshd.server.Environment', - 'org.apache.sshd.server.ExitCallback', - 'org.apache.sshd.server.SessionAware', - 'org.apache.sshd.server.Signal', - 'org.apache.sshd.server.SshServer', - 'org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider', - 'org.apache.sshd.server.scp.ScpCommandFactory$Builder', - 'org.apache.sshd.server.session.ServerSession', - 'org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory$Builder', - 'org.mozilla.universalchardet.UniversalDetector', - 'org.fusesource.jansi.internal.Kernel32$FOCUS_EVENT_RECORD', - 'org.fusesource.jansi.internal.Kernel32$MOUSE_EVENT_RECORD', -) - task runcli { description = 'Run the CLI and connect to elasticsearch running on 9200' dependsOn 'assemble' @@ -130,7 +106,7 @@ task runcli { } // Use the jar for testing so we can get the proper version information -unitTest { +test { classpath -= compileJava.outputs.files classpath -= configurations.compile classpath -= configurations.runtime diff --git a/x-pack/plugin/sql/sql-cli/licenses/jansi-1.16.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jansi-1.16.jar.sha1 deleted file mode 100644 index 8adc5c7977cf8..0000000000000 --- a/x-pack/plugin/sql/sql-cli/licenses/jansi-1.16.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1aaf0028852164ab6b4057192ccd0ba7dedd3a5 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jansi-LICENSE.txt b/x-pack/plugin/sql/sql-cli/licenses/jansi-LICENSE.txt deleted file mode 100644 index d645695673349..0000000000000 --- a/x-pack/plugin/sql/sql-cli/licenses/jansi-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/x-pack/plugin/sql/sql-cli/licenses/jansi-NOTICE.txt b/x-pack/plugin/sql/sql-cli/licenses/jansi-NOTICE.txt deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-3.8.2.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-3.8.2.jar.sha1 deleted file mode 100644 index 29e11fa3a021e..0000000000000 --- a/x-pack/plugin/sql/sql-cli/licenses/jline-3.8.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b81efadcb78388b662ede7965b272be56a86ec1 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-reader-3.10.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-reader-3.10.0.jar.sha1 new file mode 100644 index 0000000000000..8f97d149b3993 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jline-reader-3.10.0.jar.sha1 @@ -0,0 +1 @@ +c93b837f49fe8eb3f68cc3daee5dfb83141ca538 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-style-3.10.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-style-3.10.0.jar.sha1 new file mode 100644 index 0000000000000..f308f7a728a19 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jline-style-3.10.0.jar.sha1 @@ -0,0 +1 @@ +495cfd226e13abf2b8a5be4f270d5b9897588733 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-3.10.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-3.10.0.jar.sha1 new file mode 100644 index 0000000000000..ae6bccfede9d4 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-3.10.0.jar.sha1 @@ -0,0 +1 @@ +ac853ad4dd46252319cbb9c012d9ab1bcc501162 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-jna-3.10.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-jna-3.10.0.jar.sha1 new file mode 100644 index 0000000000000..4d28d9789805c --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-jna-3.10.0.jar.sha1 @@ -0,0 +1 @@ +797f8cadcb4a969881e8dbd07a623d1b13214984 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java index 6431f10a49217..9a1d26e63570e 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java @@ -7,6 +7,7 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; + import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.LoggingAwareCommand; import org.elasticsearch.cli.Terminal; @@ -20,11 +21,12 @@ import org.elasticsearch.xpack.sql.cli.command.PrintLogoCommand; import org.elasticsearch.xpack.sql.cli.command.ServerInfoCliCommand; import org.elasticsearch.xpack.sql.cli.command.ServerQueryCliCommand; -import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.ClientException; import org.elasticsearch.xpack.sql.client.ConnectionConfiguration; +import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.Version; import org.jline.terminal.TerminalBuilder; + import java.io.IOException; import java.net.ConnectException; import java.sql.SQLInvalidAuthorizationSpecException; @@ -46,8 +48,13 @@ public class Cli extends LoggingAwareCommand { * -Dorg.jline.terminal.dumb=true */ public static void main(String[] args) throws Exception { - final Cli cli = new Cli(new JLineTerminal(TerminalBuilder.builder().build(), true)); 
configureJLineLogging(); + final Cli cli = new Cli(new JLineTerminal(TerminalBuilder.builder() + .name("Elasticsearch SQL CLI") + // remove jansi since it has issues on Windows in closing terminals + // the CLI uses JNA anyway + .jansi(false) + .build(), true)); int status = cli.main(args, Terminal.DEFAULT); if (status != ExitCodes.OK) { exit(status); @@ -142,7 +149,7 @@ private void checkConnection(CliSession cliSession, CliTerminal cliTerminal, Con "Cannot connect to the server " + con.connectionString() + " - " + ex.getCause().getMessage()); } else if (ex.getCause() != null && ex.getCause() instanceof SQLInvalidAuthorizationSpecException) { throw new UserException(ExitCodes.NOPERM, - "Cannot establish a secure connection to the server " + + "Cannot establish a secure connection to the server " + con.connectionString() + " - " + ex.getCause().getMessage()); } else { // Most likely we connected to something other than Elasticsearch diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java index 86b5cf6c36ef2..7a8faa397bc5a 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java @@ -25,7 +25,7 @@ protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String l BasicFormatter formatter; String data; try { - response = cliClient.queryInit(line, cliSession.getFetchSize()); + response = cliClient.basicQuery(line, cliSession.getFetchSize()); formatter = new BasicFormatter(response.columns(), response.rows(), CLI); data = formatter.formatWithHeader(response.columns(), response.rows()); while (true) { diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java index 9d4ded4a39c14..feb1242fb8f30 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java @@ -31,11 +31,11 @@ public void testExceptionHandling() throws Exception { TestTerminal testTerminal = new TestTerminal(); HttpClient client = mock(HttpClient.class); CliSession cliSession = new CliSession(client); - when(client.queryInit("blah", 1000)).thenThrow(new SQLException("test exception")); + when(client.basicQuery("blah", 1000)).thenThrow(new SQLException("test exception")); ServerQueryCliCommand cliCommand = new ServerQueryCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "blah")); assertEquals("Bad request [test exception]\n", testTerminal.toString()); - verify(client, times(1)).queryInit(eq("blah"), eq(1000)); + verify(client, times(1)).basicQuery(eq("blah"), eq(1000)); verifyNoMoreInteractions(client); } @@ -44,11 +44,11 @@ public void testOnePageQuery() throws Exception { HttpClient client = mock(HttpClient.class); CliSession cliSession = new CliSession(client); cliSession.setFetchSize(10); - when(client.queryInit("test query", 10)).thenReturn(fakeResponse("", true, "foo")); + when(client.basicQuery("test query", 10)).thenReturn(fakeResponse("", true, "foo")); ServerQueryCliCommand cliCommand = new 
ServerQueryCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "test query")); assertEquals(" field \n---------------\nfoo \n", testTerminal.toString()); - verify(client, times(1)).queryInit(eq("test query"), eq(10)); + verify(client, times(1)).basicQuery(eq("test query"), eq(10)); verifyNoMoreInteractions(client); } @@ -57,14 +57,14 @@ public void testThreePageQuery() throws Exception { HttpClient client = mock(HttpClient.class); CliSession cliSession = new CliSession(client); cliSession.setFetchSize(10); - when(client.queryInit("test query", 10)).thenReturn(fakeResponse("my_cursor1", true, "first")); + when(client.basicQuery("test query", 10)).thenReturn(fakeResponse("my_cursor1", true, "first")); when(client.nextPage("my_cursor1")).thenReturn(fakeResponse("my_cursor2", false, "second")); when(client.nextPage("my_cursor2")).thenReturn(fakeResponse("", false, "third")); ServerQueryCliCommand cliCommand = new ServerQueryCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "test query")); assertEquals(" field \n---------------\nfirst \nsecond \nthird \n", testTerminal.toString()); - verify(client, times(1)).queryInit(eq("test query"), eq(10)); + verify(client, times(1)).basicQuery(eq("test query"), eq(10)); verify(client, times(2)).nextPage(any()); verifyNoMoreInteractions(client); } @@ -76,13 +76,13 @@ public void testTwoPageQueryWithSeparator() throws Exception { cliSession.setFetchSize(15); // Set a separator cliSession.setFetchSeparator("-----"); - when(client.queryInit("test query", 15)).thenReturn(fakeResponse("my_cursor1", true, "first")); + when(client.basicQuery("test query", 15)).thenReturn(fakeResponse("my_cursor1", true, "first")); when(client.nextPage("my_cursor1")).thenReturn(fakeResponse("", false, "second")); ServerQueryCliCommand cliCommand = new ServerQueryCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "test query")); assertEquals(" field \n---------------\nfirst \n-----\nsecond \n", testTerminal.toString()); - verify(client, times(1)).queryInit(eq("test query"), eq(15)); + verify(client, times(1)).basicQuery(eq("test query"), eq(15)); verify(client, times(1)).nextPage(any()); verifyNoMoreInteractions(client); } @@ -92,14 +92,14 @@ public void testCursorCleanupOnError() throws Exception { HttpClient client = mock(HttpClient.class); CliSession cliSession = new CliSession(client); cliSession.setFetchSize(15); - when(client.queryInit("test query", 15)).thenReturn(fakeResponse("my_cursor1", true, "first")); + when(client.basicQuery("test query", 15)).thenReturn(fakeResponse("my_cursor1", true, "first")); when(client.nextPage("my_cursor1")).thenThrow(new SQLException("test exception")); when(client.queryClose("my_cursor1", Mode.CLI)).thenReturn(true); ServerQueryCliCommand cliCommand = new ServerQueryCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "test query")); assertEquals(" field \n---------------\nfirst \n" + "Bad request [test exception]\n", testTerminal.toString()); - verify(client, times(1)).queryInit(eq("test query"), eq(15)); + verify(client, times(1)).basicQuery(eq("test query"), eq(15)); verify(client, times(1)).nextPage(any()); verify(client, times(1)).queryClose(eq("my_cursor1"), eq(Mode.CLI)); verifyNoMoreInteractions(client); diff --git a/x-pack/plugin/sql/sql-client/build.gradle b/x-pack/plugin/sql/sql-client/build.gradle index c4ee030d4568f..613ca73a4dbad 100644 --- a/x-pack/plugin/sql/sql-client/build.gradle +++ b/x-pack/plugin/sql/sql-client/build.gradle @@ -18,9 +18,6 @@ 
dependencyLicenses { mapping from: /sql-proto.*/, to: 'elasticsearch' mapping from: /elasticsearch-cli.*/, to: 'elasticsearch' mapping from: /elasticsearch-core.*/, to: 'elasticsearch' - mapping from: /lucene-.*/, to: 'lucene' - ignoreSha 'sql-action' - ignoreSha 'elasticsearch' ignoreSha 'elasticsearch-core' } diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java index 6096f5baf865d..591762b18a985 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ConnectionConfiguration.java @@ -7,13 +7,13 @@ import java.net.URI; import java.net.URISyntaxException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Enumeration; import java.util.LinkedHashSet; import java.util.Properties; import java.util.Set; +import java.util.TreeSet; import java.util.concurrent.TimeUnit; import java.util.function.Function; @@ -28,6 +28,10 @@ * to move away from the loose Strings... */ public class ConnectionConfiguration { + + // Validation + public static final String PROPERTIES_VALIDATION = "validate.properties"; + public static final String PROPERTIES_VALIDATION_DEFAULT = "true"; // Timeouts @@ -59,12 +63,15 @@ public class ConnectionConfiguration { public static final String AUTH_PASS = "password"; protected static final Set OPTION_NAMES = new LinkedHashSet<>( - Arrays.asList(CONNECT_TIMEOUT, NETWORK_TIMEOUT, QUERY_TIMEOUT, PAGE_TIMEOUT, PAGE_SIZE, AUTH_USER, AUTH_PASS)); + Arrays.asList(PROPERTIES_VALIDATION, CONNECT_TIMEOUT, NETWORK_TIMEOUT, QUERY_TIMEOUT, PAGE_TIMEOUT, PAGE_SIZE, + AUTH_USER, AUTH_PASS)); static { OPTION_NAMES.addAll(SslConfig.OPTION_NAMES); OPTION_NAMES.addAll(ProxyConfig.OPTION_NAMES); } + + private final boolean validateProperties; // Base URI for all request private final URI baseURI; @@ -87,7 +94,11 @@ public ConnectionConfiguration(URI baseURI, String connectionString, Properties this.connectionString = connectionString; Properties settings = props != null ? 
props : new Properties(); - checkPropertyNames(settings, optionNames()); + validateProperties = parseValue(PROPERTIES_VALIDATION, settings.getProperty(PROPERTIES_VALIDATION, PROPERTIES_VALIDATION_DEFAULT), + Boolean::parseBoolean); + if (validateProperties) { + checkPropertyNames(settings, optionNames()); + } connectTimeout = parseValue(CONNECT_TIMEOUT, settings.getProperty(CONNECT_TIMEOUT, CONNECT_TIMEOUT_DEFAULT), Long::parseLong); networkTimeout = parseValue(NETWORK_TIMEOUT, settings.getProperty(NETWORK_TIMEOUT, NETWORK_TIMEOUT_DEFAULT), Long::parseLong); @@ -106,9 +117,10 @@ public ConnectionConfiguration(URI baseURI, String connectionString, Properties this.baseURI = normalizeSchema(baseURI, connectionString, sslConfig.isEnabled()); } - public ConnectionConfiguration(URI baseURI, String connectionString, long connectTimeout, long networkTimeout, long queryTimeout, - long pageTimeout, int pageSize, String user, String pass, SslConfig sslConfig, - ProxyConfig proxyConfig) throws ClientException { + public ConnectionConfiguration(URI baseURI, String connectionString, boolean validateProperties, long connectTimeout, + long networkTimeout, long queryTimeout, long pageTimeout, int pageSize, String user, String pass, + SslConfig sslConfig, ProxyConfig proxyConfig) throws ClientException { + this.validateProperties = validateProperties; this.connectionString = connectionString; this.connectTimeout = connectTimeout; this.networkTimeout = networkTimeout; @@ -136,13 +148,13 @@ private static URI normalizeSchema(URI uri, String connectionString, boolean isS } } - private Collection optionNames() { - Collection options = new ArrayList<>(OPTION_NAMES); + protected Collection optionNames() { + Set options = new TreeSet<>(OPTION_NAMES); options.addAll(extraOptions()); return options; } - protected Collection extraOptions() { + protected Collection extraOptions() { return emptyList(); } @@ -161,7 +173,7 @@ private static String isKnownProperty(String propertyName, Collection kn if (knownOptions.contains(propertyName)) { return null; } - return "Unknown parameter [" + propertyName + "] ; did you mean " + StringUtils.findSimilar(propertyName, knownOptions); + return "Unknown parameter [" + propertyName + "]; did you mean " + StringUtils.findSimilar(propertyName, knownOptions); } protected T parseValue(String key, String value, Function parser) { @@ -175,6 +187,10 @@ protected T parseValue(String key, String value, Function parser) protected boolean isSSLEnabled() { return sslConfig.isEnabled(); } + + public boolean validateProperties() { + return validateProperties; + } public SslConfig sslConfig() { return sslConfig; diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java index c3f35aefd65f4..0ef7570db7591 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java @@ -32,7 +32,6 @@ import java.security.AccessController; import java.security.PrivilegedAction; import java.sql.SQLException; -import java.time.ZoneId; import java.util.Collections; import java.util.function.Function; @@ -61,12 +60,17 @@ public MainResponse serverInfo() throws SQLException { return get("/", MainResponse::fromXContent); } - public SqlQueryResponse queryInit(String query, int fetchSize) throws SQLException { + public SqlQueryResponse 
basicQuery(String query, int fetchSize) throws SQLException { // TODO allow customizing the time zone - this is what session set/reset/get should be about // method called only from CLI - SqlQueryRequest sqlRequest = new SqlQueryRequest(query, Collections.emptyList(), null, ZoneId.of("Z"), - fetchSize, TimeValue.timeValueMillis(cfg.queryTimeout()), TimeValue.timeValueMillis(cfg.pageTimeout()), - new RequestInfo(Mode.CLI)); + SqlQueryRequest sqlRequest = new SqlQueryRequest(query, Collections.emptyList(), Protocol.TIME_ZONE, + fetchSize, + TimeValue.timeValueMillis(cfg.queryTimeout()), + TimeValue.timeValueMillis(cfg.pageTimeout()), + null, + null, + new RequestInfo(Mode.CLI), + false); return query(sqlRequest); } @@ -106,7 +110,7 @@ private Response post(String path } private boolean head(String path, long timeoutInMs) throws SQLException { - ConnectionConfiguration pingCfg = new ConnectionConfiguration(cfg.baseUri(), cfg.connectionString(), + ConnectionConfiguration pingCfg = new ConnectionConfiguration(cfg.baseUri(), cfg.connectionString(), cfg.validateProperties(), cfg.connectTimeout(), timeoutInMs, cfg.queryTimeout(), cfg.pageTimeout(), cfg.pageSize(), cfg.authUser(), cfg.authPass(), cfg.sslConfig(), cfg.proxyConfig()); try { diff --git a/x-pack/plugin/sql/sql-proto/build.gradle b/x-pack/plugin/sql/sql-proto/build.gradle index a10e3ff8c4ce1..b1c055a0dfcb8 100644 --- a/x-pack/plugin/sql/sql-proto/build.gradle +++ b/x-pack/plugin/sql/sql-proto/build.gradle @@ -14,7 +14,6 @@ dependencies { compile (project(':libs:x-content')) { transitive = false } - compile "joda-time:joda-time:${versions.joda}" runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testCompile "org.elasticsearch.test:framework:${version}" diff --git a/x-pack/plugin/sql/sql-proto/licenses/joda-time-2.10.1.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/joda-time-2.10.1.jar.sha1 deleted file mode 100644 index 75e809754ecee..0000000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/joda-time-2.10.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ac3dbf89dbf2ee385185dd0cd3064fe789efee0 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/joda-time-LICENSE.txt b/x-pack/plugin/sql/sql-proto/licenses/joda-time-LICENSE.txt deleted file mode 100644 index 75b52484ea471..0000000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/joda-time-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/x-pack/plugin/sql/sql-proto/licenses/joda-time-NOTICE.txt b/x-pack/plugin/sql/sql-proto/licenses/joda-time-NOTICE.txt deleted file mode 100644 index dffbcf31cacf6..0000000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/joda-time-NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -============================================================================= -= NOTICE file corresponding to section 4d of the Apache License Version 2.0 = -============================================================================= -This product includes software developed by -Joda.org (http://www.joda.org/). diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java index a6af79e0fbae2..13471afe2212f 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java @@ -22,6 +22,7 @@ public final class Protocol { public static final int FETCH_SIZE = 1000; public static final TimeValue REQUEST_TIMEOUT = TimeValue.timeValueSeconds(90); public static final TimeValue PAGE_TIMEOUT = TimeValue.timeValueSeconds(45); + public static final boolean FIELD_MULTI_VALUE_LENIENCY = false; /** * SQL-related endpoints diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java index 34b19faef781c..7528788a4364c 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java @@ -31,10 +31,13 @@ public class SqlQueryRequest extends AbstractSqlRequest { @Nullable private final ToXContent filter; private final List params; + private final boolean fieldMultiValueLeniency; public SqlQueryRequest(String query, List params, ZoneId zoneId, int fetchSize, - TimeValue requestTimeout, TimeValue pageTimeout, ToXContent filter, String cursor, RequestInfo requestInfo) { + TimeValue requestTimeout, TimeValue pageTimeout, ToXContent filter, + String cursor, RequestInfo requestInfo, + boolean fieldMultiValueLeniency) { super(requestInfo); this.query = query; this.params = params; @@ -44,16 +47,12 @@ public SqlQueryRequest(String query, List params, ZoneId zon this.pageTimeout = pageTimeout; this.filter = filter; this.cursor = cursor; - } - - public SqlQueryRequest(String query, List params, ToXContent filter, ZoneId zoneId, - int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout, RequestInfo requestInfo) { - this(query, params, zoneId, fetchSize, requestTimeout, pageTimeout, filter, null, requestInfo); + this.fieldMultiValueLeniency = fieldMultiValueLeniency; } public SqlQueryRequest(String cursor, TimeValue requestTimeout, TimeValue pageTimeout, RequestInfo requestInfo) { this("", Collections.emptyList(), Protocol.TIME_ZONE, Protocol.FETCH_SIZE, requestTimeout, pageTimeout, - null, cursor, requestInfo); + null, cursor, requestInfo, Protocol.FIELD_MULTI_VALUE_LENIENCY); } /** @@ -114,6 +113,10 @@ public ToXContent filter() { return filter; } + public boolean fieldMultiValueLeniency() { + return fieldMultiValueLeniency; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -133,12 +136,14 @@ public boolean equals(Object o) { Objects.equals(requestTimeout, 
that.requestTimeout) && Objects.equals(pageTimeout, that.pageTimeout) && Objects.equals(filter, that.filter) && - Objects.equals(cursor, that.cursor); + Objects.equals(cursor, that.cursor) && + fieldMultiValueLeniency == that.fieldMultiValueLeniency; } @Override public int hashCode() { - return Objects.hash(super.hashCode(), query, zoneId, fetchSize, requestTimeout, pageTimeout, filter, cursor); + return Objects.hash(super.hashCode(), query, zoneId, fetchSize, requestTimeout, pageTimeout, + filter, cursor, fieldMultiValueLeniency); } @Override @@ -173,6 +178,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("filter"); filter.toXContent(builder, params); } + if (fieldMultiValueLeniency) { + builder.field("field_multi_value_leniency", fieldMultiValueLeniency); + } if (cursor != null) { builder.field("cursor", cursor); } diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 index a11121feaa2bc..9102611cca5b7 100644 --- a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 +++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 @@ -224,6 +224,11 @@ primaryExpression | '(' expression ')' #parenthesizedExpression ; +builtinDateTimeFunction + : name=CURRENT_TIMESTAMP + | name=CURRENT_DATE + ; + castExpression : castTemplate | FUNCTION_ESC castTemplate ESC_END @@ -235,11 +240,6 @@ castTemplate : CAST '(' expression AS dataType ')' ; -builtinDateTimeFunction - : name=CURRENT_DATE ('(' ')')? - | name=CURRENT_TIMESTAMP ('(' precision=INTEGER_VALUE? ')')? - ; - convertTemplate : CONVERT '(' expression ',' dataType ')' ; @@ -338,7 +338,7 @@ string // http://developer.mimer.se/validator/sql-reserved-words.tml nonReserved : ANALYZE | ANALYZED - | CATALOGS | COLUMNS + | CATALOGS | COLUMNS | CURRENT_DATE | CURRENT_TIMESTAMP | DAY | DEBUG | EXECUTABLE | EXPLAIN | FIRST | FORMAT | FULL | FUNCTIONS diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java index 1f8dd7ca362ac..65a9410941b17 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.sql.expression.function.aggregate.Count; import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.ArithmeticOperation; +import org.elasticsearch.xpack.sql.expression.predicate.regex.RegexMatch; import org.elasticsearch.xpack.sql.plan.TableIdentifier; import org.elasticsearch.xpack.sql.plan.logical.Aggregate; import org.elasticsearch.xpack.sql.plan.logical.EsRelation; @@ -848,9 +849,11 @@ private Expression collectResolvedAndReplace(Expression e, Map list = getList(seen, fName); for (Function seenFunction : list) { if (seenFunction != f && f.arguments().equals(seenFunction.arguments())) { + // TODO: we should move to always compare the functions directly // Special check for COUNT: an already seen COUNT function will be returned only if its DISTINCT property // matches the one from the unresolved function to be checked. 
- if (seenFunction instanceof Count) { + // Same for LIKE/RLIKE: the equals function also compares the pattern of LIKE/RLIKE + if (seenFunction instanceof Count || seenFunction instanceof RegexMatch) { if (seenFunction.equals(f)){ return seenFunction; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java index ac59b08dbb726..47c53e772d5dd 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -41,6 +41,7 @@ import org.elasticsearch.xpack.sql.stats.Metrics; import org.elasticsearch.xpack.sql.tree.Node; import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.EsField; import org.elasticsearch.xpack.sql.util.StringUtils; import java.util.ArrayList; @@ -294,7 +295,8 @@ Collection verify(LogicalPlan plan) { */ private static boolean checkGroupBy(LogicalPlan p, Set localFailures, Map resolvedFunctions, Set groupingFailures) { - return checkGroupByAgg(p, localFailures, resolvedFunctions) + return checkGroupByInexactField(p, localFailures) + && checkGroupByAgg(p, localFailures, resolvedFunctions) && checkGroupByOrder(p, localFailures, groupingFailures) && checkGroupByHaving(p, localFailures, groupingFailures, resolvedFunctions); } @@ -463,6 +465,21 @@ private static boolean checkGroupByHavingHasOnlyAggs(Expression e, Set localFailures) { + if (p instanceof Aggregate) { + Aggregate a = (Aggregate) p; + + // The grouping can not be an aggregate function or an inexact field (e.g. text without a keyword) + a.groupings().forEach(e -> e.forEachUp(c -> { + EsField.Exact exact = c.getExactInfo(); + if (exact.hasExact() == false) { + localFailures.add(fail(c, "Field of data type [" + c.dataType().typeName + "] cannot be used for grouping; " + + exact.errorMsg())); + } + }, FieldAttribute.class)); + } + return true; + } // check whether plain columns specified in an agg are mentioned in the group-by private static boolean checkGroupByAgg(LogicalPlan p, Set localFailures, Map functions) { @@ -593,20 +610,36 @@ private static void checkGroupingFunctionInGroupBy(LogicalPlan p, Set l // check if the query has a grouping function (Histogram) but no GROUP BY if (p instanceof Project) { Project proj = (Project) p; - proj.projections().forEach(e -> e.forEachDown(f -> + proj.projections().forEach(e -> e.forEachDown(f -> localFailures.add(fail(f, "[{}] needs to be part of the grouping", Expressions.name(f))), GroupingFunction.class)); } else if (p instanceof Aggregate) { - // if it does have a GROUP BY, check if the groupings contain the grouping functions (Histograms) + // if it does have a GROUP BY, check if the groupings contain the grouping functions (Histograms) Aggregate a = (Aggregate) p; a.aggregates().forEach(agg -> agg.forEachDown(e -> { - if (a.groupings().size() == 0 + if (a.groupings().size() == 0 || Expressions.anyMatch(a.groupings(), g -> g instanceof Function && e.functionEquals((Function) g)) == false) { localFailures.add(fail(e, "[{}] needs to be part of the grouping", Expressions.name(e))); } + else { + checkGroupingFunctionTarget(e, localFailures); + } + }, GroupingFunction.class)); + + a.groupings().forEach(g -> g.forEachDown(e -> { + checkGroupingFunctionTarget(e, localFailures); }, GroupingFunction.class)); } } + private static void 
checkGroupingFunctionTarget(GroupingFunction f, Set localFailures) { + f.field().forEachDown(e -> { + if (e instanceof GroupingFunction) { + localFailures.add(fail(f.field(), "Cannot embed grouping functions within each other, found [{}] in [{}]", + Expressions.name(f.field()), Expressions.name(f))); + } + }); + } + private static void checkFilterOnAggs(LogicalPlan p, Set localFailures) { if (p instanceof Filter) { Filter filter = (Filter) p; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java index 43d356720f8ed..367c9ea3a149f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -324,7 +324,9 @@ static IndexResolution mergedMapping(String indexPattern, Map { return invalidF != null ? invalidF : createField(s, fieldCapab.getType(), emptyMap(), fieldCapab.isAggregatable()); }); @@ -413,6 +415,8 @@ public void resolveAsSeparateMappings(String indexWildcard, String javaRegex, Ac GetIndexRequest getIndexRequest = createGetIndexRequest(indexWildcard); client.admin().indices().getIndex(getIndexRequest, ActionListener.wrap(getIndexResponse -> { ImmutableOpenMap> mappings = getIndexResponse.getMappings(); + ImmutableOpenMap> aliases = getIndexResponse.getAliases(); + List results = new ArrayList<>(mappings.size()); Pattern pattern = javaRegex != null ? Pattern.compile(javaRegex) : null; for (ObjectObjectCursor> indexMappings : mappings) { @@ -423,7 +427,20 @@ public void resolveAsSeparateMappings(String indexWildcard, String javaRegex, Ac * and not the concrete index: there is a well known information leak of the concrete index name in the response. */ String concreteIndex = indexMappings.key; - if (pattern == null || pattern.matcher(concreteIndex).matches()) { + + // take into account aliases + List aliasMetadata = aliases.get(concreteIndex); + boolean matchesAlias = false; + if (pattern != null && aliasMetadata != null) { + for (AliasMetaData aliasMeta : aliasMetadata) { + if (pattern.matcher(aliasMeta.alias()).matches()) { + matchesAlias = true; + break; + } + } + } + + if (pattern == null || matchesAlias || pattern.matcher(concreteIndex).matches()) { IndexResolution getIndexResult = buildGetIndexResult(concreteIndex, concreteIndex, indexMappings.value); if (getIndexResult.isValid()) { results.add(getIndexResult.get()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java index d6c7543f6afa2..0db87c6b944e4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java @@ -7,16 +7,12 @@ import org.elasticsearch.xpack.sql.ServerSqlException; -import java.util.Locale; - -import static java.lang.String.format; - /** * Thrown when we accidentally attempt to resolve something on on an unresolved entity. Throwing this * is always a bug. 
*/ public class UnresolvedException extends ServerSqlException { public UnresolvedException(String action, Object target) { - super(format(Locale.ROOT, "Invalid call to %s on an unresolved object %s", action, target)); + super("Invalid call to {} on an unresolved object {}", action, target); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java index b09e98d11c17d..9adf45540f3c9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java @@ -133,8 +133,9 @@ public void onResponse(SearchResponse r) { return; } - updateCompositeAfterKey(r, query); - CompositeAggsRowSet rowSet = new CompositeAggsRowSet(extractors, mask, r, limit, serializeQuery(query), indices); + boolean hasAfterKey = updateCompositeAfterKey(r, query); + CompositeAggsRowSet rowSet = new CompositeAggsRowSet(extractors, mask, r, limit, + hasAfterKey ? serializeQuery(query) : null, indices); listener.onResponse(rowSet); } catch (Exception ex) { listener.onFailure(ex); @@ -167,7 +168,7 @@ static CompositeAggregation getComposite(SearchResponse response) { throw new SqlIllegalArgumentException("Unrecognized root group found; {}", agg.getClass()); } - static void updateCompositeAfterKey(SearchResponse r, SearchSourceBuilder next) { + static boolean updateCompositeAfterKey(SearchResponse r, SearchSourceBuilder next) { CompositeAggregation composite = getComposite(r); if (composite == null) { @@ -176,22 +177,25 @@ static void updateCompositeAfterKey(SearchResponse r, SearchSourceBuilder next) Map afterKey = composite.afterKey(); // a null after-key means done - if (afterKey != null) { - AggregationBuilder aggBuilder = next.aggregations().getAggregatorFactories().iterator().next(); - // update after-key with the new value - if (aggBuilder instanceof CompositeAggregationBuilder) { - CompositeAggregationBuilder comp = (CompositeAggregationBuilder) aggBuilder; - comp.aggregateAfter(afterKey); - } else { - throw new SqlIllegalArgumentException("Invalid client request; expected a group-by but instead got {}", aggBuilder); - } + if (afterKey == null) { + return false; + } + + AggregationBuilder aggBuilder = next.aggregations().getAggregatorFactories().iterator().next(); + // update after-key with the new value + if (aggBuilder instanceof CompositeAggregationBuilder) { + CompositeAggregationBuilder comp = (CompositeAggregationBuilder) aggBuilder; + comp.aggregateAfter(afterKey); + return true; + } else { + throw new SqlIllegalArgumentException("Invalid client request; expected a group-by but instead got {}", aggBuilder); } } /** * Deserializes the search source from a byte array. 
*/ - static SearchSourceBuilder deserializeQuery(NamedWriteableRegistry registry, byte[] source) throws IOException { + private static SearchSourceBuilder deserializeQuery(NamedWriteableRegistry registry, byte[] source) throws IOException { try (NamedWriteableAwareStreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(source), registry)) { return new SearchSourceBuilder(in); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java index fbbc839fe1c76..79676b38c4b33 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java @@ -39,18 +39,22 @@ class CompositeAggsRowSet extends ResultRowSet { } // page size - size = limit < 0 ? buckets.size() : Math.min(buckets.size(), limit); + size = limit == -1 ? buckets.size() : Math.min(buckets.size(), limit); if (next == null) { cursor = Cursor.EMPTY; } else { - // compute remaining limit - int remainingLimit = limit - size; + // Compute remaining limit + + // If the limit is -1 then we have a local sorting (sort on aggregate function) that requires all the buckets + // to be processed so we stop only when all data is exhausted. + int remainingLimit = (limit == -1) ? limit : ((limit - size) >= 0 ? (limit - size) : 0); + // if the computed limit is zero, or the size is zero it means either there's nothing left or the limit has been reached // note that a composite agg might be valid but return zero groups (since these can be filtered with HAVING/bucket selector) // however the Querier takes care of that and keeps making requests until either the query is invalid or at least one response - // is returned - if (next == null || size == 0 || remainingLimit == 0) { + // is returned. + if (size == 0 || remainingLimit == 0) { cursor = Cursor.EMPTY; } else { cursor = new CompositeAggregationCursor(next, exts, mask, remainingLimit, indices); @@ -91,4 +95,4 @@ public int size() { public Cursor nextPageCursor() { return cursor; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index 4975910b1b28a..70744772271ea 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -115,30 +115,28 @@ public void query(List output, QueryContainer query, String index, Ac listener = sortingColumns.isEmpty() ? 
listener : new LocalAggregationSorterListener(listener, sortingColumns, query.limit()); ActionListener l = null; - if (query.isAggsOnly()) { if (query.aggs().useImplicitGroupBy()) { - l = new ImplicitGroupActionListener(listener, client, timeout, output, query, search); + l = new ImplicitGroupActionListener(listener, client, cfg, output, query, search); } else { - l = new CompositeActionListener(listener, client, timeout, output, query, search); + l = new CompositeActionListener(listener, client, cfg, output, query, search); } } else { search.scroll(keepAlive); - l = new ScrollActionListener(listener, client, timeout, output, query); + l = new ScrollActionListener(listener, client, cfg, output, query); } client.search(search, l); } public static SearchRequest prepareRequest(Client client, SearchSourceBuilder source, TimeValue timeout, String... indices) { - SearchRequest search = client.prepareSearch(indices) + return client.prepareSearch(indices) // always track total hits accurately .setTrackTotalHits(true) .setAllowPartialSearchResults(false) .setSource(source) .setTimeout(timeout) .request(); - return search; } /** @@ -153,7 +151,7 @@ class LocalAggregationSorterListener implements ActionListener { private final ActionListener listener; // keep the top N entries. - private final PriorityQueue, Integer>> data; + private final AggSortingQueue data; private final AtomicInteger counter = new AtomicInteger(); private volatile Schema schema; @@ -169,53 +167,12 @@ class LocalAggregationSorterListener implements ActionListener { } else { noLimit = false; if (limit > MAXIMUM_SIZE) { - throw new PlanningException("The maximum LIMIT for aggregate sorting is [{}], received [{}]", limit, MAXIMUM_SIZE); + throw new PlanningException("The maximum LIMIT for aggregate sorting is [{}], received [{}]", MAXIMUM_SIZE, limit); } else { size = limit; } } - - this.data = new PriorityQueue, Integer>>(size) { - - // compare row based on the received attribute sort - // if a sort item is not in the list, it is assumed the sorting happened in ES - // and the results are left as is (by using the row ordering), otherwise it is sorted based on the given criteria. - // - // Take for example ORDER BY a, x, b, y - // a, b - are sorted in ES - // x, y - need to be sorted client-side - // sorting on x kicks in, only if the values for a are equal. - - // thanks to @jpountz for the row ordering idea as a way to preserve ordering - @SuppressWarnings("unchecked") - @Override - protected boolean lessThan(Tuple, Integer> l, Tuple, Integer> r) { - for (Tuple tuple : sortingColumns) { - int i = tuple.v1().intValue(); - Comparator comparator = tuple.v2(); - - Object vl = l.v1().get(i); - Object vr = r.v1().get(i); - if (comparator != null) { - int result = comparator.compare(vl, vr); - // if things are equals, move to the next comparator - if (result != 0) { - return result < 0; - } - } - // no comparator means the existing order needs to be preserved - else { - // check the values - if they are equal move to the next comparator - // otherwise return the row order - if (Objects.equals(vl, vr) == false) { - return l.v2().compareTo(r.v2()) < 0; - } - } - } - // everything is equal, fall-back to the row order - return l.v2().compareTo(r.v2()) < 0; - } - }; + this.data = new AggSortingQueue(size, sortingColumns); } @Override @@ -226,9 +183,8 @@ public void onResponse(SchemaRowSet schemaRowSet) { private void doResponse(RowSet rowSet) { // 1. 
consume all pages received - if (consumeRowSet(rowSet) == false) { - return; - } + consumeRowSet(rowSet); + Cursor cursor = rowSet.nextPageCursor(); // 1a. trigger a next call if there's still data if (cursor != Cursor.EMPTY) { @@ -243,31 +199,21 @@ private void doResponse(RowSet rowSet) { sendResponse(); } - private boolean consumeRowSet(RowSet rowSet) { - // use a synchronized block for visibility purposes (there's no concurrency) + private void consumeRowSet(RowSet rowSet) { ResultRowSet rrs = (ResultRowSet) rowSet; - synchronized (data) { - for (boolean hasRows = rrs.hasCurrentRow(); hasRows; hasRows = rrs.advanceRow()) { - List row = new ArrayList<>(rrs.columnCount()); - rrs.forEachResultColumn(row::add); - // if the queue overflows and no limit was specified, bail out - if (data.insertWithOverflow(new Tuple<>(row, counter.getAndIncrement())) != null && noLimit) { - onFailure(new SqlIllegalArgumentException( - "The default limit [{}] for aggregate sorting has been reached; please specify a LIMIT")); - return false; - } + for (boolean hasRows = rrs.hasCurrentRow(); hasRows; hasRows = rrs.advanceRow()) { + List row = new ArrayList<>(rrs.columnCount()); + rrs.forEachResultColumn(row::add); + // if the queue overflows and no limit was specified, throw an error + if (data.insertWithOverflow(new Tuple<>(row, counter.getAndIncrement())) != null && noLimit) { + onFailure(new SqlIllegalArgumentException( + "The default limit [{}] for aggregate sorting has been reached; please specify a LIMIT", MAXIMUM_SIZE)); } } - return true; } private void sendResponse() { - List> list = new ArrayList<>(data.size()); - Tuple, Integer> pop = null; - while ((pop = data.pop()) != null) { - list.add(pop.v1()); - } - listener.onResponse(new PagingListRowSet(schema, list, schema.size(), cfg.pageSize())); + listener.onResponse(new PagingListRowSet(schema, data.asList(), schema.size(), cfg.pageSize())); } @Override @@ -309,9 +255,9 @@ public Aggregations getAggregations() { } }); - ImplicitGroupActionListener(ActionListener listener, Client client, TimeValue keepAlive, List output, + ImplicitGroupActionListener(ActionListener listener, Client client, Configuration cfg, List output, QueryContainer query, SearchRequest request) { - super(listener, client, keepAlive, output, query, request); + super(listener, client, cfg, output, query, request); } @Override @@ -360,16 +306,15 @@ private void handleBuckets(List buckets, SearchResponse respon */ static class CompositeActionListener extends BaseAggActionListener { - CompositeActionListener(ActionListener listener, Client client, TimeValue keepAlive, + CompositeActionListener(ActionListener listener, Client client, Configuration cfg, List output, QueryContainer query, SearchRequest request) { - super(listener, client, keepAlive, output, query, request); + super(listener, client, cfg, output, query, request); } - @Override protected void handleResponse(SearchResponse response, ActionListener listener) { // there are some results - if (response.getAggregations().asList().size() > 0) { + if (response.getAggregations().asList().isEmpty() == false) { // retry if (CompositeAggregationCursor.shouldRetryDueToEmptyPage(response)) { @@ -379,7 +324,7 @@ protected void handleResponse(SearchResponse response, ActionListener listener, Client client, TimeValue keepAlive, List output, + BaseAggActionListener(ActionListener listener, Client client, Configuration cfg, List output, QueryContainer query, SearchRequest request) { - super(listener, client, keepAlive, output); + 
super(listener, client, cfg, output); this.query = query; this.request = request; @@ -428,17 +374,17 @@ protected List initBucketExtractors(SearchResponse response) { private BucketExtractor createExtractor(FieldExtraction ref, BucketExtractor totalCount) { if (ref instanceof GroupByRef) { GroupByRef r = (GroupByRef) ref; - return new CompositeKeyExtractor(r.key(), r.property(), r.zoneId()); + return new CompositeKeyExtractor(r.key(), r.property(), cfg.zoneId(), r.isDateTimeBased()); } if (ref instanceof MetricAggRef) { MetricAggRef r = (MetricAggRef) ref; - return new MetricAggExtractor(r.name(), r.property(), r.innerKey()); + return new MetricAggExtractor(r.name(), r.property(), r.innerKey(), cfg.zoneId(), r.isDateTimeBased()); } if (ref instanceof TopHitsAggRef) { TopHitsAggRef r = (TopHitsAggRef) ref; - return new TopHitsAggExtractor(r.name(), r.fieldDataType()); + return new TopHitsAggExtractor(r.name(), r.fieldDataType(), cfg.zoneId()); } if (ref == GlobalCountRef.INSTANCE) { @@ -467,12 +413,14 @@ private BucketExtractor createExtractor(FieldExtraction ref, BucketExtractor tot static class ScrollActionListener extends BaseActionListener { private final QueryContainer query; private final BitSet mask; + private final boolean multiValueFieldLeniency; - ScrollActionListener(ActionListener listener, Client client, TimeValue keepAlive, + ScrollActionListener(ActionListener listener, Client client, Configuration cfg, List output, QueryContainer query) { - super(listener, client, keepAlive, output); + super(listener, client, cfg, output); this.query = query; this.mask = query.columnMask(output); + this.multiValueFieldLeniency = cfg.multiValueFieldLeniency(); } @Override @@ -516,12 +464,13 @@ protected void handleResponse(SearchResponse response, ActionListener listener; final Client client; + final Configuration cfg; final TimeValue keepAlive; final Schema schema; - BaseActionListener(ActionListener listener, Client client, TimeValue keepAlive, List output) { + BaseActionListener(ActionListener listener, Client client, Configuration cfg, List output) { this.listener = listener; this.client = client; - this.keepAlive = keepAlive; + this.cfg = cfg; + this.keepAlive = cfg.requestTimeout(); this.schema = Rows.schema(output); } @@ -616,4 +567,63 @@ public final void onFailure(Exception ex) { listener.onFailure(ex); } } + + @SuppressWarnings("rawtypes") + static class AggSortingQueue extends PriorityQueue, Integer>> { + + private List> sortingColumns; + + AggSortingQueue(int maxSize, List> sortingColumns) { + super(maxSize); + this.sortingColumns = sortingColumns; + } + + // compare row based on the received attribute sort + // if a sort item is not in the list, it is assumed the sorting happened in ES + // and the results are left as is (by using the row ordering), otherwise it is sorted based on the given criteria. + // + // Take for example ORDER BY a, x, b, y + // a, b - are sorted in ES + // x, y - need to be sorted client-side + // sorting on x kicks in, only if the values for a are equal. 
+ + // thanks to @jpountz for the row ordering idea as a way to preserve ordering + @SuppressWarnings("unchecked") + @Override + protected boolean lessThan(Tuple, Integer> l, Tuple, Integer> r) { + for (Tuple tuple : sortingColumns) { + int i = tuple.v1().intValue(); + Comparator comparator = tuple.v2(); + + Object vl = l.v1().get(i); + Object vr = r.v1().get(i); + if (comparator != null) { + int result = comparator.compare(vl, vr); + // if things are equals, move to the next comparator + if (result != 0) { + return result > 0; + } + } + // no comparator means the existing order needs to be preserved + else { + // check the values - if they are equal move to the next comparator + // otherwise return the row order + if (Objects.equals(vl, vr) == false) { + return l.v2().compareTo(r.v2()) > 0; + } + } + } + // everything is equal, fall-back to the row order + return l.v2().compareTo(r.v2()) > 0; + } + + List> asList() { + List> list = new ArrayList<>(super.size()); + Tuple, Integer> pop; + while ((pop = pop()) != null) { + list.add(0, pop.v1()); + } + return list; + } + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java index bad618161e58c..3ca0757dbbff8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java @@ -33,4 +33,4 @@ class SchemaCompositeAggsRowSet extends CompositeAggsRowSet implements SchemaRow public Schema schema() { return schema; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java index c22b1213d09dc..4e343c1e54f5a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java @@ -33,6 +33,8 @@ public abstract class SourceGenerator { + private SourceGenerator() {} + private static final List NO_STORED_FIELD = singletonList(StoredFieldsContext._NONE_); public static SearchSourceBuilder sourceBuilder(QueryContainer container, QueryBuilder filter, Integer size) { @@ -79,7 +81,12 @@ public static SearchSourceBuilder sourceBuilder(QueryContainer container, QueryB source.size(sz); } if (aggBuilder instanceof CompositeAggregationBuilder) { - ((CompositeAggregationBuilder) aggBuilder).size(sz); + // limit the composite aggs only for non-local sorting + if (container.sortingColumns().isEmpty()) { + ((CompositeAggregationBuilder) aggBuilder).size(sz); + } else { + ((CompositeAggregationBuilder) aggBuilder).size(size); + } } } @@ -107,8 +114,7 @@ private static void sorting(QueryContainer container, SearchSourceBuilder source // sorting only works on not-analyzed fields - look for a multi-field replacement if (attr instanceof FieldAttribute) { - FieldAttribute fa = (FieldAttribute) attr; - fa = fa.isInexact() ? 
fa.exactAttribute() : fa; + FieldAttribute fa = ((FieldAttribute) attr).exactAttribute(); sortBuilder = fieldSort(fa.name()) .missing(as.missing().position()) @@ -125,7 +131,8 @@ private static void sorting(QueryContainer container, SearchSourceBuilder source if (nestedSort == null) { fieldSort.setNestedSort(newSort); } else { - for (; nestedSort.getNestedSort() != null; nestedSort = nestedSort.getNestedSort()) { + while (nestedSort.getNestedSort() != null) { + nestedSort = nestedSort.getNestedSort(); } nestedSort.setNestedSort(newSort); } @@ -167,6 +174,9 @@ private static void optimize(QueryContainer query, SearchSourceBuilder builder) // disable source fetching (only doc values are used) disableSource(builder); } + if (query.shouldTrackHits()) { + builder.trackTotalHits(true); + } } private static void disableSource(SearchSourceBuilder builder) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java index 61e1e6bc67ef0..1b2e8a3abc099 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java @@ -20,44 +20,38 @@ public class CompositeKeyExtractor implements BucketExtractor { /** - * Key or Komposite extractor. + * Key or Composite extractor. */ static final String NAME = "k"; private final String key; private final Property property; private final ZoneId zoneId; + private final boolean isDateTimeBased; /** * Constructs a new CompositeKeyExtractor instance. - * The time-zone parameter is used to indicate a date key. 
*/ - public CompositeKeyExtractor(String key, Property property, ZoneId zoneId) { + public CompositeKeyExtractor(String key, Property property, ZoneId zoneId, boolean isDateTimeBased) { this.key = key; this.property = property; this.zoneId = zoneId; + this.isDateTimeBased = isDateTimeBased; } CompositeKeyExtractor(StreamInput in) throws IOException { key = in.readString(); property = in.readEnum(Property.class); - if (in.readBoolean()) { - zoneId = ZoneId.of(in.readString()); - } else { - zoneId = null; - } + zoneId = ZoneId.of(in.readString()); + isDateTimeBased = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(key); out.writeEnum(property); - if (zoneId == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeString(zoneId.getId()); - } + out.writeString(zoneId.getId()); + out.writeBoolean(isDateTimeBased); } String key() { @@ -72,6 +66,10 @@ ZoneId zoneId() { return zoneId; } + public boolean isDateTimeBased() { + return isDateTimeBased; + } + @Override public String getWriteableName() { return NAME; @@ -91,7 +89,7 @@ public Object extract(Bucket bucket) { Object object = ((Map) m).get(key); - if (zoneId != null) { + if (isDateTimeBased) { if (object == null) { return object; } else if (object instanceof Long) { @@ -106,7 +104,7 @@ public Object extract(Bucket bucket) { @Override public int hashCode() { - return Objects.hash(key, property, zoneId); + return Objects.hash(key, property, zoneId, isDateTimeBased); } @Override @@ -122,7 +120,8 @@ public boolean equals(Object obj) { CompositeKeyExtractor other = (CompositeKeyExtractor) obj; return Objects.equals(key, other.key) && Objects.equals(property, other.property) - && Objects.equals(zoneId, other.zoneId); + && Objects.equals(zoneId, other.zoneId) + && Objects.equals(isDateTimeBased, other.isDateTimeBased); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java index 589481247ac39..9023175d2c423 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java @@ -14,9 +14,9 @@ import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.DateUtils; -import org.joda.time.DateTime; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayDeque; import java.util.Deque; import java.util.List; @@ -30,8 +30,6 @@ */ public class FieldHitExtractor implements HitExtractor { - private static final boolean ARRAYS_LENIENCY = false; - /** * Stands for {@code field}. We try to use short names for {@link HitExtractor}s * to save a few bytes when when we send them back to the user. 
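Taken together, the extractor hunks around this point (CompositeKeyExtractor just above; FieldHitExtractor, MetricAggExtractor and TopHitsAggExtractor below) stop treating a non-null time zone as an implicit "this is a date" marker: the ZoneId is now always serialized, and an explicit isDateTimeBased flag or the field's data type decides whether an epoch-millis value is converted into a datetime in the request's zone. A minimal sketch of that conversion, using plain java.time and assuming DateUtils.asDateTime(long, ZoneId) is equivalent to an Instant-based conversion (an assumption, not something stated in the diff):

    import java.time.Instant;
    import java.time.ZoneId;
    import java.time.ZonedDateTime;

    class DateKeySketch {
        // Hedged stand-in for DateUtils.asDateTime(long, ZoneId): interpret an
        // epoch-millis bucket key or doc value in the zone carried by the extractor.
        static ZonedDateTime asDateTime(long epochMillis, ZoneId zoneId) {
            return Instant.ofEpochMilli(epochMillis).atZone(zoneId);
        }

        public static void main(String[] args) {
            Object key = 1546300800000L;                // e.g. a date histogram composite key
            ZoneId zoneId = ZoneId.of("Europe/Berlin"); // illustrative zone, normally cfg.zoneId()
            boolean isDateTimeBased = true;
            Object extracted = isDateTimeBased && key instanceof Long
                    ? asDateTime((Long) key, zoneId)
                    : key;
            System.out.println(extracted);              // 2019-01-01T01:00+01:00[Europe/Berlin]
        }
    }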
@@ -48,17 +46,25 @@ private static String[] sourcePath(String name, boolean useDocValue, String hitN private final String fieldName, hitName; private final DataType dataType; + private final ZoneId zoneId; private final boolean useDocValue; + private final boolean arrayLeniency; private final String[] path; - public FieldHitExtractor(String name, DataType dataType, boolean useDocValue) { - this(name, dataType, useDocValue, null); + public FieldHitExtractor(String name, DataType dataType, ZoneId zoneId, boolean useDocValue) { + this(name, dataType, zoneId, useDocValue, null, false); + } + + public FieldHitExtractor(String name, DataType dataType, ZoneId zoneId, boolean useDocValue, boolean arrayLeniency) { + this(name, dataType, zoneId, useDocValue, null, arrayLeniency); } - public FieldHitExtractor(String name, DataType dataType, boolean useDocValue, String hitName) { + public FieldHitExtractor(String name, DataType dataType, ZoneId zoneId, boolean useDocValue, String hitName, boolean arrayLeniency) { this.fieldName = name; this.dataType = dataType; + this.zoneId = zoneId; this.useDocValue = useDocValue; + this.arrayLeniency = arrayLeniency; this.hitName = hitName; if (hitName != null) { @@ -74,8 +80,10 @@ public FieldHitExtractor(String name, DataType dataType, boolean useDocValue, St fieldName = in.readString(); String esType = in.readOptionalString(); dataType = esType != null ? DataType.fromTypeName(esType) : null; + zoneId = ZoneId.of(in.readString()); useDocValue = in.readBoolean(); hitName = in.readOptionalString(); + arrayLeniency = in.readBoolean(); path = sourcePath(fieldName, useDocValue, hitName); } @@ -88,8 +96,10 @@ public String getWriteableName() { public void writeTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeOptionalString(dataType == null ? null : dataType.typeName); + out.writeString(zoneId.getId()); out.writeBoolean(useDocValue); out.writeOptionalString(hitName); + out.writeBoolean(arrayLeniency); } @Override @@ -118,7 +128,7 @@ private Object unwrapMultiValue(Object values) { if (list.isEmpty()) { return null; } else { - if (ARRAYS_LENIENCY || list.size() == 1) { + if (arrayLeniency || list.size() == 1) { return unwrapMultiValue(list.get(0)); } else { throw new SqlIllegalArgumentException("Arrays (returned by [{}]) are not supported", fieldName); @@ -130,26 +140,25 @@ private Object unwrapMultiValue(Object values) { } if (dataType == DataType.DATETIME) { if (values instanceof String) { - return DateUtils.asDateTime(Long.parseLong(values.toString())); - } - // returned by nested types... - if (values instanceof DateTime) { - return DateUtils.asDateTime((DateTime) values); + return DateUtils.asDateTime(Long.parseLong(values.toString()), zoneId); } } - if (values instanceof Long || values instanceof Double || values instanceof String || values instanceof Boolean) { + // The Jackson json parser can generate for numerics - Integers, Longs, BigIntegers (if Long is not enough) + // and BigDecimal (if Double is not enough) + if (values instanceof Number + || values instanceof String + || values instanceof Boolean) { return values; } throw new SqlIllegalArgumentException("Type {} (returned by [{}]) is not supported", values.getClass().getSimpleName(), fieldName); } - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "rawtypes" }) Object extractFromSource(Map map) { Object value = null; // Used to avoid recursive method calls - // Holds the sub-maps in the document hierarchy that are pending to be inspected. 
- // along with the current index of the `path`. + // Holds the sub-maps in the document hierarchy that are pending to be inspected along with the current index of the `path`. Deque>> queue = new ArrayDeque<>(); queue.add(new Tuple<>(-1, map)); @@ -165,6 +174,20 @@ Object extractFromSource(Map map) { for (int i = idx + 1; i < path.length; i++) { sj.add(path[i]); Object node = subMap.get(sj.toString()); + + if (node instanceof List) { + List listOfValues = (List) node; + if ((i < path.length - 1) && (listOfValues.size() == 1 || arrayLeniency)) { + // this is a List with a size of 1 e.g.: {"a" : [{"b" : "value"}]} meaning the JSON is a list with one element + // or a list of values with one element e.g.: {"a": {"b" : ["value"]}} + // in case of being lenient about arrays, just extract the first value in the array + node = listOfValues.get(0); + } else { + // a List of elements with more than one value. Break early and let unwrapMultiValue deal with the list + return unwrapMultiValue(node); + } + } + if (node instanceof Map) { if (i < path.length - 1) { // Add the sub-map to the queue along with the current path index @@ -202,9 +225,17 @@ public String fieldName() { return fieldName; } + public ZoneId zoneId() { + return zoneId; + } + + DataType dataType() { + return dataType; + } + @Override public String toString() { - return fieldName + "@" + hitName; + return fieldName + "@" + hitName + "@" + zoneId; } @Override @@ -215,11 +246,12 @@ public boolean equals(Object obj) { FieldHitExtractor other = (FieldHitExtractor) obj; return fieldName.equals(other.fieldName) && hitName.equals(other.hitName) - && useDocValue == other.useDocValue; + && useDocValue == other.useDocValue + && arrayLeniency == other.arrayLeniency; } @Override public int hashCode() { - return Objects.hash(fieldName, useDocValue, hitName); + return Objects.hash(fieldName, useDocValue, hitName, arrayLeniency); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java index e24bf4d0adaa5..d97bb54bd9864 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java @@ -18,8 +18,10 @@ import org.elasticsearch.search.aggregations.metrics.Percentiles; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; +import org.elasticsearch.xpack.sql.util.DateUtils; import java.io.IOException; +import java.time.ZoneId; import java.util.Map; import java.util.Objects; @@ -30,17 +32,23 @@ public class MetricAggExtractor implements BucketExtractor { private final String name; private final String property; private final String innerKey; + private final boolean isDateTimeBased; + private final ZoneId zoneId; - public MetricAggExtractor(String name, String property, String innerKey) { + public MetricAggExtractor(String name, String property, String innerKey, ZoneId zoneId, boolean isDateTimeBased) { this.name = name; this.property = property; this.innerKey = innerKey; + this. 
isDateTimeBased =isDateTimeBased; + this.zoneId = zoneId; } MetricAggExtractor(StreamInput in) throws IOException { name = in.readString(); property = in.readString(); innerKey = in.readOptionalString(); + isDateTimeBased = in.readBoolean(); + zoneId = ZoneId.of(in.readString()); } @Override @@ -48,6 +56,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeString(property); out.writeOptionalString(innerKey); + out.writeBoolean(isDateTimeBased); + out.writeString(zoneId.getId()); } String name() { @@ -62,6 +72,10 @@ String innerKey() { return innerKey; } + ZoneId zoneId() { + return zoneId; + } + @Override public String getWriteableName() { return NAME; @@ -83,20 +97,33 @@ public Object extract(Bucket bucket) { //if (innerKey == null) { // throw new SqlIllegalArgumentException("Invalid innerKey {} specified for aggregation {}", innerKey, name); //} - return ((InternalNumericMetricsAggregation.MultiValue) agg).value(property); + return handleDateTime(((InternalNumericMetricsAggregation.MultiValue) agg).value(property)); } else if (agg instanceof InternalFilter) { // COUNT(expr) and COUNT(ALL expr) uses this type of aggregation to account for non-null values only return ((InternalFilter) agg).getDocCount(); } Object v = agg.getProperty(property); - return innerKey != null && v instanceof Map ? ((Map) v).get(innerKey) : v; + return handleDateTime(innerKey != null && v instanceof Map ? ((Map) v).get(innerKey) : v); + } + + private Object handleDateTime(Object object) { + if (isDateTimeBased) { + if (object == null) { + return object; + } else if (object instanceof Number) { + return DateUtils.asDateTime(((Number) object).longValue(), zoneId); + } else { + throw new SqlIllegalArgumentException("Invalid date key returned: {}", object); + } + } + return object; } /** * Check if the given aggregate has been executed and has computed values * or not (the bucket is null). - * + * * Waiting on https://github.com/elastic/elasticsearch/issues/34903 */ private static boolean containsValues(InternalAggregation agg) { @@ -130,11 +157,11 @@ public boolean equals(Object obj) { if (this == obj) { return true; } - + if (obj == null || getClass() != obj.getClass()) { return false; } - + MetricAggExtractor other = (MetricAggExtractor) obj; return Objects.equals(name, other.name) && Objects.equals(property, other.property) @@ -146,4 +173,4 @@ public String toString() { String i = innerKey != null ? "[" + innerKey + "]" : ""; return Aggs.ROOT_GROUP_NAME + ">" + name + "." 
+ property + i; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractor.java index 429ff2edfc984..b541df7e81a81 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractor.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.sql.util.DateUtils; import java.io.IOException; +import java.time.ZoneId; import java.util.Objects; public class TopHitsAggExtractor implements BucketExtractor { @@ -22,27 +23,39 @@ public class TopHitsAggExtractor implements BucketExtractor { private final String name; private final DataType fieldDataType; + private final ZoneId zoneId; - public TopHitsAggExtractor(String name, DataType fieldDataType) { + public TopHitsAggExtractor(String name, DataType fieldDataType, ZoneId zoneId) { this.name = name; this.fieldDataType = fieldDataType; + this.zoneId = zoneId; } TopHitsAggExtractor(StreamInput in) throws IOException { name = in.readString(); fieldDataType = in.readEnum(DataType.class); + zoneId = ZoneId.of(in.readString()); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeEnum(fieldDataType); + out.writeString(zoneId.getId()); } String name() { return name; } + DataType fieldDataType() { + return fieldDataType; + } + + ZoneId zoneId() { + return zoneId; + } + @Override public String getWriteableName() { return NAME; @@ -61,7 +74,7 @@ public Object extract(Bucket bucket) { Object value = agg.getHits().getAt(0).getFields().values().iterator().next().getValue(); if (fieldDataType.isDateBased()) { - return DateUtils.asDateTime(Long.parseLong(value.toString())); + return DateUtils.asDateTime(Long.parseLong(value.toString()), zoneId); } else { return value; } @@ -69,7 +82,7 @@ public Object extract(Bucket bucket) { @Override public int hashCode() { - return Objects.hash(name, fieldDataType); + return Objects.hash(name, fieldDataType, zoneId); } @Override @@ -84,11 +97,12 @@ public boolean equals(Object obj) { TopHitsAggExtractor other = (TopHitsAggExtractor) obj; return Objects.equals(name, other.name) - && Objects.equals(fieldDataType, other.fieldDataType); + && Objects.equals(fieldDataType, other.fieldDataType) + && Objects.equals(zoneId, other.zoneId); } @Override public String toString() { - return "TopHits>" + name + "[" + fieldDataType + "]"; + return "TopHits>" + name + "[" + fieldDataType + "]@" + zoneId; } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expression.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expression.java index 745cc36e34a57..616c337e64c9a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expression.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expression.java @@ -14,9 +14,6 @@ import org.elasticsearch.xpack.sql.util.StringUtils; import java.util.List; -import java.util.Locale; - -import static java.lang.String.format; /** * In a SQL statement, an Expression is whatever a user specifies inside an @@ -39,10 +36,6 @@ public TypeResolution(String message) { this(true, message); } - TypeResolution(String message, Object... 
args) { - this(true, format(Locale.ROOT, message, args)); - } - private TypeResolution(boolean unresolved, String message) { this.failed = unresolved; this.message = message; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java index 04d660642c8b2..ca5e4b757567c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java @@ -6,22 +6,16 @@ package org.elasticsearch.xpack.sql.expression; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; -import org.elasticsearch.xpack.sql.expression.Expression.TypeResolution; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; import org.elasticsearch.xpack.sql.type.DataType; -import org.elasticsearch.xpack.sql.type.DataTypes; import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.Locale; -import java.util.StringJoiner; import java.util.function.Predicate; -import static java.lang.String.format; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; -import static org.elasticsearch.xpack.sql.type.DataType.BOOLEAN; public final class Expressions { @@ -154,55 +148,4 @@ public static List pipe(List expressions) { } return pipes; } - - public static TypeResolution typeMustBeBoolean(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, dt -> dt == BOOLEAN, operationName, paramOrd, "boolean"); - } - - public static TypeResolution typeMustBeInteger(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, DataType::isInteger, operationName, paramOrd, "integer"); - } - - public static TypeResolution typeMustBeNumeric(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, DataType::isNumeric, operationName, paramOrd, "numeric"); - } - - public static TypeResolution typeMustBeString(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, DataType::isString, operationName, paramOrd, "string"); - } - - public static TypeResolution typeMustBeDate(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, DataType::isDateBased, operationName, paramOrd, "date", "datetime"); - } - - public static TypeResolution typeMustBeNumericOrDate(Expression e, String operationName, ParamOrdinal paramOrd) { - return typeMustBe(e, dt -> dt.isNumeric() || dt.isDateBased(), operationName, paramOrd, "date", "datetime", "numeric"); - } - - public static TypeResolution typeMustBe(Expression e, - Predicate predicate, - String operationName, - ParamOrdinal paramOrd, - String... acceptedTypes) { - return predicate.test(e.dataType()) || DataTypes.isNull(e.dataType())? - TypeResolution.TYPE_RESOLVED : - new TypeResolution(format(Locale.ROOT, "[%s]%s argument must be [%s], found value [%s] type [%s]", - operationName, - paramOrd == null || paramOrd == ParamOrdinal.DEFAULT ? "" : " " + paramOrd.name().toLowerCase(Locale.ROOT), - acceptedTypesForErrorMsg(acceptedTypes), - Expressions.name(e), - e.dataType().typeName)); - } - - private static String acceptedTypesForErrorMsg(String... 
acceptedTypes) { - StringJoiner sj = new StringJoiner(", "); - for (int i = 0; i < acceptedTypes.length - 1; i++) { - sj.add(acceptedTypes[i]); - } - if (acceptedTypes.length > 1) { - return sj.toString() + " or " + acceptedTypes[acceptedTypes.length - 1]; - } else { - return acceptedTypes[0]; - } - } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java index 832af029df315..cb86e2742b2d8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java @@ -81,13 +81,14 @@ public FieldAttribute nestedParent() { return nestedParent; } - public boolean isInexact() { - return field.isExact() == false; + public EsField.Exact getExactInfo() { + return field.getExactInfo(); } public FieldAttribute exactAttribute() { - if (field.isExact() == false) { - return innerField(field.getExactField()); + EsField exactField = field.getExactField(); + if (exactField.equals(field) == false) { + return innerField(exactField); } return this; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Order.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Order.java index 6a57c3275d4d1..267a8827d8cd6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Order.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Order.java @@ -5,14 +5,15 @@ */ package org.elasticsearch.xpack.sql.expression; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; import java.util.List; import java.util.Objects; import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isExact; public class Order extends Expression { @@ -45,6 +46,11 @@ public Nullability nullable() { return Nullability.FALSE; } + @Override + protected TypeResolution resolveType() { + return isExact(child, "ORDER BY cannot be applied to field of data type [{}]: {}"); + } + @Override public DataType dataType() { return child.dataType(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java new file mode 100644 index 0000000000000..61bc8ed44a9a8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.elasticsearch.xpack.sql.type.EsField; + +import java.util.Locale; +import java.util.StringJoiner; +import java.util.function.Predicate; + +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.sql.expression.Expression.TypeResolution; +import static org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; +import static org.elasticsearch.xpack.sql.expression.Expressions.name; +import static org.elasticsearch.xpack.sql.type.DataType.BOOLEAN; + +public final class TypeResolutions { + + private TypeResolutions() {} + + public static TypeResolution isBoolean(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, dt -> dt == BOOLEAN, operationName, paramOrd, "boolean"); + } + + public static TypeResolution isInteger(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, DataType::isInteger, operationName, paramOrd, "integer"); + } + + public static TypeResolution isNumeric(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, DataType::isNumeric, operationName, paramOrd, "numeric"); + } + + public static TypeResolution isString(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, DataType::isString, operationName, paramOrd, "string"); + } + + public static TypeResolution isDate(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, DataType::isDateBased, operationName, paramOrd, "date", "datetime"); + } + + public static TypeResolution isNumericOrDate(Expression e, String operationName, ParamOrdinal paramOrd) { + return isType(e, dt -> dt.isNumeric() || dt.isDateBased(), operationName, paramOrd, "date", "datetime", "numeric"); + } + + public static TypeResolution isExact(Expression e, String message) { + if (e instanceof FieldAttribute) { + EsField.Exact exact = ((FieldAttribute) e).getExactInfo(); + if (exact.hasExact() == false) { + return new TypeResolution(format(null, message, e.dataType().typeName, exact.errorMsg())); + } + } + return TypeResolution.TYPE_RESOLVED; + } + + public static TypeResolution isExact(Expression e, String operationName, ParamOrdinal paramOrd) { + if (e instanceof FieldAttribute) { + EsField.Exact exact = ((FieldAttribute) e).getExactInfo(); + if (exact.hasExact() == false) { + return new TypeResolution(format(null, "[{}] cannot operate on {}field of data type [{}]: {}", + operationName, + paramOrd == null || paramOrd == ParamOrdinal.DEFAULT ? + "" : paramOrd.name().toLowerCase(Locale.ROOT) + " argument ", + e.dataType().typeName, exact.errorMsg())); + } + } + return TypeResolution.TYPE_RESOLVED; + } + + public static TypeResolution isStringAndExact(Expression e, String operationName, ParamOrdinal paramOrd) { + TypeResolution resolution = isString(e, operationName, paramOrd); + if (resolution.unresolved()) { + return resolution; + } + + return isExact(e, operationName, paramOrd); + } + + public static TypeResolution isFoldable(Expression e, String operationName, ParamOrdinal paramOrd) { + if (!e.foldable()) { + return new TypeResolution(format(null, "{}argument of [{}] must be a constant, received [{}]", + paramOrd == null || paramOrd == ParamOrdinal.DEFAULT ? 
"" : paramOrd.name().toLowerCase(Locale.ROOT) + " ", + operationName, + Expressions.name(e))); + } + return TypeResolution.TYPE_RESOLVED; + } + + public static TypeResolution isNotFoldable(Expression e, String operationName, ParamOrdinal paramOrd) { + if (e.foldable()) { + return new TypeResolution(format(null, "{}argument of [{}] must be a table column, found constant [{}]", + paramOrd == null || paramOrd == ParamOrdinal.DEFAULT ? "" : paramOrd.name().toLowerCase(Locale.ROOT) + " ", + operationName, + Expressions.name(e))); + } + return TypeResolution.TYPE_RESOLVED; + } + + public static TypeResolution isType(Expression e, + Predicate predicate, + String operationName, + ParamOrdinal paramOrd, + String... acceptedTypes) { + return predicate.test(e.dataType()) || DataTypes.isNull(e.dataType())? + TypeResolution.TYPE_RESOLVED : + new TypeResolution(format(null, "{}argument of [{}] must be [{}], found value [{}] type [{}]", + paramOrd == null || paramOrd == ParamOrdinal.DEFAULT ? "" : paramOrd.name().toLowerCase(Locale.ROOT) + " ", + operationName, + acceptedTypesForErrorMsg(acceptedTypes), + name(e), + e.dataType().typeName)); + } + + private static String acceptedTypesForErrorMsg(String... acceptedTypes) { + StringJoiner sj = new StringJoiner(", "); + for (int i = 0; i < acceptedTypes.length - 1; i++) { + sj.add(acceptedTypes[i]); + } + if (acceptedTypes.length > 1) { + return sj.toString() + " or " + acceptedTypes[acceptedTypes.length - 1]; + } else { + return acceptedTypes[0]; + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java index b432c5063a64b..177f598dc9a46 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java @@ -7,6 +7,8 @@ import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.TypeResolutions; import org.elasticsearch.xpack.sql.expression.function.Function; import org.elasticsearch.xpack.sql.expression.gen.pipeline.AggNameInput; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; @@ -78,8 +80,13 @@ public boolean equals(Object obj) { && Objects.equals(other.parameters(), parameters()); } + @Override + protected TypeResolution resolveType() { + return TypeResolutions.isExact(field, sourceText(), Expressions.ParamOrdinal.DEFAULT); + } + @Override public int hashCode() { return Objects.hash(field(), parameters()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java index 95a1b50cc1139..236cf105a4c80 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java @@ -63,16 +63,6 @@ public String functionId() { return functionId; } - @Override - public String name() { - if (distinct()) { - StringBuilder sb = new StringBuilder(super.name()); - 
sb.insert(sb.indexOf("(") + 1, "DISTINCT "); - return sb.toString(); - } - return super.name(); - } - @Override public AggregateFunctionAttribute toAttribute() { // COUNT(*) gets its value from the parent aggregation on which _count is called diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java index 898c98463445e..5827083343a0f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.aggregate; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; @@ -14,6 +13,9 @@ import java.util.List; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isExact; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumericOrDate; + /** * Find the maximum value in matching documents. */ @@ -46,9 +48,9 @@ public String innerName() { @Override protected TypeResolution resolveType() { if (field().dataType().isString()) { - return TypeResolution.TYPE_RESOLVED; + return isExact(field(), sourceText(), ParamOrdinal.DEFAULT); } else { - return Expressions.typeMustBeNumericOrDate(field(), sourceText(), ParamOrdinal.DEFAULT); + return isNumericOrDate(field(), sourceText(), ParamOrdinal.DEFAULT); } } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java index 8652759fca486..e64774fe8e720 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.aggregate; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; @@ -14,6 +13,9 @@ import java.util.List; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isExact; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumericOrDate; + /** * Find the minimum value in matched documents. 
*/ @@ -49,9 +51,9 @@ public String innerName() { @Override protected TypeResolution resolveType() { if (field().dataType().isString()) { - return TypeResolution.TYPE_RESOLVED; + return isExact(field(), sourceText(), ParamOrdinal.DEFAULT); } else { - return Expressions.typeMustBeNumericOrDate(field(), sourceText(), ParamOrdinal.DEFAULT); + return isNumericOrDate(field(), sourceText(), ParamOrdinal.DEFAULT); } } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java index bfe0d2ded7e34..21d5c23d23a5a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java @@ -6,13 +6,14 @@ package org.elasticsearch.xpack.sql.expression.function.aggregate; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; import java.util.List; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; + abstract class NumericAggregate extends AggregateFunction { NumericAggregate(Source source, Expression field, List parameters) { @@ -25,7 +26,7 @@ abstract class NumericAggregate extends AggregateFunction { @Override protected TypeResolution resolveType() { - return Expressions.typeMustBeNumeric(field(), sourceText(), ParamOrdinal.DEFAULT); + return isNumeric(field(), sourceText(), ParamOrdinal.DEFAULT); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java index 76c7bda320012..a0585f4c02176 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.aggregate; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.Foldables; import org.elasticsearch.xpack.sql.tree.NodeInfo; @@ -16,7 +15,8 @@ import java.util.List; import static java.util.Collections.singletonList; -import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isFoldable; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; public class Percentile extends NumericAggregate implements EnclosedAgg { @@ -42,17 +42,17 @@ public Percentile replaceChildren(List newChildren) { @Override protected TypeResolution resolveType() { - if (!percent.foldable()) { - return new TypeResolution(format(null, "Second argument of PERCENTILE must be a constant, received [{}]", - Expressions.name(percent))); + TypeResolution resolution = isFoldable(percent, sourceText(), ParamOrdinal.SECOND); + if (resolution.unresolved()) { + return resolution; } - 
TypeResolution resolution = super.resolveType(); + resolution = super.resolveType(); if (resolution.unresolved()) { return resolution; } - return Expressions.typeMustBeNumeric(percent, sourceText(), ParamOrdinal.DEFAULT); + return isNumeric(percent, sourceText(), ParamOrdinal.DEFAULT); } public Expression percent() { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java index b30b38a01b6c5..da8c487ff31f9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.aggregate; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.Foldables; import org.elasticsearch.xpack.sql.tree.NodeInfo; @@ -16,7 +15,8 @@ import java.util.List; import static java.util.Collections.singletonList; -import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isFoldable; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; public class PercentileRank extends AggregateFunction implements EnclosedAgg { @@ -42,17 +42,17 @@ public Expression replaceChildren(List newChildren) { @Override protected TypeResolution resolveType() { - if (!value.foldable()) { - return new TypeResolution(format(null, "Second argument of PERCENTILE_RANK must be a constant, received [{}]", - Expressions.name(value))); + TypeResolution resolution = isFoldable(value, sourceText(), ParamOrdinal.SECOND); + if (resolution.unresolved()) { + return resolution; } - TypeResolution resolution = super.resolveType(); + resolution = super.resolveType(); if (resolution.unresolved()) { return resolution; } - return Expressions.typeMustBeNumeric(value, sourceText(), ParamOrdinal.DEFAULT); + return isNumeric(value, sourceText(), ParamOrdinal.DEFAULT); } public Expression value() { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/TopHits.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/TopHits.java index 227ca9b8db3d1..9364f5f4fc53f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/TopHits.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/TopHits.java @@ -5,16 +5,15 @@ */ package org.elasticsearch.xpack.sql.expression.function.aggregate; -import org.elasticsearch.xpack.sql.analysis.index.MappingException; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; -import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.TypeResolutions; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; import java.util.Collections; -import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; +import static 
org.elasticsearch.xpack.sql.expression.TypeResolutions.isNotFoldable; /** * Super class of Aggregation functions on field types other than numeric, that need to be @@ -37,29 +36,25 @@ public DataType dataType() { @Override protected TypeResolution resolveType() { - if (field().foldable()) { - return new TypeResolution(format(null, "First argument of [{}] must be a table column, found constant [{}]", - functionName(), - Expressions.name(field()))); + TypeResolution resolution = isNotFoldable(field(), sourceText(), ParamOrdinal.FIRST); + if (resolution.unresolved()) { + return resolution; } - try { - ((FieldAttribute) field()).exactAttribute(); - } catch (MappingException ex) { - return new TypeResolution(format(null, "[{}] cannot operate on first argument field of data type [{}]", - functionName(), field().dataType().typeName)); + + resolution = TypeResolutions.isExact(field(), sourceText(), ParamOrdinal.FIRST); + if (resolution.unresolved()) { + return resolution; } if (orderField() != null) { - if (orderField().foldable()) { - return new TypeResolution(format(null, "Second argument of [{}] must be a table column, found constant [{}]", - functionName(), - Expressions.name(orderField()))); + resolution = isNotFoldable(orderField(), sourceText(), ParamOrdinal.SECOND); + if (resolution.unresolved()) { + return resolution; } - try { - ((FieldAttribute) orderField()).exactAttribute(); - } catch (MappingException ex) { - return new TypeResolution(format(null, "[{}] cannot operate on second argument field of data type [{}]", - functionName(), orderField().dataType().typeName)); + + resolution = TypeResolutions.isExact(orderField(), sourceText(), ParamOrdinal.SECOND); + if (resolution.unresolved()) { + return resolution; } } return TypeResolution.TYPE_RESOLVED; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/Histogram.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/Histogram.java index 23061bfea1859..9cb752de5e69a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/Histogram.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/grouping/Histogram.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.sql.expression.function.grouping; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.tree.NodeInfo; @@ -20,6 +19,10 @@ import java.util.List; import java.util.Objects; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumericOrDate; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isType; + public class Histogram extends GroupingFunction { private final Literal interval; @@ -41,13 +44,13 @@ public ZoneId zoneId() { @Override protected TypeResolution resolveType() { - TypeResolution resolution = Expressions.typeMustBeNumericOrDate(field(), "HISTOGRAM", ParamOrdinal.FIRST); + TypeResolution resolution = isNumericOrDate(field(), "HISTOGRAM", ParamOrdinal.FIRST); if (resolution == TypeResolution.TYPE_RESOLVED) { // interval must be Literal interval if (field().dataType().isDateBased()) { - resolution = Expressions.typeMustBe(interval, DataTypes::isInterval, "(Date) 
HISTOGRAM", ParamOrdinal.SECOND, "interval"); + resolution = isType(interval, DataTypes::isInterval, "(Date) HISTOGRAM", ParamOrdinal.SECOND, "interval"); } else { - resolution = Expressions.typeMustBeNumeric(interval, "(Numeric) HISTOGRAM", ParamOrdinal.SECOND); + resolution = isNumeric(interval, "(Numeric) HISTOGRAM", ParamOrdinal.SECOND); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java index e605b82d6b9ed..f5e1a3ece38e9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NonIsoDateTimeProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.BinaryStringNumericProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.BinaryStringStringProcessor; @@ -68,7 +69,6 @@ public static List getNamedWriteables() { // arithmetic entries.add(new Entry(Processor.class, BinaryArithmeticProcessor.NAME, BinaryArithmeticProcessor::new)); entries.add(new Entry(Processor.class, UnaryArithmeticProcessor.NAME, UnaryArithmeticProcessor::new)); - entries.add(new Entry(Processor.class, BinaryMathProcessor.NAME, BinaryMathProcessor::new)); // comparators entries.add(new Entry(Processor.class, BinaryComparisonProcessor.NAME, BinaryComparisonProcessor::new)); entries.add(new Entry(Processor.class, InProcessor.NAME, InProcessor::new)); @@ -82,6 +82,8 @@ public static List getNamedWriteables() { entries.add(new Entry(Processor.class, NonIsoDateTimeProcessor.NAME, NonIsoDateTimeProcessor::new)); entries.add(new Entry(Processor.class, QuarterProcessor.NAME, QuarterProcessor::new)); // math + entries.add(new Entry(Processor.class, BinaryMathProcessor.NAME, BinaryMathProcessor::new)); + entries.add(new Entry(Processor.class, BinaryOptionalMathProcessor.NAME, BinaryOptionalMathProcessor::new)); entries.add(new Entry(Processor.class, MathProcessor.NAME, MathProcessor::new)); // string entries.add(new Entry(Processor.class, StringProcessor.NAME, StringProcessor::new)); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java index fa949007ef58a..cae78a42e55e9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import 
org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.tree.NodeInfo; @@ -17,6 +16,8 @@ import java.time.ZonedDateTime; import java.util.Objects; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isDate; + abstract class BaseDateTimeFunction extends UnaryScalarFunction { private final ZoneId zoneId; @@ -35,7 +36,7 @@ protected final NodeInfo info() { @Override protected TypeResolution resolveType() { - return Expressions.typeMustBeDate(field(), sourceText(), ParamOrdinal.DEFAULT); + return isDate(field(), sourceText(), ParamOrdinal.DEFAULT); } public ZoneId zoneId() { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTime.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTime.java index 8224ef090b78c..52ce827968fb7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTime.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTime.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Foldables; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; @@ -15,6 +14,8 @@ import java.time.ZonedDateTime; +import static org.elasticsearch.xpack.sql.util.DateUtils.getNanoPrecision; + public class CurrentDateTime extends CurrentFunction { private final Expression precision; @@ -34,13 +35,6 @@ protected NodeInfo info() { } static ZonedDateTime nanoPrecision(ZonedDateTime zdt, Expression precisionExpression) { - int precision = precisionExpression != null ? Foldables.intValueOf(precisionExpression) : 0; - int nano = zdt.getNano(); - if (precision >= 0 && precision < 10) { - // remove the remainder - nano = nano - nano % (int) Math.pow(10, (9 - precision)); - return zdt.withNano(nano); - } - return zdt; + return zdt.withNano(getNanoPrecision(precisionExpression, zdt.getNano())); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessor.java index 397f84b4cf840..f66dcf185fcb7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessor.java @@ -25,26 +25,7 @@ public enum BinaryMathOperation implements BiFunction { ATAN2((l, r) -> Math.atan2(l.doubleValue(), r.doubleValue())), MOD(Arithmetics::mod), - POWER((l, r) -> Math.pow(l.doubleValue(), r.doubleValue())), - ROUND((l, r) -> { - if (r instanceof Float || r instanceof Double) { - throw new SqlIllegalArgumentException("An integer number is required; received [{}] as second parameter", r); - } - - double tenAtScale = Math.pow(10., r.longValue()); - double middleResult = l.doubleValue() * tenAtScale; - int sign = middleResult > 0 ? 
1 : -1; - return Math.round(Math.abs(middleResult)) / tenAtScale * sign; - }), - TRUNCATE((l, r) -> { - if (r instanceof Float || r instanceof Double) { - throw new SqlIllegalArgumentException("An integer number is required; received [{}] as second parameter", r); - } - - double tenAtScale = Math.pow(10., r.longValue()); - double g = l.doubleValue() * tenAtScale; - return (((l.doubleValue() < 0) ? Math.ceil(g) : Math.floor(g)) / tenAtScale); - }); + POWER((l, r) -> Math.pow(l.doubleValue(), r.doubleValue())); private final BiFunction process; @@ -79,7 +60,7 @@ public String getWriteableName() { @Override protected void checkParameter(Object param) { if (!(param instanceof Number)) { - throw new SqlIllegalArgumentException("A number is required; received {}", param); + throw new SqlIllegalArgumentException("A number is required; received [{}]", param); } } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java index f3369bf14a457..98bb36d175d61 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java @@ -16,6 +16,8 @@ import java.util.Objects; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; + public abstract class BinaryNumericFunction extends BinaryScalarFunction { private final BinaryMathOperation operation; @@ -36,12 +38,12 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution resolution = Expressions.typeMustBeNumeric(left(), sourceText(), ParamOrdinal.FIRST); + TypeResolution resolution = isNumeric(left(), sourceText(), ParamOrdinal.FIRST); if (resolution.unresolved()) { return resolution; } - return Expressions.typeMustBeNumeric(right(), sourceText(), ParamOrdinal.SECOND); + return isNumeric(right(), sourceText(), ParamOrdinal.SECOND); } @Override @@ -67,6 +69,6 @@ public boolean equals(Object obj) { BinaryNumericFunction other = (BinaryNumericFunction) obj; return Objects.equals(other.left(), left()) && Objects.equals(other.right(), right()) - && Objects.equals(other.operation, operation); + && Objects.equals(other.operation, operation); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryOptionalMathPipe.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryOptionalMathPipe.java new file mode 100644 index 0000000000000..0897050c97be3 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryOptionalMathPipe.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor.BinaryOptionalMathOperation; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +public class BinaryOptionalMathPipe extends Pipe { + + private final Pipe left, right; + private final BinaryOptionalMathOperation operation; + + public BinaryOptionalMathPipe(Source source, Expression expression, Pipe left, Pipe right, BinaryOptionalMathOperation operation) { + super(source, expression, right == null ? Arrays.asList(left) : Arrays.asList(left, right)); + this.left = left; + this.right = right; + this.operation = operation; + } + + @Override + public final Pipe replaceChildren(List newChildren) { + int childrenSize = newChildren.size(); + if (childrenSize > 2 || childrenSize < 1) { + throw new IllegalArgumentException("expected [1 or 2] children but received [" + newChildren.size() + "]"); + } + return replaceChildren(newChildren.get(0), childrenSize == 1 ? null : newChildren.get(1)); + } + + @Override + public final Pipe resolveAttributes(AttributeResolver resolver) { + Pipe newLeft = left.resolveAttributes(resolver); + Pipe newRight = right == null ? right : right.resolveAttributes(resolver); + if (newLeft == left && newRight == right) { + return this; + } + return replaceChildren(newLeft, newRight); + } + + @Override + public boolean supportedByAggsOnlyQuery() { + return right == null ? left.supportedByAggsOnlyQuery() : left.supportedByAggsOnlyQuery() || right.supportedByAggsOnlyQuery(); + } + + @Override + public boolean resolved() { + return left.resolved() && (right == null || right.resolved()); + } + + protected Pipe replaceChildren(Pipe newLeft, Pipe newRight) { + return new BinaryOptionalMathPipe(source(), expression(), newLeft, newRight, operation); + } + + @Override + public final void collectFields(SqlSourceBuilder sourceBuilder) { + left.collectFields(sourceBuilder); + if (right != null) { + right.collectFields(sourceBuilder); + } + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, BinaryOptionalMathPipe::new, expression(), left, right, operation); + } + + @Override + public BinaryOptionalMathProcessor asProcessor() { + return new BinaryOptionalMathProcessor(left.asProcessor(), right == null ? 
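+        // the second operand is optional (e.g. ROUND(x)); a null processor is passed through and defaulted by the operation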
null : right.asProcessor(), operation); + } + + public Pipe right() { + return right; + } + + public Pipe left() { + return left; + } + + public BinaryOptionalMathOperation operation() { + return operation; + } + + @Override + public int hashCode() { + return Objects.hash(left, right, operation); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + BinaryOptionalMathPipe other = (BinaryOptionalMathPipe) obj; + return Objects.equals(left, other.left) + && Objects.equals(right, other.right) + && Objects.equals(operation, other.operation); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryOptionalMathProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryOptionalMathProcessor.java new file mode 100644 index 0000000000000..dc89b6ce5cff1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryOptionalMathProcessor.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.BiFunction; + +/** + * Processor for binary mathematical operations that have a second optional parameter. + */ +public class BinaryOptionalMathProcessor implements Processor { + + public enum BinaryOptionalMathOperation implements BiFunction { + + ROUND((l, r) -> { + double tenAtScale = Math.pow(10., r.longValue()); + double middleResult = l.doubleValue() * tenAtScale; + int sign = middleResult > 0 ? 1 : -1; + return Math.round(Math.abs(middleResult)) / tenAtScale * sign; + }), + TRUNCATE((l, r) -> { + double tenAtScale = Math.pow(10., r.longValue()); + double g = l.doubleValue() * tenAtScale; + return (((l.doubleValue() < 0) ? 
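+            // truncate toward zero: ceil for negative inputs, floor for positive ones, then divide the scale back out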
Math.ceil(g) : Math.floor(g)) / tenAtScale); + }); + + private final BiFunction process; + + BinaryOptionalMathOperation(BiFunction process) { + this.process = process; + } + + @Override + public final Number apply(Number left, Number right) { + if (left == null) { + return null; + } + if (!(left instanceof Number)) { + throw new SqlIllegalArgumentException("A number is required; received [{}]", left); + } + + if (right != null) { + if (!(right instanceof Number)) { + throw new SqlIllegalArgumentException("A number is required; received [{}]", right); + } + if (right instanceof Float || right instanceof Double) { + throw new SqlIllegalArgumentException("An integer number is required; received [{}] as second parameter", right); + } + } else { + right = 0; + } + + return process.apply(left, right); + } + } + + private final Processor left, right; + private final BinaryOptionalMathOperation operation; + public static final String NAME = "mob"; + + public BinaryOptionalMathProcessor(Processor left, Processor right, BinaryOptionalMathOperation operation) { + this.left = left; + this.right = right; + this.operation = operation; + } + + public BinaryOptionalMathProcessor(StreamInput in) throws IOException { + left = in.readNamedWriteable(Processor.class); + right = in.readOptionalNamedWriteable(Processor.class); + operation = in.readEnum(BinaryOptionalMathOperation.class); + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(left); + out.writeOptionalNamedWriteable(right); + out.writeEnum(operation); + } + + @Override + public Object process(Object input) { + return doProcess(left().process(input), right() == null ? null : right().process(input)); + } + + public Number doProcess(Object left, Object right) { + if (left == null) { + return null; + } + if (!(left instanceof Number)) { + throw new SqlIllegalArgumentException("A number is required; received [{}]", left); + } + + if (right != null) { + if (!(right instanceof Number)) { + throw new SqlIllegalArgumentException("A number is required; received [{}]", right); + } + if (right instanceof Float || right instanceof Double) { + throw new SqlIllegalArgumentException("An integer number is required; received [{}] as second parameter", right); + } + } else { + right = 0; + } + + return operation().apply((Number) left, (Number) right); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + BinaryOptionalMathProcessor other = (BinaryOptionalMathProcessor) obj; + return Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()) + && Objects.equals(operation(), other.operation()); + } + + @Override + public int hashCode() { + return Objects.hash(left(), right(), operation()); + } + + public Processor left() { + return left; + } + + public Processor right() { + return right; + } + + public BinaryOptionalMathOperation operation() { + return operation; + } + + @Override + public String getWriteableName() { + return NAME; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryOptionalNumericFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryOptionalNumericFunction.java new file mode 100644 index 0000000000000..8b6996ab64e26 --- /dev/null +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryOptionalNumericFunction.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor.BinaryOptionalMathOperation; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isInteger; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; +import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; + +public abstract class BinaryOptionalNumericFunction extends ScalarFunction { + + private final Expression left, right; + + public BinaryOptionalNumericFunction(Source source, Expression left, Expression right) { + super(source, right != null ? Arrays.asList(left, right) : Arrays.asList(left)); + this.left = left; + this.right = right; + } + + @Override + protected TypeResolution resolveType() { + if (!childrenResolved()) { + return new TypeResolution("Unresolved children"); + } + + TypeResolution resolution = isNumeric(left, sourceText(), ParamOrdinal.FIRST); + if (resolution.unresolved()) { + return resolution; + + } + + return right == null ? TypeResolution.TYPE_RESOLVED : isInteger(right, sourceText(), ParamOrdinal.SECOND); + } + + @Override + protected Pipe makePipe() { + return new BinaryOptionalMathPipe(source(), this, + Expressions.pipe(left), + right == null ? null : Expressions.pipe(right), + operation()); + } + + protected abstract BinaryOptionalMathOperation operation(); + + @Override + public boolean foldable() { + return left.foldable() + && (right == null || right.foldable()); + } + + @Override + public Object fold() { + return operation().apply((Number) left.fold(), (right == null ? 
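+            // an absent second argument folds to null; the math operation defaults a null right-hand side to 0 digits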
null : (Number) right.fold())); + } + + @Override + public Expression replaceChildren(List newChildren) { + if (right() != null && newChildren.size() != 2) { + throw new IllegalArgumentException("expected [2] children but received [" + newChildren.size() + "]"); + } else if (right() == null && newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + + return replacedChildrenInstance(newChildren); + } + + protected abstract Expression replacedChildrenInstance(List newChildren); + + @Override + public ScriptTemplate asScript() { + ScriptTemplate leftScript = asScript(left); + ScriptTemplate rightScript = asOptionalScript(right); + + return asScriptFrom(leftScript, rightScript); + } + + private ScriptTemplate asScriptFrom(ScriptTemplate leftScript, ScriptTemplate rightScript) { + return new ScriptTemplate(format(Locale.ROOT, formatTemplate("{sql}.%s(%s,%s)"), + operation().name().toLowerCase(Locale.ROOT), + leftScript.template(), + rightScript.template()), + paramsBuilder() + .script(leftScript.params()).script(rightScript.params()) + .build(), dataType()); + } + + @Override + public DataType dataType() { + return left().dataType(); + } + + protected Expression left() { + return left; + } + + protected Expression right() { + return right; + } + + @Override + public int hashCode() { + return Objects.hash(left(), right(), operation()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + BinaryOptionalNumericFunction other = (BinaryOptionalNumericFunction) obj; + return Objects.equals(other.left(), left()) + && Objects.equals(other.right(), right()) + && Objects.equals(other.operation(), operation()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java index e0555ab0ea3bf..4389e1ac814a9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.math; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; @@ -18,6 +17,7 @@ import java.util.Objects; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; public abstract class MathFunction extends UnaryScalarFunction { @@ -56,7 +56,7 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - return Expressions.typeMustBeNumeric(field(), operation().toString(), ParamOrdinal.DEFAULT); + return isNumeric(field(), sourceText(), ParamOrdinal.DEFAULT); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java index f1aad79f8b4ff..98c2e75906ce0 100644 --- 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java @@ -6,11 +6,11 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.math; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Literal; -import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation; -import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor.BinaryOptionalMathOperation; import org.elasticsearch.xpack.sql.tree.NodeInfo; -import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.tree.Source; + +import java.util.List; /** * Function that takes two parameters: one is the field/value itself, the other is a non-floating point numeric @@ -18,24 +18,24 @@ * count digits after the decimal point. If negative, it will round the number till that paramter count * digits before the decimal point, starting at the decimal point. */ -public class Round extends BinaryNumericFunction { +public class Round extends BinaryOptionalNumericFunction { public Round(Source source, Expression left, Expression right) { - super(source, left, right == null ? Literal.of(left.source(), 0) : right, BinaryMathOperation.ROUND); + super(source, left, right); } @Override - protected NodeInfo info() { + protected NodeInfo info() { return NodeInfo.create(this, Round::new, left(), right()); } @Override - protected Round replaceChildren(Expression newLeft, Expression newRight) { - return new Round(source(), newLeft, newRight); + protected BinaryOptionalMathOperation operation() { + return BinaryOptionalMathOperation.ROUND; } @Override - public DataType dataType() { - return left().dataType(); + protected final Expression replacedChildrenInstance(List newChildren) { + return new Round(source(), newChildren.get(0), right() == null ? null : newChildren.get(1)); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Truncate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Truncate.java index 3920b8219a647..cf08c6c6eaa1f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Truncate.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Truncate.java @@ -6,11 +6,11 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.math; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Literal; -import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation; -import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor.BinaryOptionalMathOperation; import org.elasticsearch.xpack.sql.tree.NodeInfo; -import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.tree.Source; + +import java.util.List; /** * Function that takes two parameters: one is the field/value itself, the other is a non-floating point numeric @@ -18,24 +18,24 @@ * parameter count digits after the decimal point. 
If negative, it will truncate the number till that parameter * count digits before the decimal point, starting at the decimal point. */ -public class Truncate extends BinaryNumericFunction { +public class Truncate extends BinaryOptionalNumericFunction { public Truncate(Source source, Expression left, Expression right) { - super(source, left, right == null ? Literal.of(left.source(), 0) : right, BinaryMathOperation.TRUNCATE); + super(source, left, right); } @Override - protected NodeInfo info() { + protected NodeInfo info() { return NodeInfo.create(this, Truncate::new, left(), right()); } @Override - protected Truncate replaceChildren(Expression newLeft, Expression newRight) { - return new Truncate(source(), newLeft, newRight); + protected BinaryOptionalMathOperation operation() { + return BinaryOptionalMathOperation.TRUNCATE; } - + @Override - public DataType dataType() { - return left().dataType(); + protected final Expression replacedChildrenInstance(List newChildren) { + return new Truncate(source(), newChildren.get(0), right() == null ? null : newChildren.get(1)); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringFunction.java index fd294564b642c..611e86507ee5b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringFunction.java @@ -16,7 +16,7 @@ import java.util.Objects; import java.util.function.BiFunction; -import static org.elasticsearch.xpack.sql.expression.Expressions.typeMustBeString; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; /** @@ -42,7 +42,7 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution resolution = typeMustBeString(left(), sourceText(), ParamOrdinal.FIRST); + TypeResolution resolution = isStringAndExact(left(), sourceText(), ParamOrdinal.FIRST); if (resolution.unresolved()) { return resolution; } @@ -67,7 +67,7 @@ protected String scriptMethodName() { @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? 
field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringNumericFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringNumericFunction.java index d9f767d1ce81a..fac0646c2c611 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringNumericFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringNumericFunction.java @@ -12,6 +12,8 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; + /** * A binary string function with a numeric second parameter and a string result */ @@ -26,7 +28,7 @@ public BinaryStringNumericFunction(Source source, Expression left, Expression ri @Override protected TypeResolution resolveSecondParameterInputType(Expression e) { - return Expressions.typeMustBeNumeric(e, sourceText(), Expressions.ParamOrdinal.SECOND); + return isNumeric(e, sourceText(), Expressions.ParamOrdinal.SECOND); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringFunction.java index 51189f6a4efff..eaa6ac428c85f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/BinaryStringStringFunction.java @@ -10,6 +10,8 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; + /** * A binary string function with two string parameters and a numeric result */ @@ -21,7 +23,7 @@ public BinaryStringStringFunction(Source source, Expression left, Expression rig @Override protected TypeResolution resolveSecondParameterInputType(Expression e) { - return Expressions.typeMustBeString(e, sourceText(), Expressions.ParamOrdinal.SECOND); + return isStringAndExact(e, sourceText(), Expressions.ParamOrdinal.SECOND); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Concat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Concat.java index 1f9833133a98a..4e461d919a93a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Concat.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Concat.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.function.scalar.string.ConcatFunctionProcessor.process; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; @@ -37,12 +38,12 @@ protected TypeResolution resolveType() { return new 
TypeResolution("Unresolved children"); } - TypeResolution sourceResolution = Expressions.typeMustBeString(left(), sourceText(), ParamOrdinal.FIRST); - if (sourceResolution.unresolved()) { - return sourceResolution; + TypeResolution resolution = isStringAndExact(left(), functionName(), ParamOrdinal.FIRST); + if (resolution.unresolved()) { + return resolution; } - return Expressions.typeMustBeString(right(), sourceText(), ParamOrdinal.SECOND); + return isStringAndExact(right(), functionName(), ParamOrdinal.SECOND); } @Override @@ -78,7 +79,7 @@ protected NodeInfo info() { @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Insert.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Insert.java index 8e6fc2052928e..8cff98d4c7c80 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Insert.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Insert.java @@ -21,6 +21,8 @@ import java.util.Locale; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.function.scalar.string.InsertFunctionProcessor.doProcess; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; @@ -46,22 +48,22 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution sourceResolution = Expressions.typeMustBeString(source, sourceText(), ParamOrdinal.FIRST); + TypeResolution sourceResolution = isStringAndExact(source, sourceText(), ParamOrdinal.FIRST); if (sourceResolution.unresolved()) { return sourceResolution; } - - TypeResolution startResolution = Expressions.typeMustBeNumeric(start, sourceText(), ParamOrdinal.SECOND); + + TypeResolution startResolution = isNumeric(start, sourceText(), ParamOrdinal.SECOND); if (startResolution.unresolved()) { return startResolution; } - TypeResolution lengthResolution = Expressions.typeMustBeNumeric(length, sourceText(), ParamOrdinal.THIRD); + TypeResolution lengthResolution = isNumeric(length, sourceText(), ParamOrdinal.THIRD); if (lengthResolution.unresolved()) { return lengthResolution; } - return Expressions.typeMustBeString(replacement, sourceText(), ParamOrdinal.FOURTH); + return isStringAndExact(replacement, sourceText(), ParamOrdinal.FOURTH); } @Override @@ -119,7 +121,7 @@ private ScriptTemplate asScriptFrom(ScriptTemplate sourceScript, ScriptTemplate @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? 
field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Locate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Locate.java index 042ec1a736373..806e6fab8e465 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Locate.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Locate.java @@ -21,6 +21,8 @@ import java.util.Locale; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.function.scalar.string.LocateFunctionProcessor.doProcess; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; @@ -48,19 +50,17 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution patternResolution = Expressions.typeMustBeString(pattern, sourceText(), ParamOrdinal.FIRST); + TypeResolution patternResolution = isStringAndExact(pattern, sourceText(), ParamOrdinal.FIRST); if (patternResolution.unresolved()) { return patternResolution; } - TypeResolution sourceResolution = Expressions.typeMustBeString(source, sourceText(), ParamOrdinal.SECOND); + TypeResolution sourceResolution = isStringAndExact(source, sourceText(), ParamOrdinal.SECOND); if (sourceResolution.unresolved()) { return sourceResolution; } - - return start == null ? - TypeResolution.TYPE_RESOLVED : - Expressions.typeMustBeNumeric(start, sourceText(), ParamOrdinal.THIRD); + + return start == null ? TypeResolution.TYPE_RESOLVED : isNumeric(start, sourceText(), ParamOrdinal.THIRD); } @Override @@ -80,7 +80,7 @@ protected NodeInfo info() { public boolean foldable() { return pattern.foldable() && source.foldable() - && (start == null? true : start.foldable()); + && (start == null || start.foldable()); } @Override @@ -122,7 +122,7 @@ private ScriptTemplate asScriptFrom(ScriptTemplate patternScript, ScriptTemplate @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? 
field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java index cd960f1f3b3ea..a1150fc5d38af 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java @@ -21,6 +21,7 @@ import java.util.Locale; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.function.scalar.string.ReplaceFunctionProcessor.doProcess; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; @@ -44,17 +45,17 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution sourceResolution = Expressions.typeMustBeString(source, sourceText(), ParamOrdinal.FIRST); + TypeResolution sourceResolution = isStringAndExact(source, sourceText(), ParamOrdinal.FIRST); if (sourceResolution.unresolved()) { return sourceResolution; } - TypeResolution patternResolution = Expressions.typeMustBeString(pattern, sourceText(), ParamOrdinal.SECOND); + TypeResolution patternResolution = isStringAndExact(pattern, sourceText(), ParamOrdinal.SECOND); if (patternResolution.unresolved()) { return patternResolution; } - return Expressions.typeMustBeString(replacement, sourceText(), ParamOrdinal.THIRD); + return isStringAndExact(replacement, sourceText(), ParamOrdinal.THIRD); } @Override @@ -107,7 +108,7 @@ private ScriptTemplate asScriptFrom(ScriptTemplate sourceScript, ScriptTemplate @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? 
field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } @@ -124,4 +125,4 @@ public Expression replaceChildren(List newChildren) { return new Replace(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Substring.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Substring.java index 75178e73fce46..a341a6bb8c203 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Substring.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Substring.java @@ -21,6 +21,8 @@ import java.util.Locale; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isInteger; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.function.scalar.string.SubstringFunctionProcessor.doProcess; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; @@ -45,17 +47,17 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution sourceResolution = Expressions.typeMustBeString(source, sourceText(), ParamOrdinal.FIRST); + TypeResolution sourceResolution = isStringAndExact(source, sourceText(), ParamOrdinal.FIRST); if (sourceResolution.unresolved()) { return sourceResolution; } - TypeResolution startResolution = Expressions.typeMustBeNumeric(start, sourceText(), ParamOrdinal.SECOND); + TypeResolution startResolution = isInteger(start, sourceText(), ParamOrdinal.SECOND); if (startResolution.unresolved()) { return startResolution; } - return Expressions.typeMustBeNumeric(length, sourceText(), ParamOrdinal.THIRD); + return isInteger(length, sourceText(), ParamOrdinal.THIRD); } @Override @@ -107,7 +109,7 @@ protected ScriptTemplate asScriptFrom(ScriptTemplate sourceScript, ScriptTemplat @Override public ScriptTemplate scriptWithField(FieldAttribute field) { return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? 
field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } @@ -124,4 +126,4 @@ public Expression replaceChildren(List newChildren) { return new Substring(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java index ef3944a9093a4..b2c72f0f5b6d7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.string; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; @@ -20,6 +19,7 @@ import java.util.Objects; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; public abstract class UnaryStringFunction extends UnaryScalarFunction { @@ -43,7 +43,7 @@ protected TypeResolution resolveType() { if (!childrenResolved()) { return new TypeResolution("Unresolved children"); } - return Expressions.typeMustBeString(field(), operation().toString(), ParamOrdinal.DEFAULT); + return isStringAndExact(field(), sourceText(), ParamOrdinal.DEFAULT); } @Override @@ -57,7 +57,7 @@ protected Processor makeProcessor() { public ScriptTemplate scriptWithField(FieldAttribute field) { //TODO change this to use _source instead of the exact form (aka field.keyword for text fields) return new ScriptTemplate(processScript("doc[{}].value"), - paramsBuilder().variable(field.isInexact() ? 
field.exactAttribute().name() : field.name()).build(), + paramsBuilder().variable(field.exactAttribute().name()).build(), dataType()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java index 5603a29d81d7c..7d9a64e59cdc2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.string; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; @@ -19,6 +18,7 @@ import java.util.Objects; import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isInteger; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; /** @@ -45,7 +45,7 @@ protected TypeResolution resolveType() { if (!childrenResolved()) { return new TypeResolution("Unresolved children"); } - return Expressions.typeMustBeInteger(field(), operation().toString(), ParamOrdinal.DEFAULT); + return isInteger(field(), sourceText(), ParamOrdinal.DEFAULT); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java index f56181bae13de..4d77243230cf7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java @@ -13,6 +13,7 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NonIsoDateTimeProcessor.NonIsoDateTimeExtractor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor.BinaryOptionalMathOperation; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; import org.elasticsearch.xpack.sql.expression.function.scalar.string.BinaryStringNumericProcessor.BinaryStringNumericOperation; import org.elasticsearch.xpack.sql.expression.function.scalar.string.BinaryStringStringProcessor.BinaryStringStringOperation; @@ -197,11 +198,11 @@ public static Object sub(Object left, Object right) { } public static Number round(Number v, Number s) { - return BinaryMathOperation.ROUND.apply(v, s); + return BinaryOptionalMathOperation.ROUND.apply(v, s); } public static Number truncate(Number v, Number s) { - return BinaryMathOperation.TRUNCATE.apply(v, s); + return BinaryOptionalMathOperation.TRUNCATE.apply(v, s); } public static Double abs(Number value) { @@ -219,6 +220,10 @@ public static Double 
asin(Number value) { public static Double atan(Number value) { return MathOperation.ATAN.apply(value); } + + public static Number atan2(Number left, Number right) { + return BinaryMathOperation.ATAN2.apply(left, right); + } public static Double cbrt(Number value) { return MathOperation.CBRT.apply(value); @@ -271,6 +276,10 @@ public static Double log10(Number value) { public static Double pi(Number value) { return MathOperation.PI.apply(value); } + + public static Number power(Number left, Number right) { + return BinaryMathOperation.POWER.apply(left, right); + } public static Double radians(Number value) { return MathOperation.RADIANS.apply(value); @@ -348,7 +357,7 @@ public static Integer weekOfYear(Object dateTime, String tzId) { public static ZonedDateTime asDateTime(Object dateTime) { return (ZonedDateTime) asDateTime(dateTime, false); } - + private static Object asDateTime(Object dateTime, boolean lenient) { if (dateTime == null) { return null; @@ -363,7 +372,10 @@ private static Object asDateTime(Object dateTime, boolean lenient) { if (dateTime instanceof Number) { return DateUtils.asDateTime(((Number) dateTime).longValue()); } - + + if (dateTime instanceof String) { + return DateUtils.asDateTime(dateTime.toString()); + } throw new SqlIllegalArgumentException("Invalid date encountered [{}]", dateTime); } return dateTime; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/processor/ConstantProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/processor/ConstantProcessor.java index 21cb72f2dab55..826cf1816ae6b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/processor/ConstantProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/processor/ConstantProcessor.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.sql.expression.gen.processor; +import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -16,17 +17,40 @@ public class ConstantProcessor implements Processor { public static String NAME = "c"; private final Object constant; + private final boolean namedWriteable; + private final Class clazz; public ConstantProcessor(Object value) { this.constant = value; + this.namedWriteable = value instanceof NamedWriteable; + this.clazz = namedWriteable ? 
value.getClass() : null; } + @SuppressWarnings("unchecked") public ConstantProcessor(StreamInput in) throws IOException { - constant = in.readGenericValue(); + namedWriteable = in.readBoolean(); + if (namedWriteable) { + try { + clazz = ConstantProcessor.class.getClassLoader().loadClass(in.readString()); + } catch (ClassNotFoundException e) { + throw new IOException(e); + } + constant = in.readNamedWriteable((Class) clazz); + } else { + clazz = null; + constant = in.readGenericValue(); + } } + @Override public void writeTo(StreamOutput out) throws IOException { - out.writeGenericValue(constant); + out.writeBoolean(namedWriteable); + if (namedWriteable) { + out.writeString(constant.getClass().getName()); + out.writeNamedWriteable((NamedWriteable) constant); + } else { + out.writeGenericValue(constant); + } } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java index 5b75878920243..3832fbda2217a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/ScriptWeaver.java @@ -11,12 +11,16 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunctionAttribute; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; import org.elasticsearch.xpack.sql.expression.literal.IntervalDayTime; import org.elasticsearch.xpack.sql.expression.literal.IntervalYearMonth; import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.DateUtils; + +import java.time.ZonedDateTime; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; @@ -48,11 +52,29 @@ default ScriptTemplate asScript(Expression exp) { throw new SqlIllegalArgumentException("Cannot evaluate script for expression {}", exp); } + /* + * To be used when the function has an optional parameter. + */ + default ScriptTemplate asOptionalScript(Expression exp) { + return exp == null ? 
asScript(Literal.NULL) : asScript(exp); + } + DataType dataType(); default ScriptTemplate scriptWithFoldable(Expression foldable) { Object fold = foldable.fold(); + + // + // Custom type handling + // + // wrap intervals with dedicated methods for serialization + if (fold instanceof ZonedDateTime) { + ZonedDateTime zdt = (ZonedDateTime) fold; + return new ScriptTemplate(processScript("{sql}.asDateTime({})"), + paramsBuilder().variable(DateUtils.toString(zdt)).build(), dataType()); + } + if (fold instanceof IntervalYearMonth) { IntervalYearMonth iym = (IntervalYearMonth) fold; return new ScriptTemplate(processScript("{sql}.intervalYearMonth({},{})"), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java index c280d974e8850..83a4b96f8295b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java @@ -97,14 +97,19 @@ public static List inCommon(List l, List r) return common.isEmpty() ? emptyList() : common; } - public static List subtract(List from, List r) { - List diff = new ArrayList<>(Math.min(from.size(), r.size())); - for (Expression lExp : from) { - for (Expression rExp : r) { - if (!lExp.semanticEquals(rExp)) { - diff.add(lExp); + public static List subtract(List from, List list) { + List diff = new ArrayList<>(Math.min(from.size(), list.size())); + for (Expression f : from) { + boolean found = false; + for (Expression l : list) { + if (f.semanticEquals(l)) { + found = true; + break; } } + if (found == false) { + diff.add(f); + } } return diff.isEmpty() ? 
emptyList() : diff; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ArbitraryConditionalFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ArbitraryConditionalFunction.java index ecc5835d1aaab..569594e1ed693 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ArbitraryConditionalFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ArbitraryConditionalFunction.java @@ -13,6 +13,7 @@ import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; import org.elasticsearch.xpack.sql.expression.predicate.conditional.ConditionalProcessor.ConditionalOperation; import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.DataTypeConversion; import java.util.ArrayList; @@ -35,6 +36,7 @@ public abstract class ArbitraryConditionalFunction extends ConditionalFunction { @Override protected TypeResolution resolveType() { + dataType = DataType.NULL; for (Expression e : children()) { dataType = DataTypeConversion.commonType(dataType, e.dataType()); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java index 13b765e941ce0..8068818938036 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalFunction.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; import java.util.List; @@ -20,7 +21,7 @@ */ public abstract class ConditionalFunction extends ScalarFunction { - protected DataType dataType = DataType.NULL; + protected DataType dataType = null; ConditionalFunction(Source source, List fields) { super(source, fields); @@ -28,6 +29,12 @@ public abstract class ConditionalFunction extends ScalarFunction { @Override public DataType dataType() { + if (dataType == null) { + dataType = DataType.NULL; + for (Expression exp : children()) { + dataType = DataTypeConversion.commonType(dataType, exp.dataType()); + } + } return dataType; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogic.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogic.java index 930636657fc48..dfc5232cddd2d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogic.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogic.java @@ -14,6 +14,8 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isBoolean; + public abstract class BinaryLogic extends BinaryOperator { protected BinaryLogic(Source source, Expression left, Expression right, BinaryLogicOperation operation) { @@ -27,7 +29,7 @@ public DataType dataType() { 
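Note on the Predicates.subtract change earlier in this diff: the removed implementation added an element to the result once for every entry of the second list it did not match, so an expression that did appear in both lists could still end up in the difference, and could be added several times. The replacement keeps an element of the first list only when it matches nothing in the second. A minimal standalone sketch of that semantic set difference, using a pluggable equality in place of the SQL tree's semanticEquals and purely illustrative names rather than the actual Elasticsearch classes:

import java.util.ArrayList;
import java.util.List;
import java.util.function.BiPredicate;

public class SubtractSketch {

    // Keep every element of 'from' that matches nothing in 'list' under the given equality.
    static <T> List<T> subtract(List<T> from, List<T> list, BiPredicate<T, T> sameAs) {
        List<T> diff = new ArrayList<>(from.size());
        for (T candidate : from) {
            boolean found = false;
            for (T other : list) {
                if (sameAs.test(candidate, other)) {
                    found = true;
                    break; // a single match is enough to exclude the candidate
                }
            }
            if (found == false) {
                diff.add(candidate);
            }
        }
        return diff;
    }

    public static void main(String[] args) {
        // Prints [a, c]: "b" is excluded exactly once, and nothing is duplicated.
        System.out.println(subtract(List.of("a", "b", "c"), List.of("b", "d"), String::equals));
    }
}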
@Override protected TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal) { - return Expressions.typeMustBeBoolean(e, sourceText(), paramOrdinal); + return isBoolean(e, sourceText(), paramOrdinal); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java index bbadaa6ef6430..88e20d187f343 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.predicate.logical; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; @@ -16,6 +15,8 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isBoolean; + public class Not extends UnaryScalarFunction { public Not(Source source, Expression child) { @@ -37,7 +38,7 @@ protected TypeResolution resolveType() { if (DataType.BOOLEAN == field().dataType()) { return TypeResolution.TYPE_RESOLVED; } - return Expressions.typeMustBeBoolean(field(), sourceText(), ParamOrdinal.DEFAULT); + return isBoolean(field(), sourceText(), ParamOrdinal.DEFAULT); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/ArithmeticOperation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/ArithmeticOperation.java index d1d28e3683863..157f34dc404a2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/ArithmeticOperation.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/ArithmeticOperation.java @@ -14,6 +14,8 @@ import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.DataTypeConversion; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; + public abstract class ArithmeticOperation extends BinaryOperator { private DataType dataType; @@ -24,7 +26,7 @@ protected ArithmeticOperation(Source source, Expression left, Expression right, @Override protected TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal) { - return Expressions.typeMustBeNumeric(e, symbol(), paramOrdinal); + return isNumeric(e, sourceText(), paramOrdinal); } @Override @@ -44,4 +46,4 @@ public DataType dataType() { protected Pipe makePipe() { return new BinaryArithmeticPipe(source(), this, Expressions.pipe(left()), Expressions.pipe(right()), function()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java index a0fd57e30d0ca..3c89f2cf275f8 100644 --- 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/BinaryArithmeticProcessor.java @@ -162,7 +162,7 @@ protected Object doProcess(Object left, Object right) { return null; } - if (f == BinaryArithmeticOperation.MUL || f == BinaryArithmeticOperation.DIV || f == BinaryArithmeticOperation.MOD) { + if (f == BinaryArithmeticOperation.DIV || f == BinaryArithmeticOperation.MOD) { if (!(left instanceof Number)) { throw new SqlIllegalArgumentException("A number is required; received {}", left); } @@ -174,8 +174,8 @@ protected Object doProcess(Object left, Object right) { return f.apply(left, right); } - if (f == BinaryArithmeticOperation.ADD || f == BinaryArithmeticOperation.SUB) { - return f.apply(left, right); + if (f == BinaryArithmeticOperation.ADD || f == BinaryArithmeticOperation.SUB || f == BinaryArithmeticOperation.MUL) { + return f.apply(left, right); } // this should not occur diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java index 5be5e28718459..efdfc0fbe6509 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java @@ -43,7 +43,7 @@ protected TypeResolution resolveType() { // 2. 3. 4. intervals if ((DataTypes.isInterval(l) || DataTypes.isInterval(r))) { if (DataTypeConversion.commonType(l, r) == null) { - return new TypeResolution(format("[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); + return new TypeResolution(format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); } else { return resolveWithIntervals(); } @@ -54,6 +54,12 @@ protected TypeResolution resolveType() { } protected TypeResolution resolveWithIntervals() { + DataType l = left().dataType(); + DataType r = right().dataType(); + + if (!(r.isDateBased() || DataTypes.isInterval(r))|| !(l.isDateBased() || DataTypes.isInterval(l))) { + return new TypeResolution(format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); + } return TypeResolution.TYPE_RESOLVED; } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Mul.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Mul.java index 7a09bbedebfa3..e3fa7ac1031f7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Mul.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Mul.java @@ -47,7 +47,7 @@ protected TypeResolution resolveType() { return TypeResolution.TYPE_RESOLVED; } - return new TypeResolution(format("[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); + return new TypeResolution(format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), l, r)); } @Override diff --git 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java index 3ada9a523a13c..a8c7f87d6380d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; @@ -16,6 +15,8 @@ import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isNumeric; + /** * Negation function (@{code -x}). */ @@ -37,7 +38,7 @@ protected Neg replaceChild(Expression newChild) { @Override protected TypeResolution resolveType() { - return Expressions.typeMustBeNumeric(field(), sourceText(), ParamOrdinal.DEFAULT); + return isNumeric(field(), sourceText(), ParamOrdinal.DEFAULT); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java index cad2d7ffa625a..d448a054df761 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Sub.java @@ -34,7 +34,11 @@ protected Sub replaceChildren(Expression newLeft, Expression newRight) { @Override protected TypeResolution resolveWithIntervals() { - if (right().dataType().isDateBased() && DataTypes.isInterval(left().dataType())) { + TypeResolution resolution = super.resolveWithIntervals(); + if (resolution.unresolved()) { + return resolution; + } + if ((right().dataType().isDateBased()) && DataTypes.isInterval(left().dataType())) { return new TypeResolution(format(null, "Cannot subtract a {}[{}] from an interval[{}]; do you mean the reverse?", right().dataType().typeName, right().source().text(), left().source().text())); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparison.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparison.java index 4e7473907c890..fdd33af077b40 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparison.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/BinaryComparison.java @@ -7,6 +7,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.TypeResolutions; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; import org.elasticsearch.xpack.sql.expression.predicate.BinaryOperator; import 
org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation; @@ -22,7 +23,7 @@ protected BinaryComparison(Source source, Expression left, Expression right, Bin @Override protected TypeResolution resolveInputType(Expression e, Expressions.ParamOrdinal paramOrdinal) { - return TypeResolution.TYPE_RESOLVED; + return TypeResolutions.isExact(e, sourceText(), paramOrdinal); } @Override @@ -43,4 +44,4 @@ protected Pipe makePipe() { public static Integer compare(Object left, Object right) { return Comparisons.compare(left, right); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java index f8f0bb35b504e..e687c9ac1baee 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java @@ -9,6 +9,7 @@ import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Foldables; import org.elasticsearch.xpack.sql.expression.Nullability; +import org.elasticsearch.xpack.sql.expression.TypeResolutions; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; @@ -105,6 +106,23 @@ protected Pipe makePipe() { return new InPipe(source(), this, children().stream().map(Expressions::pipe).collect(Collectors.toList())); } + @Override + protected TypeResolution resolveType() { + TypeResolution resolution = TypeResolutions.isExact(value, functionName(), Expressions.ParamOrdinal.DEFAULT); + if (resolution != TypeResolution.TYPE_RESOLVED) { + return resolution; + } + + for (Expression ex : list) { + if (ex.foldable() == false) { + return new TypeResolution(format(null, "Comparisons against variables are not (currently) supported; offender [{}] in [{}]", + Expressions.name(ex), + name())); + } + } + return super.resolveType(); + } + @Override public int hashCode() { return Objects.hash(value, list); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/Like.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/Like.java index 72c486cf65eba..e67f4943445af 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/Like.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/Like.java @@ -6,29 +6,35 @@ package org.elasticsearch.xpack.sql.expression.predicate.regex; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.expression.predicate.regex.RegexProcessor.RegexOperation; import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; -public class Like extends RegexMatch { - - private final LikePattern pattern; +public class Like extends RegexMatch { public Like(Source source, Expression left, LikePattern pattern) { - super(source, left, pattern.asJavaRegex()); - this.pattern = pattern; - } - - public LikePattern pattern() { - 
return pattern; + super(source, left, pattern); } @Override protected NodeInfo info() { - return NodeInfo.create(this, Like::new, field(), pattern); + return NodeInfo.create(this, Like::new, field(), pattern()); } @Override protected Like replaceChild(Expression newLeft) { - return new Like(source(), newLeft, pattern); + return new Like(source(), newLeft, pattern()); + } + + @Override + public Boolean fold() { + Object val = field().fold(); + return RegexOperation.match(val, pattern().asJavaRegex()); + } + + @Override + protected Processor makeProcessor() { + return new RegexProcessor(pattern().asJavaRegex()); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RLike.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RLike.java index b925bd769ea4c..187eda3fdece1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RLike.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RLike.java @@ -6,29 +6,35 @@ package org.elasticsearch.xpack.sql.expression.predicate.regex; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.expression.predicate.regex.RegexProcessor.RegexOperation; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; -public class RLike extends RegexMatch { +public class RLike extends RegexMatch { - private final String pattern; - - public RLike(Source source, Expression left, String pattern) { - super(source, left, pattern); - this.pattern = pattern; - } - - public String pattern() { - return pattern; + public RLike(Source source, Expression value, String pattern) { + super(source, value, pattern); } @Override protected NodeInfo info() { - return NodeInfo.create(this, RLike::new, field(), pattern); + return NodeInfo.create(this, RLike::new, field(), pattern()); } @Override protected RLike replaceChild(Expression newChild) { - return new RLike(source(), newChild, pattern); + return new RLike(source(), newChild, pattern()); + } + + @Override + public Boolean fold() { + Object val = field().fold(); + return RegexOperation.match(val, pattern()); + } + + @Override + protected Processor makeProcessor() { + return new RegexProcessor(pattern()); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RegexMatch.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RegexMatch.java index ed65b1fcaf9cb..82c6d570f39f5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RegexMatch.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RegexMatch.java @@ -7,21 +7,28 @@ package org.elasticsearch.xpack.sql.expression.predicate.regex; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Nullability; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; -import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; -import org.elasticsearch.xpack.sql.expression.predicate.regex.RegexProcessor.RegexOperation; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; -public abstract class RegexMatch 
extends UnaryScalarFunction { +import java.util.Objects; - private final String pattern; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; - protected RegexMatch(Source source, Expression value, String pattern) { +public abstract class RegexMatch extends UnaryScalarFunction { + + private final T pattern; + + protected RegexMatch(Source source, Expression value, T pattern) { super(source, value); this.pattern = pattern; } + + public T pattern() { + return pattern; + } @Override public DataType dataType() { @@ -30,26 +37,30 @@ public DataType dataType() { @Override public Nullability nullable() { - if (pattern == null) { + if (pattern() == null) { return Nullability.TRUE; } return field().nullable(); } + @Override + protected TypeResolution resolveType() { + return isStringAndExact(field(), sourceText(), Expressions.ParamOrdinal.DEFAULT); + } + @Override public boolean foldable() { // right() is not directly foldable in any context but Like can fold it. return field().foldable(); } - + @Override - public Boolean fold() { - Object val = field().fold(); - return RegexOperation.match(val, pattern); + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(((RegexMatch) obj).pattern(), pattern()); } @Override - protected Processor makeProcessor() { - return new RegexProcessor(pattern); + public int hashCode() { + return Objects.hash(super.hashCode(), pattern()); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java index 0e6a88a90703e..eafdf21b11930 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java @@ -94,6 +94,7 @@ import java.util.Set; import java.util.function.Consumer; +import static org.elasticsearch.xpack.sql.expression.Expressions.equalsAsAttribute; import static org.elasticsearch.xpack.sql.expression.Literal.FALSE; import static org.elasticsearch.xpack.sql.expression.Literal.TRUE; import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.combineAnd; @@ -148,6 +149,7 @@ protected Iterable.Batch> batches() { Batch aggregate = new Batch("Aggregation Rewrite", //new ReplaceDuplicateAggsWithReferences(), + new ReplaceMinMaxWithTopHits(), new ReplaceAggsWithMatrixStats(), new ReplaceAggsWithExtendedStats(), new ReplaceAggsWithStats(), @@ -162,6 +164,7 @@ protected Iterable.Batch> batches() { ); //new BalanceBooleanTrees()); Batch label = new Batch("Set as Optimized", Limiter.ONCE, + CleanAliases.INSTANCE, new SetAsOptimized()); return Arrays.asList(operators, aggregate, local, label); @@ -884,7 +887,26 @@ protected LogicalPlan rule(OrderBy ob) { for (Order o : nonConstant) { Expression fieldToOrder = o.child(); for (Expression group : a.groupings()) { - if (Expressions.equalsAsAttribute(fieldToOrder, group)) { + Holder isMatching = new Holder<>(Boolean.FALSE); + if (equalsAsAttribute(fieldToOrder, group)) { + isMatching.set(Boolean.TRUE); + } else { + a.aggregates().forEach(alias -> { + if (alias instanceof Alias) { + Expression child = ((Alias) alias).child(); + // Check if the groupings (a, y) match the orderings (b, x) through the aggregates' aliases (x, y) + // e.g. SELECT a AS x, b AS y ... 
GROUP BY a, y ORDER BY b, x + if ((equalsAsAttribute(child, group) + && (equalsAsAttribute(alias, fieldToOrder) || equalsAsAttribute(child, fieldToOrder))) + || (equalsAsAttribute(alias, group) + && (equalsAsAttribute(alias, fieldToOrder) || equalsAsAttribute(child, fieldToOrder)))) { + isMatching.set(Boolean.TRUE); + } + } + }); + } + + if (isMatching.get() == true) { // move grouping in front groupings.remove(group); groupings.add(0, group); @@ -924,43 +946,22 @@ public LogicalPlan apply(LogicalPlan plan) { protected LogicalPlan rule(LogicalPlan plan) { final Map replacedCast = new LinkedHashMap<>(); - // first eliminate casts inside Aliases + // eliminate redundant casts LogicalPlan transformed = plan.transformExpressionsUp(e -> { - // cast wrapped in an alias - if (e instanceof Alias) { - Alias as = (Alias) e; - if (as.child() instanceof Cast) { - Cast c = (Cast) as.child(); - - if (c.from() == c.to()) { - Alias newAs = new Alias(as.source(), as.name(), as.qualifier(), c.field(), as.id(), as.synthetic()); - replacedCast.put(as.toAttribute(), newAs.toAttribute()); - return newAs; - } - } - return e; - } - return e; - }); - - // then handle stand-alone casts (mixed together the cast rule will kick in before the alias) - transformed = transformed.transformExpressionsUp(e -> { if (e instanceof Cast) { Cast c = (Cast) e; if (c.from() == c.to()) { Expression argument = c.field(); - if (argument instanceof NamedExpression) { - replacedCast.put(c.toAttribute(), ((NamedExpression) argument).toAttribute()); - } + Alias as = new Alias(c.source(), c.sourceText(), argument); + replacedCast.put(c.toAttribute(), as.toAttribute()); - return argument; + return as; } } return e; }); - // replace attributes from previous removed Casts if (!replacedCast.isEmpty()) { return transformed.transformUp(p -> { @@ -1235,7 +1236,7 @@ static class BooleanSimplification extends OptimizerExpressionRule { @Override protected Expression rule(Expression e) { - if (e instanceof BinaryPredicate) { + if (e instanceof And || e instanceof Or) { return simplifyAndOr((BinaryPredicate) e); } if (e instanceof Not) { @@ -1889,7 +1890,7 @@ static class SkipQueryOnLimitZero extends OptimizerRule { @Override protected LogicalPlan rule(Limit limit) { if (limit.limit() instanceof Literal) { - if (Integer.valueOf(0).equals((((Literal) limit.limit()).fold()))) { + if (Integer.valueOf(0).equals((limit.limit().fold()))) { return new LocalRelation(limit.source(), new EmptyExecutable(limit.output())); } } @@ -1900,21 +1901,30 @@ protected LogicalPlan rule(Limit limit) { static class SkipQueryIfFoldingProjection extends OptimizerRule { @Override protected LogicalPlan rule(LogicalPlan plan) { - if (plan instanceof Project) { - Project p = (Project) plan; + Holder optimizedPlan = new Holder<>(); + plan.forEachDown(p -> { List values = extractConstants(p.projections()); if (values.size() == p.projections().size() && !(p.child() instanceof EsRelation) && isNotQueryWithFromClauseAndFilterFoldedToFalse(p)) { - return new LocalRelation(p.source(), new SingletonExecutable(p.output(), values.toArray())); + optimizedPlan.set(new LocalRelation(p.source(), new SingletonExecutable(p.output(), values.toArray()))); } + }, Project.class); + + if (optimizedPlan.get() != null) { + return optimizedPlan.get(); } - if (plan instanceof Aggregate) { - Aggregate a = (Aggregate) plan; + + plan.forEachDown(a -> { List values = extractConstants(a.aggregates()); if (values.size() == a.aggregates().size() && isNotQueryWithFromClauseAndFilterFoldedToFalse(a)) { - return 
new LocalRelation(a.source(), new SingletonExecutable(a.output(), values.toArray())); + optimizedPlan.set(new LocalRelation(a.source(), new SingletonExecutable(a.output(), values.toArray()))); } + }, Aggregate.class); + + if (optimizedPlan.get() != null) { + return optimizedPlan.get(); } + return plan; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java index 87709ac104e08..ba2a39069953a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java @@ -146,12 +146,11 @@ public SysTables visitSysTables(SysTablesContext ctx) { boolean legacyTableType = false; for (StringContext string : ctx.string()) { String value = string(string); - if (value != null) { + if (value != null && value.isEmpty() == false) { // check special ODBC wildcard case if (value.equals(StringUtils.SQL_WILDCARD) && ctx.string().size() == 1) { - // convert % to enumeration - // https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/value-list-arguments?view=ssdt-18vs2017 - types.addAll(IndexType.VALID); + // treat % as null + // https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/value-list-arguments } // special case for legacy apps (like msquery) that always asks for 'TABLE' // which we manually map to all concrete tables supported diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java index fe8f5ac9925b1..e23fe85926bd2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java @@ -109,17 +109,13 @@ import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; -import org.elasticsearch.xpack.sql.type.DataTypeConversion; import org.elasticsearch.xpack.sql.type.DataTypes; -import org.elasticsearch.xpack.sql.util.DateUtils; import org.elasticsearch.xpack.sql.util.StringUtils; -import org.joda.time.DateTime; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.DateTimeFormatterBuilder; -import org.joda.time.format.ISODateTimeFormat; import java.time.Duration; +import java.time.LocalTime; import java.time.Period; +import java.time.format.DateTimeParseException; import java.time.temporal.TemporalAmount; import java.util.EnumSet; import java.util.List; @@ -127,9 +123,12 @@ import java.util.Map; import java.util.StringJoiner; +import static java.time.format.DateTimeFormatter.ISO_LOCAL_TIME; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.sql.type.DataTypeConversion.conversionFor; +import static org.elasticsearch.xpack.sql.util.DateUtils.asDateOnly; +import static org.elasticsearch.xpack.sql.util.DateUtils.ofEscapedLiteral; abstract class ExpressionBuilder extends IdentifierBuilder { @@ -387,44 +386,12 @@ public Order visitOrderBy(OrderByContext ctx) { @Override public DataType visitPrimitiveDataType(PrimitiveDataTypeContext ctx) { - String type = visitIdentifier(ctx.identifier()).toLowerCase(Locale.ROOT); - - switch (type) { - case "bit": - case "bool": - case 
"boolean": - return DataType.BOOLEAN; - case "tinyint": - case "byte": - return DataType.BYTE; - case "smallint": - case "short": - return DataType.SHORT; - case "int": - case "integer": - return DataType.INTEGER; - case "long": - case "bigint": - return DataType.LONG; - case "real": - return DataType.FLOAT; - case "float": - case "double": - return DataType.DOUBLE; - case "date": - return DataType.DATE; - case "datetime": - case "timestamp": - return DataType.DATETIME; - case "char": - case "varchar": - case "string": - return DataType.KEYWORD; - case "ip": - return DataType.IP; - default: - throw new ParsingException(source(ctx), "Does not recognize type {}", type); + String type = visitIdentifier(ctx.identifier()); + DataType dataType = DataType.fromSqlOrEsType(type); + if (dataType == null) { + throw new ParsingException(source(ctx), "Does not recognize type [{}]", type); } + return dataType; } // @@ -468,28 +435,13 @@ public Object visitBuiltinDateTimeFunction(BuiltinDateTimeFunctionContext ctx) { // maps CURRENT_XXX to its respective function e.g: CURRENT_TIMESTAMP() // since the functions need access to the Configuration, the parser only registers the definition and not the actual function Source source = source(ctx); - Literal p = null; - - if (ctx.precision != null) { - try { - Source pSource = source(ctx.precision); - short safeShort = DataTypeConversion.safeToShort(StringUtils.parseLong(ctx.precision.getText())); - if (safeShort > 9 || safeShort < 0) { - throw new ParsingException(pSource, "Precision needs to be between [0-9], received [{}]", safeShort); - } - p = Literal.of(pSource, Short.valueOf(safeShort)); - } catch (SqlIllegalArgumentException siae) { - throw new ParsingException(source, siae.getMessage()); - } - } - String functionName = ctx.name.getText(); switch (ctx.name.getType()) { + case SqlBaseLexer.CURRENT_TIMESTAMP: + return new UnresolvedFunction(source, functionName, ResolutionType.STANDARD, emptyList()); case SqlBaseLexer.CURRENT_DATE: return new UnresolvedFunction(source, functionName, ResolutionType.STANDARD, emptyList()); - case SqlBaseLexer.CURRENT_TIMESTAMP: - return new UnresolvedFunction(source, functionName, ResolutionType.STANDARD, p != null ? singletonList(p) : emptyList()); default: throw new ParsingException(source, "Unknown function [{}]", functionName); } @@ -791,13 +743,11 @@ public Literal visitDateEscapedLiteral(DateEscapedLiteralContext ctx) { String string = string(ctx.string()); Source source = source(ctx); // parse yyyy-MM-dd - DateTime dt = null; try { - dt = ISODateTimeFormat.date().parseDateTime(string); - } catch(IllegalArgumentException ex) { + return new Literal(source, asDateOnly(string), DataType.DATE); + } catch(DateTimeParseException ex) { throw new ParsingException(source, "Invalid date received; {}", ex.getMessage()); } - return new Literal(source, DateUtils.asDateOnly(dt), DataType.DATE); } @Override @@ -806,10 +756,10 @@ public Literal visitTimeEscapedLiteral(TimeEscapedLiteralContext ctx) { Source source = source(ctx); // parse HH:mm:ss - DateTime dt = null; + LocalTime lt = null; try { - dt = ISODateTimeFormat.hourMinuteSecond().parseDateTime(string); - } catch (IllegalArgumentException ex) { + lt = LocalTime.parse(string, ISO_LOCAL_TIME); + } catch (DateTimeParseException ex) { throw new ParsingException(source, "Invalid time received; {}", ex.getMessage()); } @@ -822,18 +772,11 @@ public Literal visitTimestampEscapedLiteral(TimestampEscapedLiteralContext ctx) Source source = source(ctx); // parse yyyy-mm-dd hh:mm:ss(.f...) 
- DateTime dt = null; try { - DateTimeFormatter formatter = new DateTimeFormatterBuilder() - .append(ISODateTimeFormat.date()) - .appendLiteral(" ") - .append(ISODateTimeFormat.hourMinuteSecondFraction()) - .toFormatter(); - dt = formatter.parseDateTime(string); - } catch (IllegalArgumentException ex) { + return new Literal(source, ofEscapedLiteral(string), DataType.DATETIME); + } catch (DateTimeParseException ex) { throw new ParsingException(source, "Invalid timestamp received; {}", ex.getMessage()); } - return new Literal(source, DateUtils.asDateTime(dt), DataType.DATETIME); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java index a62c5b4083fa3..bc056529a7cc3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java @@ -736,37 +736,37 @@ class SqlBaseBaseListener implements SqlBaseListener { * *

The default implementation does nothing. */ - @Override public void enterCastExpression(SqlBaseParser.CastExpressionContext ctx) { } + @Override public void enterBuiltinDateTimeFunction(SqlBaseParser.BuiltinDateTimeFunctionContext ctx) { } /** * {@inheritDoc} * *
The default implementation does nothing. */ - @Override public void exitCastExpression(SqlBaseParser.CastExpressionContext ctx) { } + @Override public void exitBuiltinDateTimeFunction(SqlBaseParser.BuiltinDateTimeFunctionContext ctx) { } /** * {@inheritDoc} * *
The default implementation does nothing. */ - @Override public void enterCastTemplate(SqlBaseParser.CastTemplateContext ctx) { } + @Override public void enterCastExpression(SqlBaseParser.CastExpressionContext ctx) { } /** * {@inheritDoc} * *
The default implementation does nothing. */ - @Override public void exitCastTemplate(SqlBaseParser.CastTemplateContext ctx) { } + @Override public void exitCastExpression(SqlBaseParser.CastExpressionContext ctx) { } /** * {@inheritDoc} * *
The default implementation does nothing. */ - @Override public void enterBuiltinDateTimeFunction(SqlBaseParser.BuiltinDateTimeFunctionContext ctx) { } + @Override public void enterCastTemplate(SqlBaseParser.CastTemplateContext ctx) { } /** * {@inheritDoc} * *
The default implementation does nothing. */ - @Override public void exitBuiltinDateTimeFunction(SqlBaseParser.BuiltinDateTimeFunctionContext ctx) { } + @Override public void exitCastTemplate(SqlBaseParser.CastTemplateContext ctx) { } /** * {@inheritDoc} * * diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java index 13722407570a7..2e161c053ee6f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java @@ -437,21 +437,21 @@ class SqlBaseBaseVisitor extends AbstractParseTreeVisitor implements SqlBa *
The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}. */ - @Override public T visitCastExpression(SqlBaseParser.CastExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitBuiltinDateTimeFunction(SqlBaseParser.BuiltinDateTimeFunctionContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *
The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}. */ - @Override public T visitCastTemplate(SqlBaseParser.CastTemplateContext ctx) { return visitChildren(ctx); } + @Override public T visitCastExpression(SqlBaseParser.CastExpressionContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *
The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}. */ - @Override public T visitBuiltinDateTimeFunction(SqlBaseParser.BuiltinDateTimeFunctionContext ctx) { return visitChildren(ctx); } + @Override public T visitCastTemplate(SqlBaseParser.CastTemplateContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * * diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java index 7b5a8ea5fbad9..951f969199bcb 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java @@ -681,6 +681,16 @@ interface SqlBaseListener extends ParseTreeListener { * @param ctx the parse tree */ void exitParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#builtinDateTimeFunction}. + * @param ctx the parse tree + */ + void enterBuiltinDateTimeFunction(SqlBaseParser.BuiltinDateTimeFunctionContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#builtinDateTimeFunction}. + * @param ctx the parse tree + */ + void exitBuiltinDateTimeFunction(SqlBaseParser.BuiltinDateTimeFunctionContext ctx); /** * Enter a parse tree produced by {@link SqlBaseParser#castExpression}. * @param ctx the parse tree @@ -701,16 +711,6 @@ interface SqlBaseListener extends ParseTreeListener { * @param ctx the parse tree */ void exitCastTemplate(SqlBaseParser.CastTemplateContext ctx); - /** - * Enter a parse tree produced by {@link SqlBaseParser#builtinDateTimeFunction}. - * @param ctx the parse tree - */ - void enterBuiltinDateTimeFunction(SqlBaseParser.BuiltinDateTimeFunctionContext ctx); - /** - * Exit a parse tree produced by {@link SqlBaseParser#builtinDateTimeFunction}. - * @param ctx the parse tree - */ - void exitBuiltinDateTimeFunction(SqlBaseParser.BuiltinDateTimeFunctionContext ctx); /** * Enter a parse tree produced by {@link SqlBaseParser#convertTemplate}.
* @param ctx the parse tree diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java index a690169409e81..42129c501a19b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java @@ -1,13 +1,27 @@ // ANTLR GENERATED CODE: DO NOT EDIT package org.elasticsearch.xpack.sql.parser; -import org.antlr.v4.runtime.atn.*; + +import org.antlr.v4.runtime.FailedPredicateException; +import org.antlr.v4.runtime.NoViableAltException; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.RecognitionException; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.RuntimeMetaData; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.Vocabulary; +import org.antlr.v4.runtime.VocabularyImpl; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.ParserATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.*; -import org.antlr.v4.runtime.misc.*; -import org.antlr.v4.runtime.tree.*; +import org.antlr.v4.runtime.tree.ParseTreeListener; +import org.antlr.v4.runtime.tree.ParseTreeVisitor; +import org.antlr.v4.runtime.tree.TerminalNode; + import java.util.List; -import java.util.Iterator; -import java.util.ArrayList; @SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) class SqlBaseParser extends Parser { @@ -46,7 +60,7 @@ class SqlBaseParser extends Parser { RULE_expression = 21, RULE_booleanExpression = 22, RULE_matchQueryOptions = 23, RULE_predicated = 24, RULE_predicate = 25, RULE_likePattern = 26, RULE_pattern = 27, RULE_patternEscape = 28, RULE_valueExpression = 29, RULE_primaryExpression = 30, - RULE_castExpression = 31, RULE_castTemplate = 32, RULE_builtinDateTimeFunction = 33, + RULE_builtinDateTimeFunction = 31, RULE_castExpression = 32, RULE_castTemplate = 33, RULE_convertTemplate = 34, RULE_extractExpression = 35, RULE_extractTemplate = 36, RULE_functionExpression = 37, RULE_functionTemplate = 38, RULE_functionName = 39, RULE_constant = 40, RULE_comparisonOperator = 41, RULE_booleanValue = 42, @@ -60,8 +74,8 @@ class SqlBaseParser extends Parser { "selectItem", "relation", "joinRelation", "joinType", "joinCriteria", "relationPrimary", "expression", "booleanExpression", "matchQueryOptions", "predicated", "predicate", "likePattern", "pattern", "patternEscape", - "valueExpression", "primaryExpression", "castExpression", "castTemplate", - "builtinDateTimeFunction", "convertTemplate", "extractExpression", "extractTemplate", + "valueExpression", "primaryExpression", "builtinDateTimeFunction", "castExpression", + "castTemplate", "convertTemplate", "extractExpression", "extractTemplate", "functionExpression", "functionTemplate", "functionName", "constant", "comparisonOperator", "booleanValue", "interval", "intervalField", "dataType", "qualifiedName", "identifier", "tableIdentifier", "quoteIdentifier", "unquoteIdentifier", @@ -767,6 +781,8 @@ public final StatementContext statement() throws RecognitionException { case ANALYZED: case CATALOGS: case COLUMNS: + case CURRENT_DATE: + case CURRENT_TIMESTAMP: case DAY: case DEBUG: case EXECUTABLE: @@ -843,6 +859,8 @@ public 
final StatementContext statement() throws RecognitionException { case ANALYZED: case CATALOGS: case COLUMNS: + case CURRENT_DATE: + case CURRENT_TIMESTAMP: case DAY: case DEBUG: case EXECUTABLE: @@ -913,6 +931,8 @@ public final StatementContext statement() throws RecognitionException { case ANALYZED: case CATALOGS: case COLUMNS: + case CURRENT_DATE: + case CURRENT_TIMESTAMP: case DAY: case DEBUG: case EXECUTABLE: @@ -1086,6 +1106,8 @@ public final StatementContext statement() throws RecognitionException { case ANALYZED: case CATALOGS: case COLUMNS: + case CURRENT_DATE: + case CURRENT_TIMESTAMP: case DAY: case DEBUG: case EXECUTABLE: @@ -4199,7 +4221,7 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce { setState(576); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH) | (1L << OPTIMIZED))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << (TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)) | (1L << (IDENTIFIER - 67)) | (1L << (DIGIT_IDENTIFIER - 67)) | (1L << (QUOTED_IDENTIFIER - 67)) | (1L << (BACKQUOTED_IDENTIFIER - 67)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH) | (1L << OPTIMIZED))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << (TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)) | (1L << (IDENTIFIER - 67)) | (1L << (DIGIT_IDENTIFIER - 67)) | (1L << (QUOTED_IDENTIFIER - 67)) | (1L << (BACKQUOTED_IDENTIFIER - 67)))) != 0)) { { setState(573); qualifiedName(); @@ -4265,6 +4287,64 @@ public final PrimaryExpressionContext primaryExpression() throws RecognitionExce return _localctx; } + public static class BuiltinDateTimeFunctionContext extends ParserRuleContext { + public Token name; + public TerminalNode CURRENT_TIMESTAMP() { return getToken(SqlBaseParser.CURRENT_TIMESTAMP, 0); } + public TerminalNode CURRENT_DATE() { return getToken(SqlBaseParser.CURRENT_DATE, 0); } + public BuiltinDateTimeFunctionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_builtinDateTimeFunction; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) 
((SqlBaseListener)listener).enterBuiltinDateTimeFunction(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitBuiltinDateTimeFunction(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitBuiltinDateTimeFunction(this); + else return visitor.visitChildren(this); + } + } + + public final BuiltinDateTimeFunctionContext builtinDateTimeFunction() throws RecognitionException { + BuiltinDateTimeFunctionContext _localctx = new BuiltinDateTimeFunctionContext(_ctx, getState()); + enterRule(_localctx, 62, RULE_builtinDateTimeFunction); + try { + setState(593); + switch (_input.LA(1)) { + case CURRENT_TIMESTAMP: + enterOuterAlt(_localctx, 1); + { + setState(591); + ((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_TIMESTAMP); + } + break; + case CURRENT_DATE: + enterOuterAlt(_localctx, 2); + { + setState(592); + ((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_DATE); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class CastExpressionContext extends ParserRuleContext { public CastTemplateContext castTemplate() { return getRuleContext(CastTemplateContext.class,0); @@ -4295,44 +4375,44 @@ public T accept(ParseTreeVisitor visitor) { public final CastExpressionContext castExpression() throws RecognitionException { CastExpressionContext _localctx = new CastExpressionContext(_ctx, getState()); - enterRule(_localctx, 62, RULE_castExpression); + enterRule(_localctx, 64, RULE_castExpression); try { - setState(601); + setState(605); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,80,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,81,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(591); + setState(595); castTemplate(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(592); + setState(596); match(FUNCTION_ESC); - setState(593); + setState(597); castTemplate(); - setState(594); + setState(598); match(ESC_END); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(596); + setState(600); convertTemplate(); } break; case 4: enterOuterAlt(_localctx, 4); { - setState(597); + setState(601); match(FUNCTION_ESC); - setState(598); + setState(602); convertTemplate(); - setState(599); + setState(603); match(ESC_END); } break; @@ -4379,21 +4459,21 @@ public T accept(ParseTreeVisitor visitor) { public final CastTemplateContext castTemplate() throws RecognitionException { CastTemplateContext _localctx = new CastTemplateContext(_ctx, getState()); - enterRule(_localctx, 64, RULE_castTemplate); + enterRule(_localctx, 66, RULE_castTemplate); try { enterOuterAlt(_localctx, 1); { - setState(603); + setState(607); match(CAST); - setState(604); + setState(608); match(T__0); - setState(605); + setState(609); expression(); - setState(606); + setState(610); match(AS); - setState(607); + setState(611); dataType(); - setState(608); + setState(612); match(T__1); } } @@ -4408,100 +4488,6 @@ public final CastTemplateContext castTemplate() throws RecognitionException { return _localctx; } - public static class BuiltinDateTimeFunctionContext extends ParserRuleContext { - public Token name; - public Token precision; - public 
TerminalNode CURRENT_DATE() { return getToken(SqlBaseParser.CURRENT_DATE, 0); } - public TerminalNode CURRENT_TIMESTAMP() { return getToken(SqlBaseParser.CURRENT_TIMESTAMP, 0); } - public TerminalNode INTEGER_VALUE() { return getToken(SqlBaseParser.INTEGER_VALUE, 0); } - public BuiltinDateTimeFunctionContext(ParserRuleContext parent, int invokingState) { - super(parent, invokingState); - } - @Override public int getRuleIndex() { return RULE_builtinDateTimeFunction; } - @Override - public void enterRule(ParseTreeListener listener) { - if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterBuiltinDateTimeFunction(this); - } - @Override - public void exitRule(ParseTreeListener listener) { - if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitBuiltinDateTimeFunction(this); - } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitBuiltinDateTimeFunction(this); - else return visitor.visitChildren(this); - } - } - - public final BuiltinDateTimeFunctionContext builtinDateTimeFunction() throws RecognitionException { - BuiltinDateTimeFunctionContext _localctx = new BuiltinDateTimeFunctionContext(_ctx, getState()); - enterRule(_localctx, 66, RULE_builtinDateTimeFunction); - int _la; - try { - setState(623); - switch (_input.LA(1)) { - case CURRENT_DATE: - enterOuterAlt(_localctx, 1); - { - setState(610); - ((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_DATE); - setState(613); - _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,81,_ctx) ) { - case 1: - { - setState(611); - match(T__0); - setState(612); - match(T__1); - } - break; - } - } - break; - case CURRENT_TIMESTAMP: - enterOuterAlt(_localctx, 2); - { - setState(615); - ((BuiltinDateTimeFunctionContext)_localctx).name = match(CURRENT_TIMESTAMP); - setState(621); - _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,83,_ctx) ) { - case 1: - { - setState(616); - match(T__0); - setState(618); - _la = _input.LA(1); - if (_la==INTEGER_VALUE) { - { - setState(617); - ((BuiltinDateTimeFunctionContext)_localctx).precision = match(INTEGER_VALUE); - } - } - - setState(620); - match(T__1); - } - break; - } - } - break; - default: - throw new NoViableAltException(this); - } - } - catch (RecognitionException re) { - _localctx.exception = re; - _errHandler.reportError(this, re); - _errHandler.recover(this, re); - } - finally { - exitRule(); - } - return _localctx; - } - public static class ConvertTemplateContext extends ParserRuleContext { public TerminalNode CONVERT() { return getToken(SqlBaseParser.CONVERT, 0); } public ExpressionContext expression() { @@ -4535,17 +4521,17 @@ public final ConvertTemplateContext convertTemplate() throws RecognitionExceptio try { enterOuterAlt(_localctx, 1); { - setState(625); + setState(614); match(CONVERT); - setState(626); + setState(615); match(T__0); - setState(627); + setState(616); expression(); - setState(628); + setState(617); match(T__2); - setState(629); + setState(618); dataType(); - setState(630); + setState(619); match(T__1); } } @@ -4589,23 +4575,23 @@ public final ExtractExpressionContext extractExpression() throws RecognitionExce ExtractExpressionContext _localctx = new ExtractExpressionContext(_ctx, getState()); enterRule(_localctx, 70, RULE_extractExpression); try { - setState(637); + setState(626); switch (_input.LA(1)) { case EXTRACT: enterOuterAlt(_localctx, 1); { - setState(632); + setState(621); 
extractTemplate(); } break; case FUNCTION_ESC: enterOuterAlt(_localctx, 2); { - setState(633); + setState(622); match(FUNCTION_ESC); - setState(634); + setState(623); extractTemplate(); - setState(635); + setState(624); match(ESC_END); } break; @@ -4659,17 +4645,17 @@ public final ExtractTemplateContext extractTemplate() throws RecognitionExceptio try { enterOuterAlt(_localctx, 1); { - setState(639); + setState(628); match(EXTRACT); - setState(640); + setState(629); match(T__0); - setState(641); + setState(630); ((ExtractTemplateContext)_localctx).field = identifier(); - setState(642); + setState(631); match(FROM); - setState(643); + setState(632); valueExpression(0); - setState(644); + setState(633); match(T__1); } } @@ -4712,12 +4698,14 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx FunctionExpressionContext _localctx = new FunctionExpressionContext(_ctx, getState()); enterRule(_localctx, 74, RULE_functionExpression); try { - setState(651); + setState(640); switch (_input.LA(1)) { case ANALYZE: case ANALYZED: case CATALOGS: case COLUMNS: + case CURRENT_DATE: + case CURRENT_TIMESTAMP: case DAY: case DEBUG: case EXECUTABLE: @@ -4758,18 +4746,18 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx case BACKQUOTED_IDENTIFIER: enterOuterAlt(_localctx, 1); { - setState(646); + setState(635); functionTemplate(); } break; case FUNCTION_ESC: enterOuterAlt(_localctx, 2); { - setState(647); + setState(636); match(FUNCTION_ESC); - setState(648); + setState(637); functionTemplate(); - setState(649); + setState(638); match(ESC_END); } break; @@ -4827,45 +4815,45 @@ public final FunctionTemplateContext functionTemplate() throws RecognitionExcept try { enterOuterAlt(_localctx, 1); { - setState(653); + setState(642); functionName(); - setState(654); + setState(643); match(T__0); - setState(666); + setState(655); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ALL) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << DISTINCT) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH) | (1L << NOT) | (1L << NULL) | (1L << OPTIMIZED))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RIGHT - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << (TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TRUE - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)) | (1L << (FUNCTION_ESC - 67)) | (1L << (DATE_ESC - 67)) | (1L << (TIME_ESC - 67)) | (1L << (TIMESTAMP_ESC - 67)) | (1L << (GUID_ESC - 67)) | (1L << (PLUS - 67)) | (1L << (MINUS - 67)) | (1L << (ASTERISK - 67)) | (1L << (PARAM - 67)) | (1L << (STRING - 67)) | (1L << (INTEGER_VALUE - 67)) | (1L << (DECIMAL_VALUE - 67)) | (1L << (IDENTIFIER - 67)) | (1L << (DIGIT_IDENTIFIER - 67)) | (1L << (QUOTED_IDENTIFIER - 67)) | (1L << (BACKQUOTED_IDENTIFIER - 67)))) != 0)) { { - setState(656); + setState(645); _la = _input.LA(1); if 
(_la==ALL || _la==DISTINCT) { { - setState(655); + setState(644); setQuantifier(); } } - setState(658); + setState(647); expression(); - setState(663); + setState(652); _errHandler.sync(this); _la = _input.LA(1); while (_la==T__2) { { { - setState(659); + setState(648); match(T__2); - setState(660); + setState(649); expression(); } } - setState(665); + setState(654); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(668); + setState(657); match(T__1); } } @@ -4909,19 +4897,19 @@ public final FunctionNameContext functionName() throws RecognitionException { FunctionNameContext _localctx = new FunctionNameContext(_ctx, getState()); enterRule(_localctx, 78, RULE_functionName); try { - setState(673); + setState(662); switch (_input.LA(1)) { case LEFT: enterOuterAlt(_localctx, 1); { - setState(670); + setState(659); match(LEFT); } break; case RIGHT: enterOuterAlt(_localctx, 2); { - setState(671); + setState(660); match(RIGHT); } break; @@ -4929,6 +4917,8 @@ public final FunctionNameContext functionName() throws RecognitionException { case ANALYZED: case CATALOGS: case COLUMNS: + case CURRENT_DATE: + case CURRENT_TIMESTAMP: case DAY: case DEBUG: case EXECUTABLE: @@ -4967,7 +4957,7 @@ public final FunctionNameContext functionName() throws RecognitionException { case BACKQUOTED_IDENTIFIER: enterOuterAlt(_localctx, 3); { - setState(672); + setState(661); identifier(); } break; @@ -5198,13 +5188,13 @@ public final ConstantContext constant() throws RecognitionException { enterRule(_localctx, 80, RULE_constant); try { int _alt; - setState(701); + setState(690); switch (_input.LA(1)) { case NULL: _localctx = new NullLiteralContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(675); + setState(664); match(NULL); } break; @@ -5212,7 +5202,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new IntervalLiteralContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(676); + setState(665); interval(); } break; @@ -5221,7 +5211,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new NumericLiteralContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(677); + setState(666); number(); } break; @@ -5230,7 +5220,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new BooleanLiteralContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(678); + setState(667); booleanValue(); } break; @@ -5238,7 +5228,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new StringLiteralContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(680); + setState(669); _errHandler.sync(this); _alt = 1; do { @@ -5246,7 +5236,7 @@ public final ConstantContext constant() throws RecognitionException { case 1: { { - setState(679); + setState(668); match(STRING); } } @@ -5254,9 +5244,9 @@ public final ConstantContext constant() throws RecognitionException { default: throw new NoViableAltException(this); } - setState(682); + setState(671); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,91,_ctx); + _alt = getInterpreter().adaptivePredict(_input,88,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; @@ -5264,7 +5254,7 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new ParamLiteralContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(684); + setState(673); match(PARAM); } break; @@ -5272,11 +5262,11 @@ public final ConstantContext constant() throws 
RecognitionException { _localctx = new DateEscapedLiteralContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(685); + setState(674); match(DATE_ESC); - setState(686); + setState(675); string(); - setState(687); + setState(676); match(ESC_END); } break; @@ -5284,11 +5274,11 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new TimeEscapedLiteralContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(689); + setState(678); match(TIME_ESC); - setState(690); + setState(679); string(); - setState(691); + setState(680); match(ESC_END); } break; @@ -5296,11 +5286,11 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new TimestampEscapedLiteralContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(693); + setState(682); match(TIMESTAMP_ESC); - setState(694); + setState(683); string(); - setState(695); + setState(684); match(ESC_END); } break; @@ -5308,11 +5298,11 @@ public final ConstantContext constant() throws RecognitionException { _localctx = new GuidEscapedLiteralContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(697); + setState(686); match(GUID_ESC); - setState(698); + setState(687); string(); - setState(699); + setState(688); match(ESC_END); } break; @@ -5365,7 +5355,7 @@ public final ComparisonOperatorContext comparisonOperator() throws RecognitionEx try { enterOuterAlt(_localctx, 1); { - setState(703); + setState(692); _la = _input.LA(1); if ( !(((((_la - 100)) & ~0x3f) == 0 && ((1L << (_la - 100)) & ((1L << (EQ - 100)) | (1L << (NULLEQ - 100)) | (1L << (NEQ - 100)) | (1L << (LT - 100)) | (1L << (LTE - 100)) | (1L << (GT - 100)) | (1L << (GTE - 100)))) != 0)) ) { _errHandler.recoverInline(this); @@ -5414,7 +5404,7 @@ public final BooleanValueContext booleanValue() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(705); + setState(694); _la = _input.LA(1); if ( !(_la==FALSE || _la==TRUE) ) { _errHandler.recoverInline(this); @@ -5482,13 +5472,13 @@ public final IntervalContext interval() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(707); + setState(696); match(INTERVAL); - setState(709); + setState(698); _la = _input.LA(1); if (_la==PLUS || _la==MINUS) { { - setState(708); + setState(697); ((IntervalContext)_localctx).sign = _input.LT(1); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { @@ -5499,35 +5489,35 @@ public final IntervalContext interval() throws RecognitionException { } } - setState(713); + setState(702); switch (_input.LA(1)) { case INTEGER_VALUE: case DECIMAL_VALUE: { - setState(711); + setState(700); ((IntervalContext)_localctx).valueNumeric = number(); } break; case PARAM: case STRING: { - setState(712); + setState(701); ((IntervalContext)_localctx).valuePattern = string(); } break; default: throw new NoViableAltException(this); } - setState(715); + setState(704); ((IntervalContext)_localctx).leading = intervalField(); - setState(718); + setState(707); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,95,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,92,_ctx) ) { case 1: { - setState(716); + setState(705); match(TO); - setState(717); + setState(706); ((IntervalContext)_localctx).trailing = intervalField(); } break; @@ -5584,7 +5574,7 @@ public final IntervalFieldContext intervalField() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(720); + setState(709); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << DAY) | (1L 
<< DAYS) | (1L << HOUR) | (1L << HOURS) | (1L << MINUTE) | (1L << MINUTES) | (1L << MONTH) | (1L << MONTHS))) != 0) || ((((_la - 74)) & ~0x3f) == 0 && ((1L << (_la - 74)) & ((1L << (SECOND - 74)) | (1L << (SECONDS - 74)) | (1L << (YEAR - 74)) | (1L << (YEARS - 74)))) != 0)) ) { _errHandler.recoverInline(this); @@ -5642,7 +5632,7 @@ public final DataTypeContext dataType() throws RecognitionException { _localctx = new PrimitiveDataTypeContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(722); + setState(711); identifier(); } } @@ -5694,25 +5684,25 @@ public final QualifiedNameContext qualifiedName() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(729); + setState(718); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,96,_ctx); + _alt = getInterpreter().adaptivePredict(_input,93,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(724); + setState(713); identifier(); - setState(725); + setState(714); match(DOT); } } } - setState(731); + setState(720); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,96,_ctx); + _alt = getInterpreter().adaptivePredict(_input,93,_ctx); } - setState(732); + setState(721); identifier(); } } @@ -5757,13 +5747,13 @@ public final IdentifierContext identifier() throws RecognitionException { IdentifierContext _localctx = new IdentifierContext(_ctx, getState()); enterRule(_localctx, 94, RULE_identifier); try { - setState(736); + setState(725); switch (_input.LA(1)) { case QUOTED_IDENTIFIER: case BACKQUOTED_IDENTIFIER: enterOuterAlt(_localctx, 1); { - setState(734); + setState(723); quoteIdentifier(); } break; @@ -5771,6 +5761,8 @@ public final IdentifierContext identifier() throws RecognitionException { case ANALYZED: case CATALOGS: case COLUMNS: + case CURRENT_DATE: + case CURRENT_TIMESTAMP: case DAY: case DEBUG: case EXECUTABLE: @@ -5807,7 +5799,7 @@ public final IdentifierContext identifier() throws RecognitionException { case DIGIT_IDENTIFIER: enterOuterAlt(_localctx, 2); { - setState(735); + setState(724); unquoteIdentifier(); } break; @@ -5860,43 +5852,43 @@ public final TableIdentifierContext tableIdentifier() throws RecognitionExceptio enterRule(_localctx, 96, RULE_tableIdentifier); int _la; try { - setState(750); + setState(739); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,100,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,97,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(741); + setState(730); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH) | (1L << OPTIMIZED))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << (TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)) | (1L << (IDENTIFIER - 67)) | (1L << (DIGIT_IDENTIFIER - 67)) | (1L << (QUOTED_IDENTIFIER - 67)) | (1L << (BACKQUOTED_IDENTIFIER - 
67)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH) | (1L << OPTIMIZED))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << (TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)) | (1L << (IDENTIFIER - 67)) | (1L << (DIGIT_IDENTIFIER - 67)) | (1L << (QUOTED_IDENTIFIER - 67)) | (1L << (BACKQUOTED_IDENTIFIER - 67)))) != 0)) { { - setState(738); + setState(727); ((TableIdentifierContext)_localctx).catalog = identifier(); - setState(739); + setState(728); match(T__3); } } - setState(743); + setState(732); match(TABLE_IDENTIFIER); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(747); + setState(736); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,99,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,96,_ctx) ) { case 1: { - setState(744); + setState(733); ((TableIdentifierContext)_localctx).catalog = identifier(); - setState(745); + setState(734); match(T__3); } break; } - setState(749); + setState(738); ((TableIdentifierContext)_localctx).name = identifier(); } break; @@ -5963,13 +5955,13 @@ public final QuoteIdentifierContext quoteIdentifier() throws RecognitionExceptio QuoteIdentifierContext _localctx = new QuoteIdentifierContext(_ctx, getState()); enterRule(_localctx, 98, RULE_quoteIdentifier); try { - setState(754); + setState(743); switch (_input.LA(1)) { case QUOTED_IDENTIFIER: _localctx = new QuotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(752); + setState(741); match(QUOTED_IDENTIFIER); } break; @@ -5977,7 +5969,7 @@ public final QuoteIdentifierContext quoteIdentifier() throws RecognitionExceptio _localctx = new BackQuotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(753); + setState(742); match(BACKQUOTED_IDENTIFIER); } break; @@ -6049,13 +6041,13 @@ public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionExce UnquoteIdentifierContext _localctx = new UnquoteIdentifierContext(_ctx, getState()); enterRule(_localctx, 100, RULE_unquoteIdentifier); try { - setState(759); + setState(748); switch (_input.LA(1)) { case IDENTIFIER: _localctx = new UnquotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(756); + setState(745); match(IDENTIFIER); } break; @@ -6063,6 +6055,8 @@ public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionExce case ANALYZED: case CATALOGS: case COLUMNS: + case CURRENT_DATE: + case CURRENT_TIMESTAMP: case DAY: case DEBUG: case EXECUTABLE: @@ -6098,7 +6092,7 @@ public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionExce _localctx = new UnquotedIdentifierContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(757); + setState(746); nonReserved(); } break; @@ -6106,7 +6100,7 @@ public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionExce _localctx = new 
DigitIdentifierContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(758); + setState(747); match(DIGIT_IDENTIFIER); } break; @@ -6175,13 +6169,13 @@ public final NumberContext number() throws RecognitionException { NumberContext _localctx = new NumberContext(_ctx, getState()); enterRule(_localctx, 102, RULE_number); try { - setState(763); + setState(752); switch (_input.LA(1)) { case DECIMAL_VALUE: _localctx = new DecimalLiteralContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(761); + setState(750); match(DECIMAL_VALUE); } break; @@ -6189,7 +6183,7 @@ public final NumberContext number() throws RecognitionException { _localctx = new IntegerLiteralContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(762); + setState(751); match(INTEGER_VALUE); } break; @@ -6237,7 +6231,7 @@ public final StringContext string() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(765); + setState(754); _la = _input.LA(1); if ( !(_la==PARAM || _la==STRING) ) { _errHandler.recoverInline(this); @@ -6262,6 +6256,8 @@ public static class NonReservedContext extends ParserRuleContext { public TerminalNode ANALYZED() { return getToken(SqlBaseParser.ANALYZED, 0); } public TerminalNode CATALOGS() { return getToken(SqlBaseParser.CATALOGS, 0); } public TerminalNode COLUMNS() { return getToken(SqlBaseParser.COLUMNS, 0); } + public TerminalNode CURRENT_DATE() { return getToken(SqlBaseParser.CURRENT_DATE, 0); } + public TerminalNode CURRENT_TIMESTAMP() { return getToken(SqlBaseParser.CURRENT_TIMESTAMP, 0); } public TerminalNode DAY() { return getToken(SqlBaseParser.DAY, 0); } public TerminalNode DEBUG() { return getToken(SqlBaseParser.DEBUG, 0); } public TerminalNode EXECUTABLE() { return getToken(SqlBaseParser.EXECUTABLE, 0); } @@ -6320,9 +6316,9 @@ public final NonReservedContext nonReserved() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(767); + setState(756); _la = _input.LA(1); - if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH) | (1L << OPTIMIZED))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << (TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)))) != 0)) ) { + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH) | (1L << OPTIMIZED))) != 0) || ((((_la - 67)) & ~0x3f) == 0 && ((1L << (_la - 67)) & ((1L << (PARSED - 67)) | (1L << (PHYSICAL - 67)) | (1L << (PLAN - 67)) | (1L << (RLIKE - 67)) | (1L << (QUERY - 67)) | (1L << (SCHEMAS - 67)) | (1L << (SECOND - 67)) | (1L << (SHOW - 67)) | (1L << (SYS - 67)) | (1L << 
(TABLES - 67)) | (1L << (TEXT - 67)) | (1L << (TYPE - 67)) | (1L << (TYPES - 67)) | (1L << (VERIFY - 67)) | (1L << (YEAR - 67)))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); @@ -6371,7 +6367,7 @@ private boolean valueExpression_sempred(ValueExpressionContext _localctx, int pr } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3\u0081\u0304\4\2\t"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3\u0081\u02f9\4\2\t"+ "\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13"+ "\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ @@ -6417,266 +6413,261 @@ private boolean valueExpression_sempred(ValueExpressionContext _localctx, int pr "\3\36\3\36\3\36\3\36\3\36\5\36\u0225\n\36\3\37\3\37\3\37\3\37\5\37\u022b"+ "\n\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\7\37\u0237\n\37"+ "\f\37\16\37\u023a\13\37\3 \3 \3 \3 \3 \3 \3 \5 \u0243\n \3 \3 \3 \3 \3"+ - " \3 \3 \3 \3 \3 \3 \5 \u0250\n \3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\5!\u025c"+ - "\n!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\5#\u0268\n#\3#\3#\3#\5#\u026d"+ - "\n#\3#\5#\u0270\n#\5#\u0272\n#\3$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\5%"+ - "\u0280\n%\3&\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\5\'\u028e\n\'\3(\3"+ - "(\3(\5(\u0293\n(\3(\3(\3(\7(\u0298\n(\f(\16(\u029b\13(\5(\u029d\n(\3("+ - "\3(\3)\3)\3)\5)\u02a4\n)\3*\3*\3*\3*\3*\6*\u02ab\n*\r*\16*\u02ac\3*\3"+ - "*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\5*\u02c0\n*\3+\3+\3,\3"+ - ",\3-\3-\5-\u02c8\n-\3-\3-\5-\u02cc\n-\3-\3-\3-\5-\u02d1\n-\3.\3.\3/\3"+ - "/\3\60\3\60\3\60\7\60\u02da\n\60\f\60\16\60\u02dd\13\60\3\60\3\60\3\61"+ - "\3\61\5\61\u02e3\n\61\3\62\3\62\3\62\5\62\u02e8\n\62\3\62\3\62\3\62\3"+ - "\62\5\62\u02ee\n\62\3\62\5\62\u02f1\n\62\3\63\3\63\5\63\u02f5\n\63\3\64"+ - "\3\64\3\64\5\64\u02fa\n\64\3\65\3\65\5\65\u02fe\n\65\3\66\3\66\3\67\3"+ - "\67\3\67\2\4.<8\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62"+ - "\64\668:<>@BDFHJLNPRTVXZ\\^`bdfhjl\2\22\b\2\7\7\t\t\36\36\66\66AAEE\4"+ - "\2((SS\4\2\t\tAA\4\2%%--\3\2\32\33\3\2mn\4\2\7\7vv\4\2\r\r\32\32\4\2#"+ - "#\62\62\4\2\7\7\34\34\3\2oq\3\2fl\4\2\"\"TT\7\2\27\30+,8;LM\\]\3\2tu\30"+ - "\2\b\t\22\23\27\27\31\31\36\36 #$&(++//\62\62\65\6688::AAEGILOPRSVWY"+ - "Y\\\\\u0361\2n\3\2\2\2\4q\3\2\2\2\6\u00d9\3\2\2\2\b\u00e4\3\2\2\2\n\u00e8"+ - "\3\2\2\2\f\u00fd\3\2\2\2\16\u0104\3\2\2\2\20\u0106\3\2\2\2\22\u010e\3"+ - "\2\2\2\24\u012a\3\2\2\2\26\u0134\3\2\2\2\30\u013e\3\2\2\2\32\u014d\3\2"+ - "\2\2\34\u014f\3\2\2\2\36\u0155\3\2\2\2 \u0157\3\2\2\2\"\u015e\3\2\2\2"+ - "$\u0170\3\2\2\2&\u0181\3\2\2\2(\u0191\3\2\2\2*\u01ac\3\2\2\2,\u01ae\3"+ - "\2\2\2.\u01cf\3\2\2\2\60\u01e0\3\2\2\2\62\u01e3\3\2\2\2\64\u0215\3\2\2"+ - "\2\66\u0217\3\2\2\28\u021a\3\2\2\2:\u0224\3\2\2\2<\u022a\3\2\2\2>\u024f"+ - "\3\2\2\2@\u025b\3\2\2\2B\u025d\3\2\2\2D\u0271\3\2\2\2F\u0273\3\2\2\2H"+ - "\u027f\3\2\2\2J\u0281\3\2\2\2L\u028d\3\2\2\2N\u028f\3\2\2\2P\u02a3\3\2"+ - "\2\2R\u02bf\3\2\2\2T\u02c1\3\2\2\2V\u02c3\3\2\2\2X\u02c5\3\2\2\2Z\u02d2"+ - "\3\2\2\2\\\u02d4\3\2\2\2^\u02db\3\2\2\2`\u02e2\3\2\2\2b\u02f0\3\2\2\2"+ - "d\u02f4\3\2\2\2f\u02f9\3\2\2\2h\u02fd\3\2\2\2j\u02ff\3\2\2\2l\u0301\3"+ - "\2\2\2no\5\6\4\2op\7\2\2\3p\3\3\2\2\2qr\5,\27\2rs\7\2\2\3s\5\3\2\2\2t"+ - "\u00da\5\b\5\2u\u0083\7 \2\2v\177\7\3\2\2wx\7G\2\2x~\t\2\2\2yz\7$\2\2"+ - "z~\t\3\2\2{|\7Y\2\2|~\5V,\2}w\3\2\2\2}y\3\2\2\2}{\3\2\2\2~\u0081\3\2\2"+ - "\2\177}\3\2\2\2\177\u0080\3\2\2\2\u0080\u0082\3\2\2\2\u0081\177\3\2\2"+ - 
"\2\u0082\u0084\7\4\2\2\u0083v\3\2\2\2\u0083\u0084\3\2\2\2\u0084\u0085"+ - "\3\2\2\2\u0085\u00da\5\6\4\2\u0086\u0092\7\31\2\2\u0087\u008e\7\3\2\2"+ - "\u0088\u0089\7G\2\2\u0089\u008d\t\4\2\2\u008a\u008b\7$\2\2\u008b\u008d"+ - "\t\3\2\2\u008c\u0088\3\2\2\2\u008c\u008a\3\2\2\2\u008d\u0090\3\2\2\2\u008e"+ - "\u008c\3\2\2\2\u008e\u008f\3\2\2\2\u008f\u0091\3\2\2\2\u0090\u008e\3\2"+ - "\2\2\u0091\u0093\7\4\2\2\u0092\u0087\3\2\2\2\u0092\u0093\3\2\2\2\u0093"+ - "\u0094\3\2\2\2\u0094\u00da\5\6\4\2\u0095\u0096\7O\2\2\u0096\u0099\7R\2"+ - "\2\u0097\u009a\5\66\34\2\u0098\u009a\5b\62\2\u0099\u0097\3\2\2\2\u0099"+ - "\u0098\3\2\2\2\u0099\u009a\3\2\2\2\u009a\u00da\3\2\2\2\u009b\u009c\7O"+ - "\2\2\u009c\u009d\7\23\2\2\u009d\u00a0\t\5\2\2\u009e\u00a1\5\66\34\2\u009f"+ - "\u00a1\5b\62\2\u00a0\u009e\3\2\2\2\u00a0\u009f\3\2\2\2\u00a1\u00da\3\2"+ - "\2\2\u00a2\u00a5\t\6\2\2\u00a3\u00a6\5\66\34\2\u00a4\u00a6\5b\62\2\u00a5"+ - "\u00a3\3\2\2\2\u00a5\u00a4\3\2\2\2\u00a6\u00da\3\2\2\2\u00a7\u00a8\7O"+ - "\2\2\u00a8\u00aa\7\'\2\2\u00a9\u00ab\5\66\34\2\u00aa\u00a9\3\2\2\2\u00aa"+ - "\u00ab\3\2\2\2\u00ab\u00da\3\2\2\2\u00ac\u00ad\7O\2\2\u00ad\u00da\7K\2"+ - "\2\u00ae\u00af\7P\2\2\u00af\u00b2\7R\2\2\u00b0\u00b1\7\21\2\2\u00b1\u00b3"+ - "\5\66\34\2\u00b2\u00b0\3\2\2\2\u00b2\u00b3\3\2\2\2\u00b3\u00b6\3\2\2\2"+ - "\u00b4\u00b7\5\66\34\2\u00b5\u00b7\5b\62\2\u00b6\u00b4\3\2\2\2\u00b6\u00b5"+ - "\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00c1\3\2\2\2\u00b8\u00b9\7V\2\2\u00b9"+ - "\u00be\5j\66\2\u00ba\u00bb\7\5\2\2\u00bb\u00bd\5j\66\2\u00bc\u00ba\3\2"+ - "\2\2\u00bd\u00c0\3\2\2\2\u00be\u00bc\3\2\2\2\u00be\u00bf\3\2\2\2\u00bf"+ - "\u00c2\3\2\2\2\u00c0\u00be\3\2\2\2\u00c1\u00b8\3\2\2\2\u00c1\u00c2\3\2"+ - "\2\2\u00c2\u00da\3\2\2\2\u00c3\u00c4\7P\2\2\u00c4\u00c7\7\23\2\2\u00c5"+ - "\u00c6\7\21\2\2\u00c6\u00c8\5j\66\2\u00c7\u00c5\3\2\2\2\u00c7\u00c8\3"+ - "\2\2\2\u00c8\u00cc\3\2\2\2\u00c9\u00ca\7Q\2\2\u00ca\u00cd\5\66\34\2\u00cb"+ - "\u00cd\5b\62\2\u00cc\u00c9\3\2\2\2\u00cc\u00cb\3\2\2\2\u00cc\u00cd\3\2"+ - "\2\2\u00cd\u00cf\3\2\2\2\u00ce\u00d0\5\66\34\2\u00cf\u00ce\3\2\2\2\u00cf"+ - "\u00d0\3\2\2\2\u00d0\u00da\3\2\2\2\u00d1\u00d2\7P\2\2\u00d2\u00d7\7W\2"+ - "\2\u00d3\u00d5\t\7\2\2\u00d4\u00d3\3\2\2\2\u00d4\u00d5\3\2\2\2\u00d5\u00d6"+ - "\3\2\2\2\u00d6\u00d8\5h\65\2\u00d7\u00d4\3\2\2\2\u00d7\u00d8\3\2\2\2\u00d8"+ - "\u00da\3\2\2\2\u00d9t\3\2\2\2\u00d9u\3\2\2\2\u00d9\u0086\3\2\2\2\u00d9"+ - "\u0095\3\2\2\2\u00d9\u009b\3\2\2\2\u00d9\u00a2\3\2\2\2\u00d9\u00a7\3\2"+ - "\2\2\u00d9\u00ac\3\2\2\2\u00d9\u00ae\3\2\2\2\u00d9\u00c3\3\2\2\2\u00d9"+ - "\u00d1\3\2\2\2\u00da\7\3\2\2\2\u00db\u00dc\7[\2\2\u00dc\u00e1\5\34\17"+ - "\2\u00dd\u00de\7\5\2\2\u00de\u00e0\5\34\17\2\u00df\u00dd\3\2\2\2\u00e0"+ - "\u00e3\3\2\2\2\u00e1\u00df\3\2\2\2\u00e1\u00e2\3\2\2\2\u00e2\u00e5\3\2"+ - "\2\2\u00e3\u00e1\3\2\2\2\u00e4\u00db\3\2\2\2\u00e4\u00e5\3\2\2\2\u00e5"+ - "\u00e6\3\2\2\2\u00e6\u00e7\5\n\6\2\u00e7\t\3\2\2\2\u00e8\u00f3\5\16\b"+ - "\2\u00e9\u00ea\7C\2\2\u00ea\u00eb\7\17\2\2\u00eb\u00f0\5\20\t\2\u00ec"+ - "\u00ed\7\5\2\2\u00ed\u00ef\5\20\t\2\u00ee\u00ec\3\2\2\2\u00ef\u00f2\3"+ - "\2\2\2\u00f0\u00ee\3\2\2\2\u00f0\u00f1\3\2\2\2\u00f1\u00f4\3\2\2\2\u00f2"+ - "\u00f0\3\2\2\2\u00f3\u00e9\3\2\2\2\u00f3\u00f4\3\2\2\2\u00f4\u00f6\3\2"+ - "\2\2\u00f5\u00f7\5\f\7\2\u00f6\u00f5\3\2\2\2\u00f6\u00f7\3\2\2\2\u00f7"+ - "\13\3\2\2\2\u00f8\u00f9\7\65\2\2\u00f9\u00fe\t\b\2\2\u00fa\u00fb\7`\2"+ - "\2\u00fb\u00fc\t\b\2\2\u00fc\u00fe\7e\2\2\u00fd\u00f8\3\2\2\2\u00fd\u00fa"+ - "\3\2\2\2\u00fe\r\3\2\2\2\u00ff\u0105\5\22\n\2\u0100\u0101\7\3\2\2\u0101"+ - 
"\u0102\5\n\6\2\u0102\u0103\7\4\2\2\u0103\u0105\3\2\2\2\u0104\u00ff\3\2"+ - "\2\2\u0104\u0100\3\2\2\2\u0105\17\3\2\2\2\u0106\u0108\5,\27\2\u0107\u0109"+ - "\t\t\2\2\u0108\u0107\3\2\2\2\u0108\u0109\3\2\2\2\u0109\u010c\3\2\2\2\u010a"+ - "\u010b\7?\2\2\u010b\u010d\t\n\2\2\u010c\u010a\3\2\2\2\u010c\u010d\3\2"+ - "\2\2\u010d\21\3\2\2\2\u010e\u0110\7N\2\2\u010f\u0111\5\36\20\2\u0110\u010f"+ - "\3\2\2\2\u0110\u0111\3\2\2\2\u0111\u0112\3\2\2\2\u0112\u0117\5 \21\2\u0113"+ - "\u0114\7\5\2\2\u0114\u0116\5 \21\2\u0115\u0113\3\2\2\2\u0116\u0119\3\2"+ - "\2\2\u0117\u0115\3\2\2\2\u0117\u0118\3\2\2\2\u0118\u011b\3\2\2\2\u0119"+ - "\u0117\3\2\2\2\u011a\u011c\5\24\13\2\u011b\u011a\3\2\2\2\u011b\u011c\3"+ - "\2\2\2\u011c\u011f\3\2\2\2\u011d\u011e\7Z\2\2\u011e\u0120\5.\30\2\u011f"+ - "\u011d\3\2\2\2\u011f\u0120\3\2\2\2\u0120\u0124\3\2\2\2\u0121\u0122\7)"+ - "\2\2\u0122\u0123\7\17\2\2\u0123\u0125\5\26\f\2\u0124\u0121\3\2\2\2\u0124"+ - "\u0125\3\2\2\2\u0125\u0128\3\2\2\2\u0126\u0127\7*\2\2\u0127\u0129\5.\30"+ - "\2\u0128\u0126\3\2\2\2\u0128\u0129\3\2\2\2\u0129\23\3\2\2\2\u012a\u012b"+ - "\7%\2\2\u012b\u0130\5\"\22\2\u012c\u012d\7\5\2\2\u012d\u012f\5\"\22\2"+ - "\u012e\u012c\3\2\2\2\u012f\u0132\3\2\2\2\u0130\u012e\3\2\2\2\u0130\u0131"+ - "\3\2\2\2\u0131\25\3\2\2\2\u0132\u0130\3\2\2\2\u0133\u0135\5\36\20\2\u0134"+ - "\u0133\3\2\2\2\u0134\u0135\3\2\2\2\u0135\u0136\3\2\2\2\u0136\u013b\5\30"+ - "\r\2\u0137\u0138\7\5\2\2\u0138\u013a\5\30\r\2\u0139\u0137\3\2\2\2\u013a"+ - "\u013d\3\2\2\2\u013b\u0139\3\2\2\2\u013b\u013c\3\2\2\2\u013c\27\3\2\2"+ - "\2\u013d\u013b\3\2\2\2\u013e\u013f\5\32\16\2\u013f\31\3\2\2\2\u0140\u0149"+ - "\7\3\2\2\u0141\u0146\5,\27\2\u0142\u0143\7\5\2\2\u0143\u0145\5,\27\2\u0144"+ - "\u0142\3\2\2\2\u0145\u0148\3\2\2\2\u0146\u0144\3\2\2\2\u0146\u0147\3\2"+ - "\2\2\u0147\u014a\3\2\2\2\u0148\u0146\3\2\2\2\u0149\u0141\3\2\2\2\u0149"+ - "\u014a\3\2\2\2\u014a\u014b\3\2\2\2\u014b\u014e\7\4\2\2\u014c\u014e\5,"+ - "\27\2\u014d\u0140\3\2\2\2\u014d\u014c\3\2\2\2\u014e\33\3\2\2\2\u014f\u0150"+ - "\5`\61\2\u0150\u0151\7\f\2\2\u0151\u0152\7\3\2\2\u0152\u0153\5\n\6\2\u0153"+ - "\u0154\7\4\2\2\u0154\35\3\2\2\2\u0155\u0156\t\13\2\2\u0156\37\3\2\2\2"+ - "\u0157\u015c\5,\27\2\u0158\u015a\7\f\2\2\u0159\u0158\3\2\2\2\u0159\u015a"+ - "\3\2\2\2\u015a\u015b\3\2\2\2\u015b\u015d\5`\61\2\u015c\u0159\3\2\2\2\u015c"+ - "\u015d\3\2\2\2\u015d!\3\2\2\2\u015e\u0162\5*\26\2\u015f\u0161\5$\23\2"+ - "\u0160\u015f\3\2\2\2\u0161\u0164\3\2\2\2\u0162\u0160\3\2\2\2\u0162\u0163"+ - "\3\2\2\2\u0163#\3\2\2\2\u0164\u0162\3\2\2\2\u0165\u0166\5&\24\2\u0166"+ - "\u0167\7\61\2\2\u0167\u0169\5*\26\2\u0168\u016a\5(\25\2\u0169\u0168\3"+ - "\2\2\2\u0169\u016a\3\2\2\2\u016a\u0171\3\2\2\2\u016b\u016c\7<\2\2\u016c"+ - "\u016d\5&\24\2\u016d\u016e\7\61\2\2\u016e\u016f\5*\26\2\u016f\u0171\3"+ - "\2\2\2\u0170\u0165\3\2\2\2\u0170\u016b\3\2\2\2\u0171%\3\2\2\2\u0172\u0174"+ - "\7.\2\2\u0173\u0172\3\2\2\2\u0173\u0174\3\2\2\2\u0174\u0182\3\2\2\2\u0175"+ - "\u0177\7\63\2\2\u0176\u0178\7D\2\2\u0177\u0176\3\2\2\2\u0177\u0178\3\2"+ - "\2\2\u0178\u0182\3\2\2\2\u0179\u017b\7H\2\2\u017a\u017c\7D\2\2\u017b\u017a"+ - "\3\2\2\2\u017b\u017c\3\2\2\2\u017c\u0182\3\2\2\2\u017d\u017f\7&\2\2\u017e"+ - "\u0180\7D\2\2\u017f\u017e\3\2\2\2\u017f\u0180\3\2\2\2\u0180\u0182\3\2"+ - "\2\2\u0181\u0173\3\2\2\2\u0181\u0175\3\2\2\2\u0181\u0179\3\2\2\2\u0181"+ - "\u017d\3\2\2\2\u0182\'\3\2\2\2\u0183\u0184\7@\2\2\u0184\u0192\5.\30\2"+ - "\u0185\u0186\7X\2\2\u0186\u0187\7\3\2\2\u0187\u018c\5`\61\2\u0188\u0189"+ - "\7\5\2\2\u0189\u018b\5`\61\2\u018a\u0188\3\2\2\2\u018b\u018e\3\2\2\2\u018c"+ - 
"\u018a\3\2\2\2\u018c\u018d\3\2\2\2\u018d\u018f\3\2\2\2\u018e\u018c\3\2"+ - "\2\2\u018f\u0190\7\4\2\2\u0190\u0192\3\2\2\2\u0191\u0183\3\2\2\2\u0191"+ - "\u0185\3\2\2\2\u0192)\3\2\2\2\u0193\u0198\5b\62\2\u0194\u0196\7\f\2\2"+ - "\u0195\u0194\3\2\2\2\u0195\u0196\3\2\2\2\u0196\u0197\3\2\2\2\u0197\u0199"+ - "\5^\60\2\u0198\u0195\3\2\2\2\u0198\u0199\3\2\2\2\u0199\u01ad\3\2\2\2\u019a"+ - "\u019b\7\3\2\2\u019b\u019c\5\n\6\2\u019c\u01a1\7\4\2\2\u019d\u019f\7\f"+ - "\2\2\u019e\u019d\3\2\2\2\u019e\u019f\3\2\2\2\u019f\u01a0\3\2\2\2\u01a0"+ - "\u01a2\5^\60\2\u01a1\u019e\3\2\2\2\u01a1\u01a2\3\2\2\2\u01a2\u01ad\3\2"+ - "\2\2\u01a3\u01a4\7\3\2\2\u01a4\u01a5\5\"\22\2\u01a5\u01aa\7\4\2\2\u01a6"+ - "\u01a8\7\f\2\2\u01a7\u01a6\3\2\2\2\u01a7\u01a8\3\2\2\2\u01a8\u01a9\3\2"+ - "\2\2\u01a9\u01ab\5^\60\2\u01aa\u01a7\3\2\2\2\u01aa\u01ab\3\2\2\2\u01ab"+ - "\u01ad\3\2\2\2\u01ac\u0193\3\2\2\2\u01ac\u019a\3\2\2\2\u01ac\u01a3\3\2"+ - "\2\2\u01ad+\3\2\2\2\u01ae\u01af\5.\30\2\u01af-\3\2\2\2\u01b0\u01b1\b\30"+ - "\1\2\u01b1\u01b2\7=\2\2\u01b2\u01d0\5.\30\n\u01b3\u01b4\7\37\2\2\u01b4"+ - "\u01b5\7\3\2\2\u01b5\u01b6\5\b\5\2\u01b6\u01b7\7\4\2\2\u01b7\u01d0\3\2"+ - "\2\2\u01b8\u01b9\7J\2\2\u01b9\u01ba\7\3\2\2\u01ba\u01bb\5j\66\2\u01bb"+ - "\u01bc\5\60\31\2\u01bc\u01bd\7\4\2\2\u01bd\u01d0\3\2\2\2\u01be\u01bf\7"+ - "\67\2\2\u01bf\u01c0\7\3\2\2\u01c0\u01c1\5^\60\2\u01c1\u01c2\7\5\2\2\u01c2"+ - "\u01c3\5j\66\2\u01c3\u01c4\5\60\31\2\u01c4\u01c5\7\4\2\2\u01c5\u01d0\3"+ - "\2\2\2\u01c6\u01c7\7\67\2\2\u01c7\u01c8\7\3\2\2\u01c8\u01c9\5j\66\2\u01c9"+ - "\u01ca\7\5\2\2\u01ca\u01cb\5j\66\2\u01cb\u01cc\5\60\31\2\u01cc\u01cd\7"+ - "\4\2\2\u01cd\u01d0\3\2\2\2\u01ce\u01d0\5\62\32\2\u01cf\u01b0\3\2\2\2\u01cf"+ - "\u01b3\3\2\2\2\u01cf\u01b8\3\2\2\2\u01cf\u01be\3\2\2\2\u01cf\u01c6\3\2"+ - "\2\2\u01cf\u01ce\3\2\2\2\u01d0\u01d9\3\2\2\2\u01d1\u01d2\f\4\2\2\u01d2"+ - "\u01d3\7\n\2\2\u01d3\u01d8\5.\30\5\u01d4\u01d5\f\3\2\2\u01d5\u01d6\7B"+ - "\2\2\u01d6\u01d8\5.\30\4\u01d7\u01d1\3\2\2\2\u01d7\u01d4\3\2\2\2\u01d8"+ - "\u01db\3\2\2\2\u01d9\u01d7\3\2\2\2\u01d9\u01da\3\2\2\2\u01da/\3\2\2\2"+ - "\u01db\u01d9\3\2\2\2\u01dc\u01dd\7\5\2\2\u01dd\u01df\5j\66\2\u01de\u01dc"+ - "\3\2\2\2\u01df\u01e2\3\2\2\2\u01e0\u01de\3\2\2\2\u01e0\u01e1\3\2\2\2\u01e1"+ - "\61\3\2\2\2\u01e2\u01e0\3\2\2\2\u01e3\u01e5\5<\37\2\u01e4\u01e6\5\64\33"+ - "\2\u01e5\u01e4\3\2\2\2\u01e5\u01e6\3\2\2\2\u01e6\63\3\2\2\2\u01e7\u01e9"+ - "\7=\2\2\u01e8\u01e7\3\2\2\2\u01e8\u01e9\3\2\2\2\u01e9\u01ea\3\2\2\2\u01ea"+ - "\u01eb\7\16\2\2\u01eb\u01ec\5<\37\2\u01ec\u01ed\7\n\2\2\u01ed\u01ee\5"+ - "<\37\2\u01ee\u0216\3\2\2\2\u01ef\u01f1\7=\2\2\u01f0\u01ef\3\2\2\2\u01f0"+ - "\u01f1\3\2\2\2\u01f1\u01f2\3\2\2\2\u01f2\u01f3\7-\2\2\u01f3\u01f4\7\3"+ - "\2\2\u01f4\u01f9\5<\37\2\u01f5\u01f6\7\5\2\2\u01f6\u01f8\5<\37\2\u01f7"+ - "\u01f5\3\2\2\2\u01f8\u01fb\3\2\2\2\u01f9\u01f7\3\2\2\2\u01f9\u01fa\3\2"+ - "\2\2\u01fa\u01fc\3\2\2\2\u01fb\u01f9\3\2\2\2\u01fc\u01fd\7\4\2\2\u01fd"+ - "\u0216\3\2\2\2\u01fe\u0200\7=\2\2\u01ff\u01fe\3\2\2\2\u01ff\u0200\3\2"+ - "\2\2\u0200\u0201\3\2\2\2\u0201\u0202\7-\2\2\u0202\u0203\7\3\2\2\u0203"+ - "\u0204\5\b\5\2\u0204\u0205\7\4\2\2\u0205\u0216\3\2\2\2\u0206\u0208\7="+ - "\2\2\u0207\u0206\3\2\2\2\u0207\u0208\3\2\2\2\u0208\u0209\3\2\2\2\u0209"+ - "\u020a\7\64\2\2\u020a\u0216\58\35\2\u020b\u020d\7=\2\2\u020c\u020b\3\2"+ - "\2\2\u020c\u020d\3\2\2\2\u020d\u020e\3\2\2\2\u020e\u020f\7I\2\2\u020f"+ - "\u0216\5j\66\2\u0210\u0212\7\60\2\2\u0211\u0213\7=\2\2\u0212\u0211\3\2"+ - "\2\2\u0212\u0213\3\2\2\2\u0213\u0214\3\2\2\2\u0214\u0216\7>\2\2\u0215"+ - 
"\u01e8\3\2\2\2\u0215\u01f0\3\2\2\2\u0215\u01ff\3\2\2\2\u0215\u0207\3\2"+ - "\2\2\u0215\u020c\3\2\2\2\u0215\u0210\3\2\2\2\u0216\65\3\2\2\2\u0217\u0218"+ - "\7\64\2\2\u0218\u0219\58\35\2\u0219\67\3\2\2\2\u021a\u021c\5j\66\2\u021b"+ - "\u021d\5:\36\2\u021c\u021b\3\2\2\2\u021c\u021d\3\2\2\2\u021d9\3\2\2\2"+ - "\u021e\u021f\7\35\2\2\u021f\u0225\5j\66\2\u0220\u0221\7^\2\2\u0221\u0222"+ - "\5j\66\2\u0222\u0223\7e\2\2\u0223\u0225\3\2\2\2\u0224\u021e\3\2\2\2\u0224"+ - "\u0220\3\2\2\2\u0225;\3\2\2\2\u0226\u0227\b\37\1\2\u0227\u022b\5> \2\u0228"+ - "\u0229\t\7\2\2\u0229\u022b\5<\37\6\u022a\u0226\3\2\2\2\u022a\u0228\3\2"+ - "\2\2\u022b\u0238\3\2\2\2\u022c\u022d\f\5\2\2\u022d\u022e\t\f\2\2\u022e"+ - "\u0237\5<\37\6\u022f\u0230\f\4\2\2\u0230\u0231\t\7\2\2\u0231\u0237\5<"+ - "\37\5\u0232\u0233\f\3\2\2\u0233\u0234\5T+\2\u0234\u0235\5<\37\4\u0235"+ - "\u0237\3\2\2\2\u0236\u022c\3\2\2\2\u0236\u022f\3\2\2\2\u0236\u0232\3\2"+ - "\2\2\u0237\u023a\3\2\2\2\u0238\u0236\3\2\2\2\u0238\u0239\3\2\2\2\u0239"+ - "=\3\2\2\2\u023a\u0238\3\2\2\2\u023b\u0250\5@!\2\u023c\u0250\5H%\2\u023d"+ - "\u0250\5D#\2\u023e\u0250\5R*\2\u023f\u0240\5^\60\2\u0240\u0241\7s\2\2"+ - "\u0241\u0243\3\2\2\2\u0242\u023f\3\2\2\2\u0242\u0243\3\2\2\2\u0243\u0244"+ - "\3\2\2\2\u0244\u0250\7o\2\2\u0245\u0250\5L\'\2\u0246\u0247\7\3\2\2\u0247"+ - "\u0248\5\b\5\2\u0248\u0249\7\4\2\2\u0249\u0250\3\2\2\2\u024a\u0250\5^"+ - "\60\2\u024b\u024c\7\3\2\2\u024c\u024d\5,\27\2\u024d\u024e\7\4\2\2\u024e"+ - "\u0250\3\2\2\2\u024f\u023b\3\2\2\2\u024f\u023c\3\2\2\2\u024f\u023d\3\2"+ - "\2\2\u024f\u023e\3\2\2\2\u024f\u0242\3\2\2\2\u024f\u0245\3\2\2\2\u024f"+ - "\u0246\3\2\2\2\u024f\u024a\3\2\2\2\u024f\u024b\3\2\2\2\u0250?\3\2\2\2"+ - "\u0251\u025c\5B\"\2\u0252\u0253\7_\2\2\u0253\u0254\5B\"\2\u0254\u0255"+ - "\7e\2\2\u0255\u025c\3\2\2\2\u0256\u025c\5F$\2\u0257\u0258\7_\2\2\u0258"+ - "\u0259\5F$\2\u0259\u025a\7e\2\2\u025a\u025c\3\2\2\2\u025b\u0251\3\2\2"+ - "\2\u025b\u0252\3\2\2\2\u025b\u0256\3\2\2\2\u025b\u0257\3\2\2\2\u025cA"+ - "\3\2\2\2\u025d\u025e\7\20\2\2\u025e\u025f\7\3\2\2\u025f\u0260\5,\27\2"+ - "\u0260\u0261\7\f\2\2\u0261\u0262\5\\/\2\u0262\u0263\7\4\2\2\u0263C\3\2"+ - "\2\2\u0264\u0267\7\25\2\2\u0265\u0266\7\3\2\2\u0266\u0268\7\4\2\2\u0267"+ - "\u0265\3\2\2\2\u0267\u0268\3\2\2\2\u0268\u0272\3\2\2\2\u0269\u026f\7\26"+ - "\2\2\u026a\u026c\7\3\2\2\u026b\u026d\7v\2\2\u026c\u026b\3\2\2\2\u026c"+ - "\u026d\3\2\2\2\u026d\u026e\3\2\2\2\u026e\u0270\7\4\2\2\u026f\u026a\3\2"+ - "\2\2\u026f\u0270\3\2\2\2\u0270\u0272\3\2\2\2\u0271\u0264\3\2\2\2\u0271"+ - "\u0269\3\2\2\2\u0272E\3\2\2\2\u0273\u0274\7\24\2\2\u0274\u0275\7\3\2\2"+ - "\u0275\u0276\5,\27\2\u0276\u0277\7\5\2\2\u0277\u0278\5\\/\2\u0278\u0279"+ - "\7\4\2\2\u0279G\3\2\2\2\u027a\u0280\5J&\2\u027b\u027c\7_\2\2\u027c\u027d"+ - "\5J&\2\u027d\u027e\7e\2\2\u027e\u0280\3\2\2\2\u027f\u027a\3\2\2\2\u027f"+ - "\u027b\3\2\2\2\u0280I\3\2\2\2\u0281\u0282\7!\2\2\u0282\u0283\7\3\2\2\u0283"+ - "\u0284\5`\61\2\u0284\u0285\7%\2\2\u0285\u0286\5<\37\2\u0286\u0287\7\4"+ - "\2\2\u0287K\3\2\2\2\u0288\u028e\5N(\2\u0289\u028a\7_\2\2\u028a\u028b\5"+ - "N(\2\u028b\u028c\7e\2\2\u028c\u028e\3\2\2\2\u028d\u0288\3\2\2\2\u028d"+ - "\u0289\3\2\2\2\u028eM\3\2\2\2\u028f\u0290\5P)\2\u0290\u029c\7\3\2\2\u0291"+ - "\u0293\5\36\20\2\u0292\u0291\3\2\2\2\u0292\u0293\3\2\2\2\u0293\u0294\3"+ - "\2\2\2\u0294\u0299\5,\27\2\u0295\u0296\7\5\2\2\u0296\u0298\5,\27\2\u0297"+ - "\u0295\3\2\2\2\u0298\u029b\3\2\2\2\u0299\u0297\3\2\2\2\u0299\u029a\3\2"+ - "\2\2\u029a\u029d\3\2\2\2\u029b\u0299\3\2\2\2\u029c\u0292\3\2\2\2\u029c"+ - 
"\u029d\3\2\2\2\u029d\u029e\3\2\2\2\u029e\u029f\7\4\2\2\u029fO\3\2\2\2"+ - "\u02a0\u02a4\7\63\2\2\u02a1\u02a4\7H\2\2\u02a2\u02a4\5`\61\2\u02a3\u02a0"+ - "\3\2\2\2\u02a3\u02a1\3\2\2\2\u02a3\u02a2\3\2\2\2\u02a4Q\3\2\2\2\u02a5"+ - "\u02c0\7>\2\2\u02a6\u02c0\5X-\2\u02a7\u02c0\5h\65\2\u02a8\u02c0\5V,\2"+ - "\u02a9\u02ab\7u\2\2\u02aa\u02a9\3\2\2\2\u02ab\u02ac\3\2\2\2\u02ac\u02aa"+ - "\3\2\2\2\u02ac\u02ad\3\2\2\2\u02ad\u02c0\3\2\2\2\u02ae\u02c0\7t\2\2\u02af"+ - "\u02b0\7a\2\2\u02b0\u02b1\5j\66\2\u02b1\u02b2\7e\2\2\u02b2\u02c0\3\2\2"+ - "\2\u02b3\u02b4\7b\2\2\u02b4\u02b5\5j\66\2\u02b5\u02b6\7e\2\2\u02b6\u02c0"+ - "\3\2\2\2\u02b7\u02b8\7c\2\2\u02b8\u02b9\5j\66\2\u02b9\u02ba\7e\2\2\u02ba"+ - "\u02c0\3\2\2\2\u02bb\u02bc\7d\2\2\u02bc\u02bd\5j\66\2\u02bd\u02be\7e\2"+ - "\2\u02be\u02c0\3\2\2\2\u02bf\u02a5\3\2\2\2\u02bf\u02a6\3\2\2\2\u02bf\u02a7"+ - "\3\2\2\2\u02bf\u02a8\3\2\2\2\u02bf\u02aa\3\2\2\2\u02bf\u02ae\3\2\2\2\u02bf"+ - "\u02af\3\2\2\2\u02bf\u02b3\3\2\2\2\u02bf\u02b7\3\2\2\2\u02bf\u02bb\3\2"+ - "\2\2\u02c0S\3\2\2\2\u02c1\u02c2\t\r\2\2\u02c2U\3\2\2\2\u02c3\u02c4\t\16"+ - "\2\2\u02c4W\3\2\2\2\u02c5\u02c7\7/\2\2\u02c6\u02c8\t\7\2\2\u02c7\u02c6"+ - "\3\2\2\2\u02c7\u02c8\3\2\2\2\u02c8\u02cb\3\2\2\2\u02c9\u02cc\5h\65\2\u02ca"+ - "\u02cc\5j\66\2\u02cb\u02c9\3\2\2\2\u02cb\u02ca\3\2\2\2\u02cc\u02cd\3\2"+ - "\2\2\u02cd\u02d0\5Z.\2\u02ce\u02cf\7U\2\2\u02cf\u02d1\5Z.\2\u02d0\u02ce"+ - "\3\2\2\2\u02d0\u02d1\3\2\2\2\u02d1Y\3\2\2\2\u02d2\u02d3\t\17\2\2\u02d3"+ - "[\3\2\2\2\u02d4\u02d5\5`\61\2\u02d5]\3\2\2\2\u02d6\u02d7\5`\61\2\u02d7"+ - "\u02d8\7s\2\2\u02d8\u02da\3\2\2\2\u02d9\u02d6\3\2\2\2\u02da\u02dd\3\2"+ - "\2\2\u02db\u02d9\3\2\2\2\u02db\u02dc\3\2\2\2\u02dc\u02de\3\2\2\2\u02dd"+ - "\u02db\3\2\2\2\u02de\u02df\5`\61\2\u02df_\3\2\2\2\u02e0\u02e3\5d\63\2"+ - "\u02e1\u02e3\5f\64\2\u02e2\u02e0\3\2\2\2\u02e2\u02e1\3\2\2\2\u02e3a\3"+ - "\2\2\2\u02e4\u02e5\5`\61\2\u02e5\u02e6\7\6\2\2\u02e6\u02e8\3\2\2\2\u02e7"+ - "\u02e4\3\2\2\2\u02e7\u02e8\3\2\2\2\u02e8\u02e9\3\2\2\2\u02e9\u02f1\7z"+ - "\2\2\u02ea\u02eb\5`\61\2\u02eb\u02ec\7\6\2\2\u02ec\u02ee\3\2\2\2\u02ed"+ - "\u02ea\3\2\2\2\u02ed\u02ee\3\2\2\2\u02ee\u02ef\3\2\2\2\u02ef\u02f1\5`"+ - "\61\2\u02f0\u02e7\3\2\2\2\u02f0\u02ed\3\2\2\2\u02f1c\3\2\2\2\u02f2\u02f5"+ - "\7{\2\2\u02f3\u02f5\7|\2\2\u02f4\u02f2\3\2\2\2\u02f4\u02f3\3\2\2\2\u02f5"+ - "e\3\2\2\2\u02f6\u02fa\7x\2\2\u02f7\u02fa\5l\67\2\u02f8\u02fa\7y\2\2\u02f9"+ - "\u02f6\3\2\2\2\u02f9\u02f7\3\2\2\2\u02f9\u02f8\3\2\2\2\u02fag\3\2\2\2"+ - "\u02fb\u02fe\7w\2\2\u02fc\u02fe\7v\2\2\u02fd\u02fb\3\2\2\2\u02fd\u02fc"+ - "\3\2\2\2\u02fei\3\2\2\2\u02ff\u0300\t\20\2\2\u0300k\3\2\2\2\u0301\u0302"+ - "\t\21\2\2\u0302m\3\2\2\2j}\177\u0083\u008c\u008e\u0092\u0099\u00a0\u00a5"+ + " \3 \3 \3 \3 \3 \3 \5 \u0250\n \3!\3!\5!\u0254\n!\3\"\3\"\3\"\3\"\3\""+ + "\3\"\3\"\3\"\3\"\3\"\5\"\u0260\n\"\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3"+ + "$\3$\3$\3%\3%\3%\3%\3%\5%\u0275\n%\3&\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3"+ + "\'\3\'\5\'\u0283\n\'\3(\3(\3(\5(\u0288\n(\3(\3(\3(\7(\u028d\n(\f(\16("+ + "\u0290\13(\5(\u0292\n(\3(\3(\3)\3)\3)\5)\u0299\n)\3*\3*\3*\3*\3*\6*\u02a0"+ + "\n*\r*\16*\u02a1\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\5"+ + "*\u02b5\n*\3+\3+\3,\3,\3-\3-\5-\u02bd\n-\3-\3-\5-\u02c1\n-\3-\3-\3-\5"+ + "-\u02c6\n-\3.\3.\3/\3/\3\60\3\60\3\60\7\60\u02cf\n\60\f\60\16\60\u02d2"+ + "\13\60\3\60\3\60\3\61\3\61\5\61\u02d8\n\61\3\62\3\62\3\62\5\62\u02dd\n"+ + "\62\3\62\3\62\3\62\3\62\5\62\u02e3\n\62\3\62\5\62\u02e6\n\62\3\63\3\63"+ + "\5\63\u02ea\n\63\3\64\3\64\3\64\5\64\u02ef\n\64\3\65\3\65\5\65\u02f3\n"+ + 
"\65\3\66\3\66\3\67\3\67\3\67\2\4.<8\2\4\6\b\n\f\16\20\22\24\26\30\32\34"+ + "\36 \"$&(*,.\60\62\64\668:<>@BDFHJLNPRTVXZ\\^`bdfhjl\2\22\b\2\7\7\t\t"+ + "\36\36\66\66AAEE\4\2((SS\4\2\t\tAA\4\2%%--\3\2\32\33\3\2mn\4\2\7\7vv\4"+ + "\2\r\r\32\32\4\2##\62\62\4\2\7\7\34\34\3\2oq\3\2fl\4\2\"\"TT\7\2\27\30"+ + "+,8;LM\\]\3\2tu\30\2\b\t\22\23\25\27\31\31\36\36 #$&(++//\62\62\65\66"+ + "88::AAEGILOPRSVWYY\\\\\u0353\2n\3\2\2\2\4q\3\2\2\2\6\u00d9\3\2\2\2\b\u00e4"+ + "\3\2\2\2\n\u00e8\3\2\2\2\f\u00fd\3\2\2\2\16\u0104\3\2\2\2\20\u0106\3\2"+ + "\2\2\22\u010e\3\2\2\2\24\u012a\3\2\2\2\26\u0134\3\2\2\2\30\u013e\3\2\2"+ + "\2\32\u014d\3\2\2\2\34\u014f\3\2\2\2\36\u0155\3\2\2\2 \u0157\3\2\2\2\""+ + "\u015e\3\2\2\2$\u0170\3\2\2\2&\u0181\3\2\2\2(\u0191\3\2\2\2*\u01ac\3\2"+ + "\2\2,\u01ae\3\2\2\2.\u01cf\3\2\2\2\60\u01e0\3\2\2\2\62\u01e3\3\2\2\2\64"+ + "\u0215\3\2\2\2\66\u0217\3\2\2\28\u021a\3\2\2\2:\u0224\3\2\2\2<\u022a\3"+ + "\2\2\2>\u024f\3\2\2\2@\u0253\3\2\2\2B\u025f\3\2\2\2D\u0261\3\2\2\2F\u0268"+ + "\3\2\2\2H\u0274\3\2\2\2J\u0276\3\2\2\2L\u0282\3\2\2\2N\u0284\3\2\2\2P"+ + "\u0298\3\2\2\2R\u02b4\3\2\2\2T\u02b6\3\2\2\2V\u02b8\3\2\2\2X\u02ba\3\2"+ + "\2\2Z\u02c7\3\2\2\2\\\u02c9\3\2\2\2^\u02d0\3\2\2\2`\u02d7\3\2\2\2b\u02e5"+ + "\3\2\2\2d\u02e9\3\2\2\2f\u02ee\3\2\2\2h\u02f2\3\2\2\2j\u02f4\3\2\2\2l"+ + "\u02f6\3\2\2\2no\5\6\4\2op\7\2\2\3p\3\3\2\2\2qr\5,\27\2rs\7\2\2\3s\5\3"+ + "\2\2\2t\u00da\5\b\5\2u\u0083\7 \2\2v\177\7\3\2\2wx\7G\2\2x~\t\2\2\2yz"+ + "\7$\2\2z~\t\3\2\2{|\7Y\2\2|~\5V,\2}w\3\2\2\2}y\3\2\2\2}{\3\2\2\2~\u0081"+ + "\3\2\2\2\177}\3\2\2\2\177\u0080\3\2\2\2\u0080\u0082\3\2\2\2\u0081\177"+ + "\3\2\2\2\u0082\u0084\7\4\2\2\u0083v\3\2\2\2\u0083\u0084\3\2\2\2\u0084"+ + "\u0085\3\2\2\2\u0085\u00da\5\6\4\2\u0086\u0092\7\31\2\2\u0087\u008e\7"+ + "\3\2\2\u0088\u0089\7G\2\2\u0089\u008d\t\4\2\2\u008a\u008b\7$\2\2\u008b"+ + "\u008d\t\3\2\2\u008c\u0088\3\2\2\2\u008c\u008a\3\2\2\2\u008d\u0090\3\2"+ + "\2\2\u008e\u008c\3\2\2\2\u008e\u008f\3\2\2\2\u008f\u0091\3\2\2\2\u0090"+ + "\u008e\3\2\2\2\u0091\u0093\7\4\2\2\u0092\u0087\3\2\2\2\u0092\u0093\3\2"+ + "\2\2\u0093\u0094\3\2\2\2\u0094\u00da\5\6\4\2\u0095\u0096\7O\2\2\u0096"+ + "\u0099\7R\2\2\u0097\u009a\5\66\34\2\u0098\u009a\5b\62\2\u0099\u0097\3"+ + "\2\2\2\u0099\u0098\3\2\2\2\u0099\u009a\3\2\2\2\u009a\u00da\3\2\2\2\u009b"+ + "\u009c\7O\2\2\u009c\u009d\7\23\2\2\u009d\u00a0\t\5\2\2\u009e\u00a1\5\66"+ + "\34\2\u009f\u00a1\5b\62\2\u00a0\u009e\3\2\2\2\u00a0\u009f\3\2\2\2\u00a1"+ + "\u00da\3\2\2\2\u00a2\u00a5\t\6\2\2\u00a3\u00a6\5\66\34\2\u00a4\u00a6\5"+ + "b\62\2\u00a5\u00a3\3\2\2\2\u00a5\u00a4\3\2\2\2\u00a6\u00da\3\2\2\2\u00a7"+ + "\u00a8\7O\2\2\u00a8\u00aa\7\'\2\2\u00a9\u00ab\5\66\34\2\u00aa\u00a9\3"+ + "\2\2\2\u00aa\u00ab\3\2\2\2\u00ab\u00da\3\2\2\2\u00ac\u00ad\7O\2\2\u00ad"+ + "\u00da\7K\2\2\u00ae\u00af\7P\2\2\u00af\u00b2\7R\2\2\u00b0\u00b1\7\21\2"+ + "\2\u00b1\u00b3\5\66\34\2\u00b2\u00b0\3\2\2\2\u00b2\u00b3\3\2\2\2\u00b3"+ + "\u00b6\3\2\2\2\u00b4\u00b7\5\66\34\2\u00b5\u00b7\5b\62\2\u00b6\u00b4\3"+ + "\2\2\2\u00b6\u00b5\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00c1\3\2\2\2\u00b8"+ + "\u00b9\7V\2\2\u00b9\u00be\5j\66\2\u00ba\u00bb\7\5\2\2\u00bb\u00bd\5j\66"+ + "\2\u00bc\u00ba\3\2\2\2\u00bd\u00c0\3\2\2\2\u00be\u00bc\3\2\2\2\u00be\u00bf"+ + "\3\2\2\2\u00bf\u00c2\3\2\2\2\u00c0\u00be\3\2\2\2\u00c1\u00b8\3\2\2\2\u00c1"+ + "\u00c2\3\2\2\2\u00c2\u00da\3\2\2\2\u00c3\u00c4\7P\2\2\u00c4\u00c7\7\23"+ + "\2\2\u00c5\u00c6\7\21\2\2\u00c6\u00c8\5j\66\2\u00c7\u00c5\3\2\2\2\u00c7"+ + "\u00c8\3\2\2\2\u00c8\u00cc\3\2\2\2\u00c9\u00ca\7Q\2\2\u00ca\u00cd\5\66"+ + 
"\34\2\u00cb\u00cd\5b\62\2\u00cc\u00c9\3\2\2\2\u00cc\u00cb\3\2\2\2\u00cc"+ + "\u00cd\3\2\2\2\u00cd\u00cf\3\2\2\2\u00ce\u00d0\5\66\34\2\u00cf\u00ce\3"+ + "\2\2\2\u00cf\u00d0\3\2\2\2\u00d0\u00da\3\2\2\2\u00d1\u00d2\7P\2\2\u00d2"+ + "\u00d7\7W\2\2\u00d3\u00d5\t\7\2\2\u00d4\u00d3\3\2\2\2\u00d4\u00d5\3\2"+ + "\2\2\u00d5\u00d6\3\2\2\2\u00d6\u00d8\5h\65\2\u00d7\u00d4\3\2\2\2\u00d7"+ + "\u00d8\3\2\2\2\u00d8\u00da\3\2\2\2\u00d9t\3\2\2\2\u00d9u\3\2\2\2\u00d9"+ + "\u0086\3\2\2\2\u00d9\u0095\3\2\2\2\u00d9\u009b\3\2\2\2\u00d9\u00a2\3\2"+ + "\2\2\u00d9\u00a7\3\2\2\2\u00d9\u00ac\3\2\2\2\u00d9\u00ae\3\2\2\2\u00d9"+ + "\u00c3\3\2\2\2\u00d9\u00d1\3\2\2\2\u00da\7\3\2\2\2\u00db\u00dc\7[\2\2"+ + "\u00dc\u00e1\5\34\17\2\u00dd\u00de\7\5\2\2\u00de\u00e0\5\34\17\2\u00df"+ + "\u00dd\3\2\2\2\u00e0\u00e3\3\2\2\2\u00e1\u00df\3\2\2\2\u00e1\u00e2\3\2"+ + "\2\2\u00e2\u00e5\3\2\2\2\u00e3\u00e1\3\2\2\2\u00e4\u00db\3\2\2\2\u00e4"+ + "\u00e5\3\2\2\2\u00e5\u00e6\3\2\2\2\u00e6\u00e7\5\n\6\2\u00e7\t\3\2\2\2"+ + "\u00e8\u00f3\5\16\b\2\u00e9\u00ea\7C\2\2\u00ea\u00eb\7\17\2\2\u00eb\u00f0"+ + "\5\20\t\2\u00ec\u00ed\7\5\2\2\u00ed\u00ef\5\20\t\2\u00ee\u00ec\3\2\2\2"+ + "\u00ef\u00f2\3\2\2\2\u00f0\u00ee\3\2\2\2\u00f0\u00f1\3\2\2\2\u00f1\u00f4"+ + "\3\2\2\2\u00f2\u00f0\3\2\2\2\u00f3\u00e9\3\2\2\2\u00f3\u00f4\3\2\2\2\u00f4"+ + "\u00f6\3\2\2\2\u00f5\u00f7\5\f\7\2\u00f6\u00f5\3\2\2\2\u00f6\u00f7\3\2"+ + "\2\2\u00f7\13\3\2\2\2\u00f8\u00f9\7\65\2\2\u00f9\u00fe\t\b\2\2\u00fa\u00fb"+ + "\7`\2\2\u00fb\u00fc\t\b\2\2\u00fc\u00fe\7e\2\2\u00fd\u00f8\3\2\2\2\u00fd"+ + "\u00fa\3\2\2\2\u00fe\r\3\2\2\2\u00ff\u0105\5\22\n\2\u0100\u0101\7\3\2"+ + "\2\u0101\u0102\5\n\6\2\u0102\u0103\7\4\2\2\u0103\u0105\3\2\2\2\u0104\u00ff"+ + "\3\2\2\2\u0104\u0100\3\2\2\2\u0105\17\3\2\2\2\u0106\u0108\5,\27\2\u0107"+ + "\u0109\t\t\2\2\u0108\u0107\3\2\2\2\u0108\u0109\3\2\2\2\u0109\u010c\3\2"+ + "\2\2\u010a\u010b\7?\2\2\u010b\u010d\t\n\2\2\u010c\u010a\3\2\2\2\u010c"+ + "\u010d\3\2\2\2\u010d\21\3\2\2\2\u010e\u0110\7N\2\2\u010f\u0111\5\36\20"+ + "\2\u0110\u010f\3\2\2\2\u0110\u0111\3\2\2\2\u0111\u0112\3\2\2\2\u0112\u0117"+ + "\5 \21\2\u0113\u0114\7\5\2\2\u0114\u0116\5 \21\2\u0115\u0113\3\2\2\2\u0116"+ + "\u0119\3\2\2\2\u0117\u0115\3\2\2\2\u0117\u0118\3\2\2\2\u0118\u011b\3\2"+ + "\2\2\u0119\u0117\3\2\2\2\u011a\u011c\5\24\13\2\u011b\u011a\3\2\2\2\u011b"+ + "\u011c\3\2\2\2\u011c\u011f\3\2\2\2\u011d\u011e\7Z\2\2\u011e\u0120\5.\30"+ + "\2\u011f\u011d\3\2\2\2\u011f\u0120\3\2\2\2\u0120\u0124\3\2\2\2\u0121\u0122"+ + "\7)\2\2\u0122\u0123\7\17\2\2\u0123\u0125\5\26\f\2\u0124\u0121\3\2\2\2"+ + "\u0124\u0125\3\2\2\2\u0125\u0128\3\2\2\2\u0126\u0127\7*\2\2\u0127\u0129"+ + "\5.\30\2\u0128\u0126\3\2\2\2\u0128\u0129\3\2\2\2\u0129\23\3\2\2\2\u012a"+ + "\u012b\7%\2\2\u012b\u0130\5\"\22\2\u012c\u012d\7\5\2\2\u012d\u012f\5\""+ + "\22\2\u012e\u012c\3\2\2\2\u012f\u0132\3\2\2\2\u0130\u012e\3\2\2\2\u0130"+ + "\u0131\3\2\2\2\u0131\25\3\2\2\2\u0132\u0130\3\2\2\2\u0133\u0135\5\36\20"+ + "\2\u0134\u0133\3\2\2\2\u0134\u0135\3\2\2\2\u0135\u0136\3\2\2\2\u0136\u013b"+ + "\5\30\r\2\u0137\u0138\7\5\2\2\u0138\u013a\5\30\r\2\u0139\u0137\3\2\2\2"+ + "\u013a\u013d\3\2\2\2\u013b\u0139\3\2\2\2\u013b\u013c\3\2\2\2\u013c\27"+ + "\3\2\2\2\u013d\u013b\3\2\2\2\u013e\u013f\5\32\16\2\u013f\31\3\2\2\2\u0140"+ + "\u0149\7\3\2\2\u0141\u0146\5,\27\2\u0142\u0143\7\5\2\2\u0143\u0145\5,"+ + "\27\2\u0144\u0142\3\2\2\2\u0145\u0148\3\2\2\2\u0146\u0144\3\2\2\2\u0146"+ + "\u0147\3\2\2\2\u0147\u014a\3\2\2\2\u0148\u0146\3\2\2\2\u0149\u0141\3\2"+ + "\2\2\u0149\u014a\3\2\2\2\u014a\u014b\3\2\2\2\u014b\u014e\7\4\2\2\u014c"+ + 
"\u014e\5,\27\2\u014d\u0140\3\2\2\2\u014d\u014c\3\2\2\2\u014e\33\3\2\2"+ + "\2\u014f\u0150\5`\61\2\u0150\u0151\7\f\2\2\u0151\u0152\7\3\2\2\u0152\u0153"+ + "\5\n\6\2\u0153\u0154\7\4\2\2\u0154\35\3\2\2\2\u0155\u0156\t\13\2\2\u0156"+ + "\37\3\2\2\2\u0157\u015c\5,\27\2\u0158\u015a\7\f\2\2\u0159\u0158\3\2\2"+ + "\2\u0159\u015a\3\2\2\2\u015a\u015b\3\2\2\2\u015b\u015d\5`\61\2\u015c\u0159"+ + "\3\2\2\2\u015c\u015d\3\2\2\2\u015d!\3\2\2\2\u015e\u0162\5*\26\2\u015f"+ + "\u0161\5$\23\2\u0160\u015f\3\2\2\2\u0161\u0164\3\2\2\2\u0162\u0160\3\2"+ + "\2\2\u0162\u0163\3\2\2\2\u0163#\3\2\2\2\u0164\u0162\3\2\2\2\u0165\u0166"+ + "\5&\24\2\u0166\u0167\7\61\2\2\u0167\u0169\5*\26\2\u0168\u016a\5(\25\2"+ + "\u0169\u0168\3\2\2\2\u0169\u016a\3\2\2\2\u016a\u0171\3\2\2\2\u016b\u016c"+ + "\7<\2\2\u016c\u016d\5&\24\2\u016d\u016e\7\61\2\2\u016e\u016f\5*\26\2\u016f"+ + "\u0171\3\2\2\2\u0170\u0165\3\2\2\2\u0170\u016b\3\2\2\2\u0171%\3\2\2\2"+ + "\u0172\u0174\7.\2\2\u0173\u0172\3\2\2\2\u0173\u0174\3\2\2\2\u0174\u0182"+ + "\3\2\2\2\u0175\u0177\7\63\2\2\u0176\u0178\7D\2\2\u0177\u0176\3\2\2\2\u0177"+ + "\u0178\3\2\2\2\u0178\u0182\3\2\2\2\u0179\u017b\7H\2\2\u017a\u017c\7D\2"+ + "\2\u017b\u017a\3\2\2\2\u017b\u017c\3\2\2\2\u017c\u0182\3\2\2\2\u017d\u017f"+ + "\7&\2\2\u017e\u0180\7D\2\2\u017f\u017e\3\2\2\2\u017f\u0180\3\2\2\2\u0180"+ + "\u0182\3\2\2\2\u0181\u0173\3\2\2\2\u0181\u0175\3\2\2\2\u0181\u0179\3\2"+ + "\2\2\u0181\u017d\3\2\2\2\u0182\'\3\2\2\2\u0183\u0184\7@\2\2\u0184\u0192"+ + "\5.\30\2\u0185\u0186\7X\2\2\u0186\u0187\7\3\2\2\u0187\u018c\5`\61\2\u0188"+ + "\u0189\7\5\2\2\u0189\u018b\5`\61\2\u018a\u0188\3\2\2\2\u018b\u018e\3\2"+ + "\2\2\u018c\u018a\3\2\2\2\u018c\u018d\3\2\2\2\u018d\u018f\3\2\2\2\u018e"+ + "\u018c\3\2\2\2\u018f\u0190\7\4\2\2\u0190\u0192\3\2\2\2\u0191\u0183\3\2"+ + "\2\2\u0191\u0185\3\2\2\2\u0192)\3\2\2\2\u0193\u0198\5b\62\2\u0194\u0196"+ + "\7\f\2\2\u0195\u0194\3\2\2\2\u0195\u0196\3\2\2\2\u0196\u0197\3\2\2\2\u0197"+ + "\u0199\5^\60\2\u0198\u0195\3\2\2\2\u0198\u0199\3\2\2\2\u0199\u01ad\3\2"+ + "\2\2\u019a\u019b\7\3\2\2\u019b\u019c\5\n\6\2\u019c\u01a1\7\4\2\2\u019d"+ + "\u019f\7\f\2\2\u019e\u019d\3\2\2\2\u019e\u019f\3\2\2\2\u019f\u01a0\3\2"+ + "\2\2\u01a0\u01a2\5^\60\2\u01a1\u019e\3\2\2\2\u01a1\u01a2\3\2\2\2\u01a2"+ + "\u01ad\3\2\2\2\u01a3\u01a4\7\3\2\2\u01a4\u01a5\5\"\22\2\u01a5\u01aa\7"+ + "\4\2\2\u01a6\u01a8\7\f\2\2\u01a7\u01a6\3\2\2\2\u01a7\u01a8\3\2\2\2\u01a8"+ + "\u01a9\3\2\2\2\u01a9\u01ab\5^\60\2\u01aa\u01a7\3\2\2\2\u01aa\u01ab\3\2"+ + "\2\2\u01ab\u01ad\3\2\2\2\u01ac\u0193\3\2\2\2\u01ac\u019a\3\2\2\2\u01ac"+ + "\u01a3\3\2\2\2\u01ad+\3\2\2\2\u01ae\u01af\5.\30\2\u01af-\3\2\2\2\u01b0"+ + "\u01b1\b\30\1\2\u01b1\u01b2\7=\2\2\u01b2\u01d0\5.\30\n\u01b3\u01b4\7\37"+ + "\2\2\u01b4\u01b5\7\3\2\2\u01b5\u01b6\5\b\5\2\u01b6\u01b7\7\4\2\2\u01b7"+ + "\u01d0\3\2\2\2\u01b8\u01b9\7J\2\2\u01b9\u01ba\7\3\2\2\u01ba\u01bb\5j\66"+ + "\2\u01bb\u01bc\5\60\31\2\u01bc\u01bd\7\4\2\2\u01bd\u01d0\3\2\2\2\u01be"+ + "\u01bf\7\67\2\2\u01bf\u01c0\7\3\2\2\u01c0\u01c1\5^\60\2\u01c1\u01c2\7"+ + "\5\2\2\u01c2\u01c3\5j\66\2\u01c3\u01c4\5\60\31\2\u01c4\u01c5\7\4\2\2\u01c5"+ + "\u01d0\3\2\2\2\u01c6\u01c7\7\67\2\2\u01c7\u01c8\7\3\2\2\u01c8\u01c9\5"+ + "j\66\2\u01c9\u01ca\7\5\2\2\u01ca\u01cb\5j\66\2\u01cb\u01cc\5\60\31\2\u01cc"+ + "\u01cd\7\4\2\2\u01cd\u01d0\3\2\2\2\u01ce\u01d0\5\62\32\2\u01cf\u01b0\3"+ + "\2\2\2\u01cf\u01b3\3\2\2\2\u01cf\u01b8\3\2\2\2\u01cf\u01be\3\2\2\2\u01cf"+ + "\u01c6\3\2\2\2\u01cf\u01ce\3\2\2\2\u01d0\u01d9\3\2\2\2\u01d1\u01d2\f\4"+ + "\2\2\u01d2\u01d3\7\n\2\2\u01d3\u01d8\5.\30\5\u01d4\u01d5\f\3\2\2\u01d5"+ + 
"\u01d6\7B\2\2\u01d6\u01d8\5.\30\4\u01d7\u01d1\3\2\2\2\u01d7\u01d4\3\2"+ + "\2\2\u01d8\u01db\3\2\2\2\u01d9\u01d7\3\2\2\2\u01d9\u01da\3\2\2\2\u01da"+ + "/\3\2\2\2\u01db\u01d9\3\2\2\2\u01dc\u01dd\7\5\2\2\u01dd\u01df\5j\66\2"+ + "\u01de\u01dc\3\2\2\2\u01df\u01e2\3\2\2\2\u01e0\u01de\3\2\2\2\u01e0\u01e1"+ + "\3\2\2\2\u01e1\61\3\2\2\2\u01e2\u01e0\3\2\2\2\u01e3\u01e5\5<\37\2\u01e4"+ + "\u01e6\5\64\33\2\u01e5\u01e4\3\2\2\2\u01e5\u01e6\3\2\2\2\u01e6\63\3\2"+ + "\2\2\u01e7\u01e9\7=\2\2\u01e8\u01e7\3\2\2\2\u01e8\u01e9\3\2\2\2\u01e9"+ + "\u01ea\3\2\2\2\u01ea\u01eb\7\16\2\2\u01eb\u01ec\5<\37\2\u01ec\u01ed\7"+ + "\n\2\2\u01ed\u01ee\5<\37\2\u01ee\u0216\3\2\2\2\u01ef\u01f1\7=\2\2\u01f0"+ + "\u01ef\3\2\2\2\u01f0\u01f1\3\2\2\2\u01f1\u01f2\3\2\2\2\u01f2\u01f3\7-"+ + "\2\2\u01f3\u01f4\7\3\2\2\u01f4\u01f9\5<\37\2\u01f5\u01f6\7\5\2\2\u01f6"+ + "\u01f8\5<\37\2\u01f7\u01f5\3\2\2\2\u01f8\u01fb\3\2\2\2\u01f9\u01f7\3\2"+ + "\2\2\u01f9\u01fa\3\2\2\2\u01fa\u01fc\3\2\2\2\u01fb\u01f9\3\2\2\2\u01fc"+ + "\u01fd\7\4\2\2\u01fd\u0216\3\2\2\2\u01fe\u0200\7=\2\2\u01ff\u01fe\3\2"+ + "\2\2\u01ff\u0200\3\2\2\2\u0200\u0201\3\2\2\2\u0201\u0202\7-\2\2\u0202"+ + "\u0203\7\3\2\2\u0203\u0204\5\b\5\2\u0204\u0205\7\4\2\2\u0205\u0216\3\2"+ + "\2\2\u0206\u0208\7=\2\2\u0207\u0206\3\2\2\2\u0207\u0208\3\2\2\2\u0208"+ + "\u0209\3\2\2\2\u0209\u020a\7\64\2\2\u020a\u0216\58\35\2\u020b\u020d\7"+ + "=\2\2\u020c\u020b\3\2\2\2\u020c\u020d\3\2\2\2\u020d\u020e\3\2\2\2\u020e"+ + "\u020f\7I\2\2\u020f\u0216\5j\66\2\u0210\u0212\7\60\2\2\u0211\u0213\7="+ + "\2\2\u0212\u0211\3\2\2\2\u0212\u0213\3\2\2\2\u0213\u0214\3\2\2\2\u0214"+ + "\u0216\7>\2\2\u0215\u01e8\3\2\2\2\u0215\u01f0\3\2\2\2\u0215\u01ff\3\2"+ + "\2\2\u0215\u0207\3\2\2\2\u0215\u020c\3\2\2\2\u0215\u0210\3\2\2\2\u0216"+ + "\65\3\2\2\2\u0217\u0218\7\64\2\2\u0218\u0219\58\35\2\u0219\67\3\2\2\2"+ + "\u021a\u021c\5j\66\2\u021b\u021d\5:\36\2\u021c\u021b\3\2\2\2\u021c\u021d"+ + "\3\2\2\2\u021d9\3\2\2\2\u021e\u021f\7\35\2\2\u021f\u0225\5j\66\2\u0220"+ + "\u0221\7^\2\2\u0221\u0222\5j\66\2\u0222\u0223\7e\2\2\u0223\u0225\3\2\2"+ + "\2\u0224\u021e\3\2\2\2\u0224\u0220\3\2\2\2\u0225;\3\2\2\2\u0226\u0227"+ + "\b\37\1\2\u0227\u022b\5> \2\u0228\u0229\t\7\2\2\u0229\u022b\5<\37\6\u022a"+ + "\u0226\3\2\2\2\u022a\u0228\3\2\2\2\u022b\u0238\3\2\2\2\u022c\u022d\f\5"+ + "\2\2\u022d\u022e\t\f\2\2\u022e\u0237\5<\37\6\u022f\u0230\f\4\2\2\u0230"+ + "\u0231\t\7\2\2\u0231\u0237\5<\37\5\u0232\u0233\f\3\2\2\u0233\u0234\5T"+ + "+\2\u0234\u0235\5<\37\4\u0235\u0237\3\2\2\2\u0236\u022c\3\2\2\2\u0236"+ + "\u022f\3\2\2\2\u0236\u0232\3\2\2\2\u0237\u023a\3\2\2\2\u0238\u0236\3\2"+ + "\2\2\u0238\u0239\3\2\2\2\u0239=\3\2\2\2\u023a\u0238\3\2\2\2\u023b\u0250"+ + "\5B\"\2\u023c\u0250\5H%\2\u023d\u0250\5@!\2\u023e\u0250\5R*\2\u023f\u0240"+ + "\5^\60\2\u0240\u0241\7s\2\2\u0241\u0243\3\2\2\2\u0242\u023f\3\2\2\2\u0242"+ + "\u0243\3\2\2\2\u0243\u0244\3\2\2\2\u0244\u0250\7o\2\2\u0245\u0250\5L\'"+ + "\2\u0246\u0247\7\3\2\2\u0247\u0248\5\b\5\2\u0248\u0249\7\4\2\2\u0249\u0250"+ + "\3\2\2\2\u024a\u0250\5^\60\2\u024b\u024c\7\3\2\2\u024c\u024d\5,\27\2\u024d"+ + "\u024e\7\4\2\2\u024e\u0250\3\2\2\2\u024f\u023b\3\2\2\2\u024f\u023c\3\2"+ + "\2\2\u024f\u023d\3\2\2\2\u024f\u023e\3\2\2\2\u024f\u0242\3\2\2\2\u024f"+ + "\u0245\3\2\2\2\u024f\u0246\3\2\2\2\u024f\u024a\3\2\2\2\u024f\u024b\3\2"+ + "\2\2\u0250?\3\2\2\2\u0251\u0254\7\26\2\2\u0252\u0254\7\25\2\2\u0253\u0251"+ + "\3\2\2\2\u0253\u0252\3\2\2\2\u0254A\3\2\2\2\u0255\u0260\5D#\2\u0256\u0257"+ + "\7_\2\2\u0257\u0258\5D#\2\u0258\u0259\7e\2\2\u0259\u0260\3\2\2\2\u025a"+ + 
"\u0260\5F$\2\u025b\u025c\7_\2\2\u025c\u025d\5F$\2\u025d\u025e\7e\2\2\u025e"+ + "\u0260\3\2\2\2\u025f\u0255\3\2\2\2\u025f\u0256\3\2\2\2\u025f\u025a\3\2"+ + "\2\2\u025f\u025b\3\2\2\2\u0260C\3\2\2\2\u0261\u0262\7\20\2\2\u0262\u0263"+ + "\7\3\2\2\u0263\u0264\5,\27\2\u0264\u0265\7\f\2\2\u0265\u0266\5\\/\2\u0266"+ + "\u0267\7\4\2\2\u0267E\3\2\2\2\u0268\u0269\7\24\2\2\u0269\u026a\7\3\2\2"+ + "\u026a\u026b\5,\27\2\u026b\u026c\7\5\2\2\u026c\u026d\5\\/\2\u026d\u026e"+ + "\7\4\2\2\u026eG\3\2\2\2\u026f\u0275\5J&\2\u0270\u0271\7_\2\2\u0271\u0272"+ + "\5J&\2\u0272\u0273\7e\2\2\u0273\u0275\3\2\2\2\u0274\u026f\3\2\2\2\u0274"+ + "\u0270\3\2\2\2\u0275I\3\2\2\2\u0276\u0277\7!\2\2\u0277\u0278\7\3\2\2\u0278"+ + "\u0279\5`\61\2\u0279\u027a\7%\2\2\u027a\u027b\5<\37\2\u027b\u027c\7\4"+ + "\2\2\u027cK\3\2\2\2\u027d\u0283\5N(\2\u027e\u027f\7_\2\2\u027f\u0280\5"+ + "N(\2\u0280\u0281\7e\2\2\u0281\u0283\3\2\2\2\u0282\u027d\3\2\2\2\u0282"+ + "\u027e\3\2\2\2\u0283M\3\2\2\2\u0284\u0285\5P)\2\u0285\u0291\7\3\2\2\u0286"+ + "\u0288\5\36\20\2\u0287\u0286\3\2\2\2\u0287\u0288\3\2\2\2\u0288\u0289\3"+ + "\2\2\2\u0289\u028e\5,\27\2\u028a\u028b\7\5\2\2\u028b\u028d\5,\27\2\u028c"+ + "\u028a\3\2\2\2\u028d\u0290\3\2\2\2\u028e\u028c\3\2\2\2\u028e\u028f\3\2"+ + "\2\2\u028f\u0292\3\2\2\2\u0290\u028e\3\2\2\2\u0291\u0287\3\2\2\2\u0291"+ + "\u0292\3\2\2\2\u0292\u0293\3\2\2\2\u0293\u0294\7\4\2\2\u0294O\3\2\2\2"+ + "\u0295\u0299\7\63\2\2\u0296\u0299\7H\2\2\u0297\u0299\5`\61\2\u0298\u0295"+ + "\3\2\2\2\u0298\u0296\3\2\2\2\u0298\u0297\3\2\2\2\u0299Q\3\2\2\2\u029a"+ + "\u02b5\7>\2\2\u029b\u02b5\5X-\2\u029c\u02b5\5h\65\2\u029d\u02b5\5V,\2"+ + "\u029e\u02a0\7u\2\2\u029f\u029e\3\2\2\2\u02a0\u02a1\3\2\2\2\u02a1\u029f"+ + "\3\2\2\2\u02a1\u02a2\3\2\2\2\u02a2\u02b5\3\2\2\2\u02a3\u02b5\7t\2\2\u02a4"+ + "\u02a5\7a\2\2\u02a5\u02a6\5j\66\2\u02a6\u02a7\7e\2\2\u02a7\u02b5\3\2\2"+ + "\2\u02a8\u02a9\7b\2\2\u02a9\u02aa\5j\66\2\u02aa\u02ab\7e\2\2\u02ab\u02b5"+ + "\3\2\2\2\u02ac\u02ad\7c\2\2\u02ad\u02ae\5j\66\2\u02ae\u02af\7e\2\2\u02af"+ + "\u02b5\3\2\2\2\u02b0\u02b1\7d\2\2\u02b1\u02b2\5j\66\2\u02b2\u02b3\7e\2"+ + "\2\u02b3\u02b5\3\2\2\2\u02b4\u029a\3\2\2\2\u02b4\u029b\3\2\2\2\u02b4\u029c"+ + "\3\2\2\2\u02b4\u029d\3\2\2\2\u02b4\u029f\3\2\2\2\u02b4\u02a3\3\2\2\2\u02b4"+ + "\u02a4\3\2\2\2\u02b4\u02a8\3\2\2\2\u02b4\u02ac\3\2\2\2\u02b4\u02b0\3\2"+ + "\2\2\u02b5S\3\2\2\2\u02b6\u02b7\t\r\2\2\u02b7U\3\2\2\2\u02b8\u02b9\t\16"+ + "\2\2\u02b9W\3\2\2\2\u02ba\u02bc\7/\2\2\u02bb\u02bd\t\7\2\2\u02bc\u02bb"+ + "\3\2\2\2\u02bc\u02bd\3\2\2\2\u02bd\u02c0\3\2\2\2\u02be\u02c1\5h\65\2\u02bf"+ + "\u02c1\5j\66\2\u02c0\u02be\3\2\2\2\u02c0\u02bf\3\2\2\2\u02c1\u02c2\3\2"+ + "\2\2\u02c2\u02c5\5Z.\2\u02c3\u02c4\7U\2\2\u02c4\u02c6\5Z.\2\u02c5\u02c3"+ + "\3\2\2\2\u02c5\u02c6\3\2\2\2\u02c6Y\3\2\2\2\u02c7\u02c8\t\17\2\2\u02c8"+ + "[\3\2\2\2\u02c9\u02ca\5`\61\2\u02ca]\3\2\2\2\u02cb\u02cc\5`\61\2\u02cc"+ + "\u02cd\7s\2\2\u02cd\u02cf\3\2\2\2\u02ce\u02cb\3\2\2\2\u02cf\u02d2\3\2"+ + "\2\2\u02d0\u02ce\3\2\2\2\u02d0\u02d1\3\2\2\2\u02d1\u02d3\3\2\2\2\u02d2"+ + "\u02d0\3\2\2\2\u02d3\u02d4\5`\61\2\u02d4_\3\2\2\2\u02d5\u02d8\5d\63\2"+ + "\u02d6\u02d8\5f\64\2\u02d7\u02d5\3\2\2\2\u02d7\u02d6\3\2\2\2\u02d8a\3"+ + "\2\2\2\u02d9\u02da\5`\61\2\u02da\u02db\7\6\2\2\u02db\u02dd\3\2\2\2\u02dc"+ + "\u02d9\3\2\2\2\u02dc\u02dd\3\2\2\2\u02dd\u02de\3\2\2\2\u02de\u02e6\7z"+ + "\2\2\u02df\u02e0\5`\61\2\u02e0\u02e1\7\6\2\2\u02e1\u02e3\3\2\2\2\u02e2"+ + "\u02df\3\2\2\2\u02e2\u02e3\3\2\2\2\u02e3\u02e4\3\2\2\2\u02e4\u02e6\5`"+ + "\61\2\u02e5\u02dc\3\2\2\2\u02e5\u02e2\3\2\2\2\u02e6c\3\2\2\2\u02e7\u02ea"+ + 
"\7{\2\2\u02e8\u02ea\7|\2\2\u02e9\u02e7\3\2\2\2\u02e9\u02e8\3\2\2\2\u02ea"+ + "e\3\2\2\2\u02eb\u02ef\7x\2\2\u02ec\u02ef\5l\67\2\u02ed\u02ef\7y\2\2\u02ee"+ + "\u02eb\3\2\2\2\u02ee\u02ec\3\2\2\2\u02ee\u02ed\3\2\2\2\u02efg\3\2\2\2"+ + "\u02f0\u02f3\7w\2\2\u02f1\u02f3\7v\2\2\u02f2\u02f0\3\2\2\2\u02f2\u02f1"+ + "\3\2\2\2\u02f3i\3\2\2\2\u02f4\u02f5\t\20\2\2\u02f5k\3\2\2\2\u02f6\u02f7"+ + "\t\21\2\2\u02f7m\3\2\2\2g}\177\u0083\u008c\u008e\u0092\u0099\u00a0\u00a5"+ "\u00aa\u00b2\u00b6\u00be\u00c1\u00c7\u00cc\u00cf\u00d4\u00d7\u00d9\u00e1"+ "\u00e4\u00f0\u00f3\u00f6\u00fd\u0104\u0108\u010c\u0110\u0117\u011b\u011f"+ "\u0124\u0128\u0130\u0134\u013b\u0146\u0149\u014d\u0159\u015c\u0162\u0169"+ "\u0170\u0173\u0177\u017b\u017f\u0181\u018c\u0191\u0195\u0198\u019e\u01a1"+ "\u01a7\u01aa\u01ac\u01cf\u01d7\u01d9\u01e0\u01e5\u01e8\u01f0\u01f9\u01ff"+ - "\u0207\u020c\u0212\u0215\u021c\u0224\u022a\u0236\u0238\u0242\u024f\u025b"+ - "\u0267\u026c\u026f\u0271\u027f\u028d\u0292\u0299\u029c\u02a3\u02ac\u02bf"+ - "\u02c7\u02cb\u02d0\u02db\u02e2\u02e7\u02ed\u02f0\u02f4\u02f9\u02fd"; + "\u0207\u020c\u0212\u0215\u021c\u0224\u022a\u0236\u0238\u0242\u024f\u0253"+ + "\u025f\u0274\u0282\u0287\u028e\u0291\u0298\u02a1\u02b4\u02bc\u02c0\u02c5"+ + "\u02d0\u02d7\u02dc\u02e2\u02e5\u02e9\u02ee\u02f2"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java index ed64045191be0..494acf262902b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java @@ -408,23 +408,23 @@ interface SqlBaseVisitor extends ParseTreeVisitor { */ T visitParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx); /** - * Visit a parse tree produced by {@link SqlBaseParser#castExpression}. + * Visit a parse tree produced by {@link SqlBaseParser#builtinDateTimeFunction}. * @param ctx the parse tree * @return the visitor result */ - T visitCastExpression(SqlBaseParser.CastExpressionContext ctx); + T visitBuiltinDateTimeFunction(SqlBaseParser.BuiltinDateTimeFunctionContext ctx); /** - * Visit a parse tree produced by {@link SqlBaseParser#castTemplate}. + * Visit a parse tree produced by {@link SqlBaseParser#castExpression}. * @param ctx the parse tree * @return the visitor result */ - T visitCastTemplate(SqlBaseParser.CastTemplateContext ctx); + T visitCastExpression(SqlBaseParser.CastExpressionContext ctx); /** - * Visit a parse tree produced by {@link SqlBaseParser#builtinDateTimeFunction}. + * Visit a parse tree produced by {@link SqlBaseParser#castTemplate}. * @param ctx the parse tree * @return the visitor result */ - T visitBuiltinDateTimeFunction(SqlBaseParser.BuiltinDateTimeFunctionContext ctx); + T visitCastTemplate(SqlBaseParser.CastTemplateContext ctx); /** * Visit a parse tree produced by {@link SqlBaseParser#convertTemplate}. 
* @param ctx the parse tree diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java index 0bc02c1ba6f27..6166d87703ead 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.sql.parser; -import com.carrotsearch.hppc.ObjectShortHashMap; - import org.antlr.v4.runtime.BaseErrorListener; import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.CommonToken; @@ -27,18 +25,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.BackQuotedIdentifierContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.BooleanDefaultContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.BooleanExpressionContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.PrimaryExpressionContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryPrimaryDefaultContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryTermContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QuoteIdentifierContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StatementContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StatementDefaultContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.UnquoteIdentifierContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ValueExpressionContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ValueExpressionDefaultContext; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; @@ -53,7 +39,6 @@ import java.util.function.Function; import static java.lang.String.format; -import static org.elasticsearch.xpack.sql.parser.AbstractBuilder.source; public class SqlParser { @@ -103,45 +88,49 @@ private T invokeParser(String sql, List params, Function parseFunction, BiFunction visitor) { - SqlBaseLexer lexer = new SqlBaseLexer(new CaseInsensitiveStream(sql)); + try { + SqlBaseLexer lexer = new SqlBaseLexer(new CaseInsensitiveStream(sql)); - lexer.removeErrorListeners(); - lexer.addErrorListener(ERROR_LISTENER); + lexer.removeErrorListeners(); + lexer.addErrorListener(ERROR_LISTENER); - Map paramTokens = new HashMap<>(); - TokenSource tokenSource = new ParametrizedTokenSource(lexer, paramTokens, params); + Map paramTokens = new HashMap<>(); + TokenSource tokenSource = new ParametrizedTokenSource(lexer, paramTokens, params); - CommonTokenStream tokenStream = new CommonTokenStream(tokenSource); - SqlBaseParser parser = new SqlBaseParser(tokenStream); + CommonTokenStream tokenStream = new CommonTokenStream(tokenSource); + SqlBaseParser parser = new SqlBaseParser(tokenStream); - parser.addParseListener(new CircuitBreakerListener()); - parser.addParseListener(new PostProcessor(Arrays.asList(parser.getRuleNames()))); + parser.addParseListener(new PostProcessor(Arrays.asList(parser.getRuleNames()))); - parser.removeErrorListeners(); - parser.addErrorListener(ERROR_LISTENER); + parser.removeErrorListeners(); + parser.addErrorListener(ERROR_LISTENER); - parser.getInterpreter().setPredictionMode(PredictionMode.SLL); + parser.getInterpreter().setPredictionMode(PredictionMode.SLL); - if 
(DEBUG) { - debug(parser); - tokenStream.fill(); + if (DEBUG) { + debug(parser); + tokenStream.fill(); - for (Token t : tokenStream.getTokens()) { - String symbolicName = SqlBaseLexer.VOCABULARY.getSymbolicName(t.getType()); - String literalName = SqlBaseLexer.VOCABULARY.getLiteralName(t.getType()); - log.info(format(Locale.ROOT, " %-15s '%s'", + for (Token t : tokenStream.getTokens()) { + String symbolicName = SqlBaseLexer.VOCABULARY.getSymbolicName(t.getType()); + String literalName = SqlBaseLexer.VOCABULARY.getLiteralName(t.getType()); + log.info(format(Locale.ROOT, " %-15s '%s'", symbolicName == null ? literalName : symbolicName, t.getText())); + } } - } - ParserRuleContext tree = parseFunction.apply(parser); + ParserRuleContext tree = parseFunction.apply(parser); - if (DEBUG) { - log.info("Parse tree {} " + tree.toStringTree()); - } + if (DEBUG) { + log.info("Parse tree {} " + tree.toStringTree()); + } - return visitor.apply(new AstBuilder(paramTokens), tree); + return visitor.apply(new AstBuilder(paramTokens), tree); + } catch (StackOverflowError e) { + throw new ParsingException("SQL statement is too large, " + + "causing stack overflow when generating the parsing tree: [{}]", sql); + } } private static void debug(SqlBaseParser parser) { @@ -224,91 +213,6 @@ public void exitNonReserved(SqlBaseParser.NonReservedContext context) { } } - /** - * Used to catch large expressions that can lead to stack overflows - */ - static class CircuitBreakerListener extends SqlBaseBaseListener { - - private static final short MAX_RULE_DEPTH = 200; - - /** - * Due to the structure of the grammar and our custom handling in {@link ExpressionBuilder} - * some expressions can exit with a different class than they entered: - * e.g.: ValueExpressionContext can exit as ValueExpressionDefaultContext - */ - private static final Map ENTER_EXIT_RULE_MAPPING = new HashMap<>(); - - static { - ENTER_EXIT_RULE_MAPPING.put(StatementDefaultContext.class.getSimpleName(), StatementContext.class.getSimpleName()); - ENTER_EXIT_RULE_MAPPING.put(QueryPrimaryDefaultContext.class.getSimpleName(), QueryTermContext.class.getSimpleName()); - ENTER_EXIT_RULE_MAPPING.put(BooleanDefaultContext.class.getSimpleName(), BooleanExpressionContext.class.getSimpleName()); - ENTER_EXIT_RULE_MAPPING.put(ValueExpressionDefaultContext.class.getSimpleName(), ValueExpressionContext.class.getSimpleName()); - } - - private boolean insideIn = false; - - // Keep current depth for every rule visited. - // The totalDepth alone cannot be used as expressions like: e1 OR e2 OR e3 OR ... - // are processed as e1 OR (e2 OR (e3 OR (... and this results in the totalDepth not growing - // while the stack call depth is, leading to a StackOverflowError. - private ObjectShortHashMap depthCounts = new ObjectShortHashMap<>(); - - @Override - public void enterEveryRule(ParserRuleContext ctx) { - if (inDetected(ctx)) { - insideIn = true; - } - - // Skip PrimaryExpressionContext for IN as it's not visited on exit due to - // the grammar's peculiarity rule with "predicated" and "predicate". - // Also skip the Identifiers as they are "cheap". 
- if (ctx.getClass() != UnquoteIdentifierContext.class && - ctx.getClass() != QuoteIdentifierContext.class && - ctx.getClass() != BackQuotedIdentifierContext.class && - (insideIn == false || ctx.getClass() != PrimaryExpressionContext.class)) { - - int currentDepth = depthCounts.putOrAdd(ctx.getClass().getSimpleName(), (short) 1, (short) 1); - if (currentDepth > MAX_RULE_DEPTH) { - throw new ParsingException(source(ctx), "SQL statement too large; " + - "halt parsing to prevent memory errors (stopped at depth {})", MAX_RULE_DEPTH); - } - } - super.enterEveryRule(ctx); - } - - @Override - public void exitEveryRule(ParserRuleContext ctx) { - if (inDetected(ctx)) { - insideIn = false; - } - - decrementCounter(ctx); - super.exitEveryRule(ctx); - } - - ObjectShortHashMap depthCounts() { - return depthCounts; - } - - private void decrementCounter(ParserRuleContext ctx) { - String className = ctx.getClass().getSimpleName(); - String classNameToDecrement = ENTER_EXIT_RULE_MAPPING.getOrDefault(className, className); - - // Avoid having negative numbers - if (depthCounts.containsKey(classNameToDecrement)) { - depthCounts.putOrAdd(classNameToDecrement, (short) 0, (short) -1); - } - } - - private boolean inDetected(ParserRuleContext ctx) { - if (ctx.getParent() != null && ctx.getParent().getClass() == SqlBaseParser.PredicateContext.class) { - SqlBaseParser.PredicateContext pc = (SqlBaseParser.PredicateContext) ctx.getParent(); - return pc.kind != null && pc.kind.getType() == SqlBaseParser.IN; - } - return false; - } - } - private static final BaseErrorListener ERROR_LISTENER = new BaseErrorListener() { @Override public void syntaxError(Recognizer recognizer, Object offendingSymbol, int line, diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java index ffe0140c9dcaa..68cfefe7fb572 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.sql.plan.logical.command.sys; +import org.apache.lucene.util.Counter; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; import org.elasticsearch.xpack.sql.analysis.index.EsIndex; @@ -20,6 +21,7 @@ import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.DataTypes; import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.util.StringUtils; import java.sql.DatabaseMetaData; import java.util.ArrayList; @@ -96,45 +98,68 @@ private List output(boolean odbcCompatible) { @Override public void execute(SqlSession session, ActionListener listener) { - boolean isOdbcClient = session.configuration().mode() == Mode.ODBC; - List output = output(isOdbcClient); + Mode mode = session.configuration().mode(); + List output = output(mode == Mode.ODBC); String cluster = session.indexResolver().clusterName(); // bail-out early if the catalog is present but differs - if (Strings.hasText(catalog) && !cluster.equals(catalog)) { + if (Strings.hasText(catalog) && cluster.equals(catalog) == false) { listener.onResponse(Rows.empty(output)); return; } + // save original index name (as the pattern can contain special chars) + String indexName = index != null ? index : (pattern != null ? 
StringUtils.likeToUnescaped(pattern.pattern(), + pattern.escape()) : ""); String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*"); String regex = pattern != null ? pattern.asJavaRegex() : null; Pattern columnMatcher = columnPattern != null ? Pattern.compile(columnPattern.asJavaRegex()) : null; - session.indexResolver().resolveAsSeparateMappings(idx, regex, ActionListener.wrap(esIndices -> { - List> rows = new ArrayList<>(); - for (EsIndex esIndex : esIndices) { - fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher, isOdbcClient); - } + // special case for '%' (translated to *) + if ("*".equals(idx)) { + session.indexResolver().resolveAsSeparateMappings(idx, regex, ActionListener.wrap(esIndices -> { + List> rows = new ArrayList<>(); + for (EsIndex esIndex : esIndices) { + fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher, mode); + } + + listener.onResponse(Rows.of(output, rows)); + }, listener::onFailure)); + } + // otherwise use a merged mapping + else { + session.indexResolver().resolveAsMergedMapping(idx, regex, ActionListener.wrap(r -> { + List> rows = new ArrayList<>(); + // populate the data only when a target is found + if (r.isValid() == true) { + EsIndex esIndex = r.get(); + fillInRows(cluster, indexName, esIndex.mapping(), null, rows, columnMatcher, mode); + } - listener.onResponse(Rows.of(output, rows)); - }, listener::onFailure)); + listener.onResponse(Rows.of(output, rows)); + }, listener::onFailure)); + } } static void fillInRows(String clusterName, String indexName, Map mapping, String prefix, List> rows, - Pattern columnMatcher, boolean isOdbcClient) { - int pos = 0; + Pattern columnMatcher, Mode mode) { + fillInRows(clusterName, indexName, mapping, prefix, rows, columnMatcher, Counter.newCounter(), mode); + } + + private static void fillInRows(String clusterName, String indexName, Map mapping, String prefix, List> rows, + Pattern columnMatcher, Counter position, Mode mode) { + boolean isOdbcClient = mode == Mode.ODBC; for (Map.Entry entry : mapping.entrySet()) { - pos++; // JDBC is 1-based so we start with 1 here + position.addAndGet(1); // JDBC is 1-based so we start with 1 here String name = entry.getKey(); name = prefix != null ? prefix + "." 
+ name : name; EsField field = entry.getValue(); DataType type = field.getDataType(); - // skip the nested and object types only for ODBC - // https://github.com/elastic/elasticsearch/issues/35376 - if (type.isPrimitive() || !isOdbcClient) { + // skip the nested, object and unsupported types + if (type.isPrimitive()) { if (columnMatcher == null || columnMatcher.matcher(name).matches()) { rows.add(asList(clusterName, // schema is not supported @@ -162,7 +187,7 @@ static void fillInRows(String clusterName, String indexName, Map liste // namely one param specified with '%', everything else empty string // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqltables-function?view=ssdt-18vs2017#comments - if (clusterPattern != null && clusterPattern.pattern().equals(SQL_WILDCARD)) { - if ((pattern == null || pattern.pattern().isEmpty()) && CollectionUtils.isEmpty(types)) { + // catalog enumeration + if (clusterPattern == null || clusterPattern.pattern().equals(SQL_WILDCARD)) { + // enumerate only if pattern is "" and no types are specified (types is null) + if (pattern != null && pattern.pattern().isEmpty() && index == null && types == null) { Object[] enumeration = new Object[10]; // send only the cluster, everything else null enumeration[0] = cluster; @@ -87,12 +88,15 @@ public final void execute(SqlSession session, ActionListener liste } } - // if no types were specified (the parser takes care of the % case) - if (IndexType.VALID.equals(types)) { - if ((clusterPattern == null || clusterPattern.pattern().isEmpty()) - && (pattern == null || pattern.pattern().isEmpty())) { + // enumerate types + // if no types are specified (the parser takes care of the % case) + if (types == null) { + // empty string for catalog + if (clusterPattern != null && clusterPattern.pattern().isEmpty() + // empty string for table like and no index specified + && pattern != null && pattern.pattern().isEmpty() && index == null) { List> values = new ArrayList<>(); - // send only the types, everything else null + // send only the types, everything else is made of empty strings for (IndexType type : IndexType.VALID) { Object[] enumeration = new Object[10]; enumeration[3] = type.toSql(); @@ -105,7 +109,7 @@ public final void execute(SqlSession session, ActionListener liste } } - + // no enumeration pattern found, list actual tables String cRegex = clusterPattern != null ? 
clusterPattern.asJavaRegex() : null; // if the catalog doesn't match, don't return any results diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Planner.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Planner.java index 5bda469853d6b..b6651821a9b64 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Planner.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Planner.java @@ -5,14 +5,14 @@ */ package org.elasticsearch.xpack.sql.planner; -import java.util.List; -import java.util.Map; - import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.sql.planner.Verifier.Failure; import org.elasticsearch.xpack.sql.tree.Node; +import java.util.List; +import java.util.Map; + import static java.util.stream.Collectors.toMap; public class Planner { @@ -64,4 +64,4 @@ public Map, String> verifyExecutingPlanFailures(PhysicalPlan plan) { List failures = Verifier.verifyExecutingPlan(plan); return failures.stream().collect(toMap(Failure::source, Failure::message)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java index ef765e31f3b80..56554185ce84b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java @@ -14,7 +14,6 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Foldables; -import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.expression.NamedExpression; import org.elasticsearch.xpack.sql.expression.Order; import org.elasticsearch.xpack.sql.expression.function.Function; @@ -28,7 +27,6 @@ import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; -import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeHistogramFunction; import org.elasticsearch.xpack.sql.expression.gen.pipeline.AggPathInput; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; @@ -65,9 +63,7 @@ import org.elasticsearch.xpack.sql.rule.RuleExecutor; import org.elasticsearch.xpack.sql.session.EmptyExecutable; import org.elasticsearch.xpack.sql.util.Check; -import org.elasticsearch.xpack.sql.util.DateUtils; -import java.time.ZoneId; import java.util.Arrays; import java.util.LinkedHashMap; import java.util.Map; @@ -152,7 +148,8 @@ protected PhysicalPlan rule(ProjectExec project) { queryC.pseudoFunctions(), new AttributeMap<>(processors), queryC.sort(), - queryC.limit()); + queryC.limit(), + queryC.shouldTrackHits()); return new EsQueryExec(exec.source(), exec.index(), project.output(), clone); } return project; @@ -180,7 +177,8 @@ protected PhysicalPlan rule(FilterExec plan) { qContainer.pseudoFunctions(), qContainer.scalarFunctions(), qContainer.sort(), - qContainer.limit()); + qContainer.limit(), + qContainer.shouldTrackHits()); return exec.with(qContainer); } @@ -291,7 +289,7 @@ 
protected PhysicalPlan rule(AggregateExec a) { if (matchingGroup != null) { if (exp instanceof Attribute || exp instanceof ScalarFunction || exp instanceof GroupingFunction) { Processor action = null; - ZoneId zi = exp.dataType().isDateBased() ? DateUtils.UTC : null; + boolean isDateBased = exp.dataType().isDateBased(); /* * special handling of dates since aggs return the typed Date object which needs * extraction instead of handling this in the scroller, the folder handles this @@ -299,9 +297,10 @@ protected PhysicalPlan rule(AggregateExec a) { */ if (exp instanceof DateTimeHistogramFunction) { action = ((UnaryPipe) p).action(); - zi = ((DateTimeFunction) exp).zoneId(); + isDateBased = true; } - return new AggPathInput(exp.source(), exp, new GroupByRef(matchingGroup.id(), null, zi), action); + return new AggPathInput(exp.source(), exp, + new GroupByRef(matchingGroup.id(), null, isDateBased), action); } } // or found an aggregate expression (which has to work on an attribute used for grouping) @@ -339,15 +338,12 @@ protected PhysicalPlan rule(AggregateExec a) { // attributes can only refer to declared groups if (child instanceof Attribute) { Check.notNull(matchingGroup, "Cannot find group [{}]", Expressions.name(child)); - // check if the field is a date - if so mark it as such to interpret the long as a date - // UTC is used since that's what the server uses and there's no conversion applied - // (like for date histograms) - ZoneId zi = child.dataType().isDateBased() ? DateUtils.UTC : null; - queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, zi), ((Attribute) child)); + queryC = queryC.addColumn( + new GroupByRef(matchingGroup.id(), null, child.dataType().isDateBased()), ((Attribute) child)); } // handle histogram else if (child instanceof GroupingFunction) { - queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, null), + queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, child.dataType().isDateBased()), ((GroupingFunction) child).toAttribute()); } // fallback to regular agg functions @@ -368,8 +364,8 @@ else if (child instanceof GroupingFunction) { matchingGroup = groupingContext.groupFor(ne); Check.notNull(matchingGroup, "Cannot find group [{}]", Expressions.name(ne)); - ZoneId zi = ne.dataType().isDateBased() ? DateUtils.UTC : null; - queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, zi), ne.toAttribute()); + queryC = queryC.addColumn( + new GroupByRef(matchingGroup.id(), null, ne.dataType().isDateBased()), ne.toAttribute()); } } } @@ -391,10 +387,16 @@ private Tuple addAggFunction(GroupByKey groupingAg if (f instanceof Count) { Count c = (Count) f; // COUNT(*) or COUNT() - if (c.field() instanceof Literal) { - AggRef ref = groupingAgg == null ? 
- GlobalCountRef.INSTANCE : - new GroupByRef(groupingAgg.id(), Property.COUNT, null); + if (c.field().foldable()) { + AggRef ref = null; + + if (groupingAgg == null) { + ref = GlobalCountRef.INSTANCE; + // if the count points to the total track hits, enable accurate count retrieval + queryC = queryC.withTrackHits(); + } else { + ref = new GroupByRef(groupingAgg.id(), Property.COUNT, false); + } Map pseudoFunctions = new LinkedHashMap<>(queryC.pseudoFunctions()); pseudoFunctions.put(functionId, groupingAgg); @@ -402,11 +404,11 @@ private Tuple addAggFunction(GroupByKey groupingAg // COUNT() } else if (!c.distinct()) { LeafAgg leafAgg = toAgg(functionId, f); - AggPathInput a = new AggPathInput(f, new MetricAggRef(leafAgg.id(), "doc_count", "_count")); + AggPathInput a = new AggPathInput(f, new MetricAggRef(leafAgg.id(), "doc_count", "_count", false)); queryC = queryC.with(queryC.aggs().addAgg(leafAgg)); return new Tuple<>(queryC, a); } - // the only variant left - COUNT(DISTINCT) - will be covered by the else branch below + // the only variant left - COUNT(DISTINCT) - will be covered by the else branch below as it maps to an aggregation } AggPathInput aggInput = null; @@ -428,14 +430,16 @@ private Tuple addAggFunction(GroupByKey groupingAg // FIXME: concern leak - hack around MatrixAgg which is not // generalized (afaik) aggInput = new AggPathInput(f, - new MetricAggRef(cAggPath, ia.innerName(), ia.innerKey() != null ? QueryTranslator.nameOf(ia.innerKey()) : null)); + new MetricAggRef(cAggPath, ia.innerName(), + ia.innerKey() != null ? QueryTranslator.nameOf(ia.innerKey()) : null, + ia.dataType().isDateBased())); } else { LeafAgg leafAgg = toAgg(functionId, f); if (f instanceof TopHits) { aggInput = new AggPathInput(f, new TopHitsAggRef(leafAgg.id(), f.dataType())); } else { - aggInput = new AggPathInput(f, new MetricAggRef(leafAgg.id())); + aggInput = new AggPathInput(f, new MetricAggRef(leafAgg.id(), f.dataType().isDateBased())); } queryC = queryC.with(queryC.aggs().addAgg(leafAgg)); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index de529b2e4ca61..2dea3fa10511a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -105,7 +105,6 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.Optional; import java.util.function.Supplier; import static java.util.Collections.singletonList; @@ -258,10 +257,7 @@ static GroupingContext groupBy(List groupings) { // change analyzed to non non-analyzed attributes if (exp instanceof FieldAttribute) { - FieldAttribute fa = (FieldAttribute) exp; - if (fa.isInexact()) { - ne = fa.exactAttribute(); - } + ne = ((FieldAttribute) exp).exactAttribute(); } // handle functions differently @@ -449,7 +445,7 @@ static String field(AggregateFunction af) { // COUNT(DISTINCT) uses cardinality aggregation which works on exact values (not changed by analyzers or normalizers) if (af instanceof Count && ((Count) af).distinct()) { // use the `keyword` version of the field, if there is one - return field.isInexact() ? 
field.exactAttribute().name() : field.name(); + return field.exactAttribute().name(); } return field.name(); } @@ -471,43 +467,30 @@ private static String topAggsField(AggregateFunction af, Expression e) { af.nodeString()); } - // TODO: need to optimize on ngram // TODO: see whether escaping is needed + @SuppressWarnings("rawtypes") static class Likes extends ExpressionTranslator { @Override protected QueryTranslation asQuery(RegexMatch e, boolean onAggs) { Query q = null; - boolean inexact = true; - String target = null; + String targetFieldName = null; if (e.field() instanceof FieldAttribute) { - FieldAttribute fa = (FieldAttribute) e.field(); - inexact = fa.isInexact(); - target = nameOf(inexact ? fa : fa.exactAttribute()); + targetFieldName = nameOf(((FieldAttribute) e.field()).exactAttribute()); } else { - throw new SqlIllegalArgumentException("Scalar function ({}) not allowed (yet) as arguments for LIKE", + throw new SqlIllegalArgumentException("Scalar function [{}] not allowed (yet) as argument for " + e.functionName(), Expressions.name(e.field())); } if (e instanceof Like) { LikePattern p = ((Like) e).pattern(); - if (inexact) { - q = new QueryStringQuery(e.source(), p.asLuceneWildcard(), target); - } - else { - q = new WildcardQuery(e.source(), nameOf(e.field()), p.asLuceneWildcard()); - } + q = new WildcardQuery(e.source(), targetFieldName, p.asLuceneWildcard()); } if (e instanceof RLike) { String pattern = ((RLike) e).pattern(); - if (inexact) { - q = new QueryStringQuery(e.source(), "/" + pattern + "/", target); - } - else { - q = new RegexQuery(e.source(), nameOf(e.field()), pattern); - } + q = new RegexQuery(e.source(), targetFieldName, pattern); } return q != null ? new QueryTranslation(wrapIfNested(q, e.field())) : null; @@ -684,12 +667,9 @@ private static Query translateQuery(BinaryComparison bc) { } if (bc instanceof Equals || bc instanceof NullEquals || bc instanceof NotEquals) { if (bc.left() instanceof FieldAttribute) { - FieldAttribute fa = (FieldAttribute) bc.left(); // equality should always be against an exact match // (which is important for strings) - if (fa.isInexact()) { - name = fa.exactAttribute().name(); - } + name = ((FieldAttribute) bc.left()).exactAttribute().name(); } Query query = new TermQuery(source, name, value); if (bc instanceof NotEquals) { @@ -708,16 +688,6 @@ static class InComparisons extends ExpressionTranslator { @Override protected QueryTranslation asQuery(In in, boolean onAggs) { - Optional firstNotFoldable = in.list().stream().filter(expression -> !expression.foldable()).findFirst(); - - if (firstNotFoldable.isPresent()) { - throw new SqlIllegalArgumentException( - "Line {}:{}: Comparisons against variables are not (currently) supported; offender [{}] in [{}]", - firstNotFoldable.get().sourceLocation().getLineNumber(), - firstNotFoldable.get().sourceLocation().getColumnNumber(), - Expressions.name(firstNotFoldable.get()), - in.name()); - } if (in.value() instanceof NamedExpression) { NamedExpression ne = (NamedExpression) in.value(); @@ -735,7 +705,9 @@ protected QueryTranslation asQuery(In in, boolean onAggs) { else { Query q = null; if (in.value() instanceof FieldAttribute) { - q = new TermsQuery(in.source(), ne.name(), in.list()); + FieldAttribute fa = (FieldAttribute) in.value(); + // equality should always be against an exact match (which is important for strings) + q = new TermsQuery(in.source(), fa.exactAttribute().name(), in.list()); } else { q = new ScriptQuery(in.source(), in.asScript()); } diff --git 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java index 62963a99b2a98..f4e3e006e70f9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java @@ -224,7 +224,7 @@ String format(Cursor cursor, RestRequest request, SqlQueryResponse response) { boolean header = hasHeader(request); - if (header) { + if (header && (cursor == null || cursor == Cursor.EMPTY)) { row(sb, response.columns(), ColumnInfo::name); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java index 8bfe08e078476..d0c67f193b710 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java @@ -47,7 +47,7 @@ public static void operation(PlanExecutor planExecutor, SqlClearCursorRequest re Cursor cursor = Cursors.decodeFromString(request.getCursor()); planExecutor.cleanCursor( new Configuration(DateUtils.UTC, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, - request.mode(), StringUtils.EMPTY, StringUtils.EMPTY, StringUtils.EMPTY), + request.mode(), StringUtils.EMPTY, StringUtils.EMPTY, StringUtils.EMPTY, Protocol.FIELD_MULTI_VALUE_LENIENCY), cursor, ActionListener.wrap( success -> listener.onResponse(new SqlClearCursorResponse(success)), listener::onFailure)); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java index 49c296a51055b..b1ba7d899f5e3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java @@ -72,7 +72,7 @@ public static void operation(PlanExecutor planExecutor, SqlQueryRequest request, // The configuration is always created however when dealing with the next page, only the timeouts are relevant // the rest having default values (since the query is already created) Configuration cfg = new Configuration(request.zoneId(), request.fetchSize(), request.requestTimeout(), request.pageTimeout(), - request.filter(), request.mode(), request.clientId(), username, clusterName); + request.filter(), request.mode(), request.clientId(), username, clusterName, request.fieldMultiValueLeniency()); if (Strings.hasText(request.cursor()) == false) { planExecutor.sql(cfg, request.query(), request.params(), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java index 0bda719111589..0b9132df29025 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.sql.action.SqlTranslateRequest; import org.elasticsearch.xpack.sql.action.SqlTranslateResponse; import 
org.elasticsearch.xpack.sql.execution.PlanExecutor; +import org.elasticsearch.xpack.sql.proto.Protocol; import org.elasticsearch.xpack.sql.session.Configuration; import static org.elasticsearch.xpack.sql.plugin.Transports.clusterName; @@ -53,9 +54,9 @@ protected void doExecute(Task task, SqlTranslateRequest request, ActionListener< sqlLicenseChecker.checkIfSqlAllowed(request.mode()); Configuration cfg = new Configuration(request.zoneId(), request.fetchSize(), - request.requestTimeout(), request.pageTimeout(), request.filter(), + request.requestTimeout(), request.pageTimeout(), request.filter(), request.mode(), request.clientId(), - username(securityContext), clusterName(clusterService)); + username(securityContext), clusterName(clusterService), Protocol.FIELD_MULTI_VALUE_LENIENCY); planExecutor.searchSource(cfg, request.query(), request.params(), ActionListener.wrap( searchSourceBuilder -> listener.onResponse(new SqlTranslateResponse(searchSourceBuilder)), listener::onFailure)); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByKey.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByKey.java index 6f26ee1dd960c..9638a1bd305d2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByKey.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByKey.java @@ -42,7 +42,7 @@ public final CompositeValuesSourceBuilder asValueSource() { } else if (script.outputType() == DataType.DATE) { builder.valueType(ValueType.LONG); } else if (script.outputType() == DataType.DATETIME) { - builder.valueType(ValueType.DATE); + builder.valueType(ValueType.LONG); } else if (script.outputType() == DataType.BOOLEAN) { builder.valueType(ValueType.BOOLEAN); } else if (script.outputType() == DataType.IP) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/GroupByRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/GroupByRef.java index 95ab6b3b41039..1dc8d1ae60ad6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/GroupByRef.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/GroupByRef.java @@ -7,8 +7,6 @@ import org.elasticsearch.xpack.sql.execution.search.AggRef; -import java.time.ZoneId; - /** * Reference to a GROUP BY agg (typically this gets translated to a composite key). */ @@ -20,12 +18,12 @@ public enum Property { private final String key; private final Property property; - private final ZoneId zoneId; + private final boolean isDateTimeBased; - public GroupByRef(String key, Property property, ZoneId zoneId) { + public GroupByRef(String key, Property property, boolean isDateTimeBased) { this.key = key; this.property = property == null ? 
Property.VALUE : property; - this.zoneId = zoneId; + this.isDateTimeBased = isDateTimeBased; } public String key() { @@ -36,8 +34,8 @@ public Property property() { return property; } - public ZoneId zoneId() { - return zoneId; + public boolean isDateTimeBased() { + return isDateTimeBased; } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/MetricAggRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/MetricAggRef.java index 75ee3d8f44743..f1602df4c0e79 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/MetricAggRef.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/MetricAggRef.java @@ -17,19 +17,21 @@ public class MetricAggRef extends AggRef { private final String name; private final String property; private final String innerKey; + private final boolean isDateTimeBased; - public MetricAggRef(String name) { - this(name, "value"); + public MetricAggRef(String name, boolean isDateTimeBased) { + this(name, "value", isDateTimeBased); } - public MetricAggRef(String name, String property) { - this(name, property, null); + public MetricAggRef(String name, String property, boolean isDateTimeBased) { + this(name, property, null, isDateTimeBased); } - public MetricAggRef(String name, String property, String innerKey) { + public MetricAggRef(String name, String property, String innerKey, boolean isDateTimeBased) { this.name = name; this.property = property; this.innerKey = innerKey; + this.isDateTimeBased = isDateTimeBased; } public String name() { @@ -44,6 +46,10 @@ public String innerKey() { return innerKey; } + public boolean isDateTimeBased() { + return isDateTimeBased; + } + @Override public String toString() { String i = innerKey != null ? "[" + innerKey + "]" : ""; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java index 960b01f17c3f3..0fa2236a6bb5a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java @@ -26,7 +26,6 @@ import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; import org.elasticsearch.xpack.sql.querydsl.agg.GroupByKey; import org.elasticsearch.xpack.sql.querydsl.agg.LeafAgg; -import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef.Property; import org.elasticsearch.xpack.sql.querydsl.query.BoolQuery; import org.elasticsearch.xpack.sql.querydsl.query.MatchAll; import org.elasticsearch.xpack.sql.querydsl.query.NestedQuery; @@ -81,23 +80,26 @@ public class QueryContainer { private final Set sort; private final int limit; + private final boolean trackHits; // computed private Boolean aggsOnly; private Boolean customSort; public QueryContainer() { - this(null, null, null, null, null, null, null, -1); + this(null, null, null, null, null, null, null, -1, false); } - public QueryContainer(Query query, - Aggs aggs, - List> fields, + public QueryContainer(Query query, + Aggs aggs, + List> fields, AttributeMap aliases, - Map pseudoFunctions, - AttributeMap scalarFunctions, - Set sort, - int limit) { + Map pseudoFunctions, + AttributeMap scalarFunctions, + Set sort, + int limit, + boolean trackHits) { this.query = query; this.aggs = aggs == null ? 
Aggs.EMPTY : aggs; this.fields = fields == null || fields.isEmpty() ? emptyList() : fields; @@ -106,6 +108,7 @@ public QueryContainer(Query query, this.scalarFunctions = scalarFunctions == null || scalarFunctions.isEmpty() ? AttributeMap.emptyAttributeMap() : scalarFunctions; this.sort = sort == null || sort.isEmpty() ? emptySet() : sort; this.limit = limit; + this.trackHits = trackHits; } /** @@ -230,38 +233,46 @@ public boolean hasColumns() { return fields.size() > 0; } + public boolean shouldTrackHits() { + return trackHits; + } + // // copy methods // public QueryContainer with(Query q) { - return new QueryContainer(q, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit); + return new QueryContainer(q, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits); } public QueryContainer withAliases(AttributeMap a) { - return new QueryContainer(query, aggs, fields, a, pseudoFunctions, scalarFunctions, sort, limit); + return new QueryContainer(query, aggs, fields, a, pseudoFunctions, scalarFunctions, sort, limit, trackHits); } public QueryContainer withPseudoFunctions(Map p) { - return new QueryContainer(query, aggs, fields, aliases, p, scalarFunctions, sort, limit); + return new QueryContainer(query, aggs, fields, aliases, p, scalarFunctions, sort, limit, trackHits); } public QueryContainer with(Aggs a) { - return new QueryContainer(query, a, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit); + return new QueryContainer(query, a, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits); } public QueryContainer withLimit(int l) { - return l == limit ? this : new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, l); + return l == limit ? this : new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, l, trackHits); + } + + public QueryContainer withTrackHits() { + return trackHits ? this : new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, true); } public QueryContainer withScalarProcessors(AttributeMap procs) { - return new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, procs, sort, limit); + return new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, procs, sort, limit, trackHits); } public QueryContainer addSort(Sort sortable) { Set sort = new LinkedHashSet<>(this.sort); sort.add(sortable); - return new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit); + return new QueryContainer(query, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits); } private String aliasName(Attribute attr) { @@ -287,7 +298,7 @@ private Tuple nestedHitFieldRef(FieldAttribute attr.field().isAggregatable(), attr.parent().name()); nestedRefs.add(nestedFieldRef); - return new Tuple<>(new QueryContainer(q, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit), + return new Tuple<>(new QueryContainer(q, aggs, fields, aliases, pseudoFunctions, scalarFunctions, sort, limit, trackHits), nestedFieldRef); } @@ -390,7 +401,7 @@ public QueryContainer addColumn(FieldExtraction ref, Attribute attr) { ExpressionId id = attr instanceof AggregateFunctionAttribute ? 
((AggregateFunctionAttribute) attr).innerId() : attr.id(); return new QueryContainer(query, aggs, combine(fields, new Tuple<>(ref, id)), aliases, pseudoFunctions, scalarFunctions, - sort, limit); + sort, limit, trackHits); } public AttributeMap scalarFunctions() { @@ -401,16 +412,6 @@ public AttributeMap scalarFunctions() { // agg methods // - public QueryContainer addAggCount(GroupByKey group, ExpressionId functionId) { - FieldExtraction ref = group == null ? GlobalCountRef.INSTANCE : new GroupByRef(group.id(), Property.COUNT, null); - Map pseudoFunctions = new LinkedHashMap<>(this.pseudoFunctions); - pseudoFunctions.put(functionId.toString(), group); - return new QueryContainer(query, aggs, combine(fields, new Tuple<>(ref, functionId)), - aliases, - pseudoFunctions, - scalarFunctions, sort, limit); - } - public QueryContainer addAgg(String groupId, LeafAgg agg) { return with(aggs.addAgg(agg)); } @@ -465,4 +466,4 @@ public String toString() { throw new RuntimeException("error rendering", e); } } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java index 6acdf2919dcc2..2b7033309cf85 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java @@ -72,15 +72,15 @@ public QueryBuilder asBuilder() { return boolQuery; } - boolean isAnd() { + public boolean isAnd() { return isAnd; } - Query left() { + public Query left() { return left; } - Query right() { + public Query right() { return right; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java index d0fe697268d41..7bddacb86bf74 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.querydsl.query; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilder; @@ -28,16 +29,17 @@ public class MatchQuery extends LeafQuery { // TODO: it'd be great if these could be constants instead of Strings, needs a core change to make the fields public first // TODO: add zero terms query support, I'm not sure the best way to parse it yet... 
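// A minimal sketch of how BUILDER_APPLIERS is typically consumed when the MATCH() predicate is
// turned into a query builder; the option map, variable names and error message here are assumptions
// for illustration, not necessarily the exact SQL plugin implementation:
//
//   options.forEach((key, value) -> {
//       BiConsumer<MatchQueryBuilder, String> applier = BUILDER_APPLIERS.get(key);
//       if (applier == null) {
//           throw new IllegalArgumentException("illegal match option [" + key + "]");
//       }
//       applier.accept(queryBuilder, value); // e.g. "fuzziness" -> qb.fuzziness(Fuzziness.build(value))
//   });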
// appliers.put("zero_terms_query", (qb, s) -> qb.zeroTermsQuery(s)); + appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); appliers.put("cutoff_frequency", (qb, s) -> qb.cutoffFrequency(Float.valueOf(s))); - appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.build(s))); appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); appliers.put("fuzzy_rewrite", (qb, s) -> qb.fuzzyRewrite(s)); + appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); appliers.put("operator", (qb, s) -> qb.operator(Operator.fromString(s))); - appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); appliers.put("prefix_length", (qb, s) -> qb.prefixLength(Integer.valueOf(s))); - appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); - appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); BUILDER_APPLIERS = Collections.unmodifiableMap(appliers); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java index 4f0bc0720ae83..2c6b47d7bdcc3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.querydsl.query; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilder; @@ -29,18 +30,19 @@ public class MultiMatchQuery extends LeafQuery { appliers.put("slop", (qb, s) -> qb.slop(Integer.valueOf(s))); // TODO: add zero terms query support, I'm not sure the best way to parse it yet... 
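// A minimal sketch of the SQL-level usage this enables, assuming the semicolon-separated option
// syntax documented for the SQL full-text functions; with "fuzziness" registered below, each
// option is routed through this map onto the MultiMatchQueryBuilder:
//
//   SELECT author, name FROM library
//   WHERE MATCH('author^2,name^5', 'frank dune', 'fuzziness=AUTO;operator=and');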
// appliers.put("zero_terms_query", (qb, s) -> qb.zeroTermsQuery(s)); - appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); appliers.put("cutoff_frequency", (qb, s) -> qb.cutoffFrequency(Float.valueOf(s))); - appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); + appliers.put("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.build(s))); appliers.put("fuzzy_rewrite", (qb, s) -> qb.fuzzyRewrite(s)); + appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); + appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); appliers.put("operator", (qb, s) -> qb.operator(Operator.fromString(s))); - appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); appliers.put("prefix_length", (qb, s) -> qb.prefixLength(Integer.valueOf(s))); - appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); appliers.put("type", (qb, s) -> qb.type(s)); - appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); - appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); BUILDER_APPLIERS = Collections.unmodifiableMap(appliers); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java index de457ba918e7c..a6d8ff2dbf5fc 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.querydsl.query; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.Operator; @@ -28,30 +29,29 @@ public class QueryStringQuery extends LeafQuery { static { HashMap> appliers = new HashMap<>(28); // TODO: it'd be great if these could be constants instead of Strings, needs a core change to make the fields public first + appliers.put("allow_leading_wildcard", (qb, s) -> qb.allowLeadingWildcard(Booleans.parseBoolean(s))); + appliers.put("analyze_wildcard", (qb, s) -> qb.analyzeWildcard(Booleans.parseBoolean(s))); + appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); appliers.put("default_field", (qb, s) -> qb.defaultField(s)); appliers.put("default_operator", (qb, s) -> qb.defaultOperator(Operator.fromString(s))); - appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); - appliers.put("quote_analyzer", (qb, s) -> qb.quoteAnalyzer(s)); - appliers.put("allow_leading_wildcard", (qb, s) -> qb.allowLeadingWildcard(Booleans.parseBoolean(s))); - appliers.put("max_determinized_states", (qb, s) -> qb.maxDeterminizedStates(Integer.valueOf(s))); - 
appliers.put("lowercase_expanded_terms", (qb, s) -> {}); appliers.put("enable_position_increments", (qb, s) -> qb.enablePositionIncrements(Booleans.parseBoolean(s))); appliers.put("escape", (qb, s) -> qb.escape(Booleans.parseBoolean(s))); - appliers.put("fuzzy_prefix_length", (qb, s) -> qb.fuzzyPrefixLength(Integer.valueOf(s))); + appliers.put("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.build(s))); appliers.put("fuzzy_max_expansions", (qb, s) -> qb.fuzzyMaxExpansions(Integer.valueOf(s))); + appliers.put("fuzzy_prefix_length", (qb, s) -> qb.fuzzyPrefixLength(Integer.valueOf(s))); appliers.put("fuzzy_rewrite", (qb, s) -> qb.fuzzyRewrite(s)); + appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); + appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("max_determinized_states", (qb, s) -> qb.maxDeterminizedStates(Integer.valueOf(s))); + appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); appliers.put("phrase_slop", (qb, s) -> qb.phraseSlop(Integer.valueOf(s))); - appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); - appliers.put("analyze_wildcard", (qb, s) -> qb.analyzeWildcard(Booleans.parseBoolean(s))); appliers.put("rewrite", (qb, s) -> qb.rewrite(s)); - appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); + appliers.put("quote_analyzer", (qb, s) -> qb.quoteAnalyzer(s)); appliers.put("quote_field_suffix", (qb, s) -> qb.quoteFieldSuffix(s)); - appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); - appliers.put("locale", (qb, s) -> {}); + appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); appliers.put("time_zone", (qb, s) -> qb.timeZone(s)); appliers.put("type", (qb, s) -> qb.type(MultiMatchQueryBuilder.Type.parse(s, LoggingDeprecationHandler.INSTANCE))); - appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); - appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); BUILDER_APPLIERS = Collections.unmodifiableMap(appliers); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java index 2ed68def135ec..689b0b338a99e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java @@ -38,7 +38,7 @@ public Limiter(int maximumRuns) { boolean reached(int runs) { if (runs >= this.runs) { - throw new RuleExecutionException("Rule execution limit %d reached", runs); + throw new RuleExecutionException("Rule execution limit [{}] reached", runs); } return false; } @@ -139,7 +139,7 @@ protected ExecutionInfo executeWithInfo(TreeType plan) { for (Batch batch : batches) { int batchRuns = 0; - List tfs = new ArrayList(); + List tfs = new ArrayList<>(); transformations.put(batch, tfs); boolean hasChanged = false; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java index e386cbb3b3205..d03ac08305e3e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java @@ -23,6 +23,7 @@ public class Configuration 
{ private final String clientId; private final String username; private final String clusterName; + private final boolean multiValueFieldLeniency; private final ZonedDateTime now; @Nullable @@ -30,7 +31,8 @@ public class Configuration { public Configuration(ZoneId zi, int pageSize, TimeValue requestTimeout, TimeValue pageTimeout, QueryBuilder filter, Mode mode, String clientId, - String username, String clusterName) { + String username, String clusterName, + boolean multiValueFieldLeniency) { this.zoneId = zi.normalized(); this.pageSize = pageSize; this.requestTimeout = requestTimeout; @@ -40,6 +42,7 @@ public Configuration(ZoneId zi, int pageSize, TimeValue requestTimeout, TimeValu this.clientId = clientId; this.username = username; this.clusterName = clusterName; + this.multiValueFieldLeniency = multiValueFieldLeniency; this.now = ZonedDateTime.now(zoneId); } @@ -77,8 +80,12 @@ public String username() { public String clusterName() { return clusterName; } - + public ZonedDateTime now() { return now; } + + public boolean multiValueFieldLeniency() { + return multiValueFieldLeniency; + } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java index d3495c4f719a7..2f7e8dc1318c1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java @@ -76,7 +76,7 @@ public static String encodeToString(Version version, Cursor info) { } return os.toString(StandardCharsets.UTF_8.name()); } catch (Exception ex) { - throw new SqlIllegalArgumentException("Unexpected failure retriving next page", ex); + throw new SqlIllegalArgumentException("Unexpected failure retrieving next page", ex); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java index 14062b4caaf01..39441cb50283c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java @@ -13,6 +13,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Map.Entry; /** * Elasticsearch SQL data types. 
@@ -43,11 +44,11 @@ public enum DataType { OBJECT( "object", JDBCType.STRUCT, -1, 0, 0, false, false, false), NESTED( "nested", JDBCType.STRUCT, -1, 0, 0, false, false, false), BINARY( "binary", JDBCType.VARBINARY, -1, Integer.MAX_VALUE, 0, false, false, false), - DATE( JDBCType.DATE, Long.BYTES, 10, 10, false, false, true), // since ODBC and JDBC interpret precision for Date as display size - // the precision is 23 (number of chars in ISO8601 with millis) + Z (the UTC timezone) - // see https://github.com/elastic/elasticsearch/issues/30386#issuecomment-386807288 - DATETIME( "date", JDBCType.TIMESTAMP, Long.BYTES, 24, 24, false, false, true), + // the precision is 23 (number of chars in ISO8601 with millis) + 6 chars for the timezone (e.g.: +05:00) + // see https://github.com/elastic/elasticsearch/issues/30386#issuecomment-386807288, + DATE( JDBCType.DATE, Long.BYTES, 29, 29, false, false, true), + DATETIME( "date", JDBCType.TIMESTAMP, Long.BYTES, 29, 29, false, false, true), // // specialized types // @@ -73,58 +74,78 @@ public enum DataType { INTERVAL_MINUTE_TO_SECOND(ExtTypes.INTERVAL_MINUTE_TO_SECOND,Long.BYTES, 23, 23, false, false, false); // @formatter:on - private static final Map odbcToEs; + private static final Map ODBC_TO_ES = new HashMap<>(36); static { - odbcToEs = new HashMap<>(36); - // Numeric - odbcToEs.put("SQL_BIT", BOOLEAN); - odbcToEs.put("SQL_TINYINT", BYTE); - odbcToEs.put("SQL_SMALLINT", SHORT); - odbcToEs.put("SQL_INTEGER", INTEGER); - odbcToEs.put("SQL_BIGINT", LONG); - odbcToEs.put("SQL_FLOAT", FLOAT); - odbcToEs.put("SQL_REAL", FLOAT); - odbcToEs.put("SQL_DOUBLE", DOUBLE); - odbcToEs.put("SQL_DECIMAL", DOUBLE); - odbcToEs.put("SQL_NUMERIC", DOUBLE); + ODBC_TO_ES.put("SQL_BIT", BOOLEAN); + ODBC_TO_ES.put("SQL_TINYINT", BYTE); + ODBC_TO_ES.put("SQL_SMALLINT", SHORT); + ODBC_TO_ES.put("SQL_INTEGER", INTEGER); + ODBC_TO_ES.put("SQL_BIGINT", LONG); + ODBC_TO_ES.put("SQL_REAL", FLOAT); + ODBC_TO_ES.put("SQL_FLOAT", DOUBLE); + ODBC_TO_ES.put("SQL_DOUBLE", DOUBLE); + ODBC_TO_ES.put("SQL_DECIMAL", DOUBLE); + ODBC_TO_ES.put("SQL_NUMERIC", DOUBLE); // String - odbcToEs.put("SQL_GUID", KEYWORD); - odbcToEs.put("SQL_CHAR", KEYWORD); - odbcToEs.put("SQL_WCHAR", KEYWORD); - odbcToEs.put("SQL_VARCHAR", TEXT); - odbcToEs.put("SQL_WVARCHAR", TEXT); - odbcToEs.put("SQL_LONGVARCHAR", TEXT); - odbcToEs.put("SQL_WLONGVARCHAR", TEXT); + ODBC_TO_ES.put("SQL_GUID", KEYWORD); + ODBC_TO_ES.put("SQL_CHAR", KEYWORD); + ODBC_TO_ES.put("SQL_WCHAR", KEYWORD); + ODBC_TO_ES.put("SQL_VARCHAR", TEXT); + ODBC_TO_ES.put("SQL_WVARCHAR", TEXT); + ODBC_TO_ES.put("SQL_LONGVARCHAR", TEXT); + ODBC_TO_ES.put("SQL_WLONGVARCHAR", TEXT); // Binary - odbcToEs.put("SQL_BINARY", BINARY); - odbcToEs.put("SQL_VARBINARY", BINARY); - odbcToEs.put("SQL_LONGVARBINARY", BINARY); + ODBC_TO_ES.put("SQL_BINARY", BINARY); + ODBC_TO_ES.put("SQL_VARBINARY", BINARY); + ODBC_TO_ES.put("SQL_LONGVARBINARY", BINARY); // Date - odbcToEs.put("SQL_DATE", DATE); - odbcToEs.put("SQL_TIME", DATETIME); - odbcToEs.put("SQL_TIMESTAMP", DATETIME); + ODBC_TO_ES.put("SQL_DATE", DATE); + ODBC_TO_ES.put("SQL_TIME", DATETIME); + ODBC_TO_ES.put("SQL_TIMESTAMP", DATETIME); // Intervals - odbcToEs.put("SQL_INTERVAL_HOUR_TO_MINUTE", INTERVAL_HOUR_TO_MINUTE); - odbcToEs.put("SQL_INTERVAL_HOUR_TO_SECOND", INTERVAL_HOUR_TO_SECOND); - odbcToEs.put("SQL_INTERVAL_MINUTE_TO_SECOND", INTERVAL_MINUTE_TO_SECOND); - odbcToEs.put("SQL_INTERVAL_MONTH", INTERVAL_MONTH); - odbcToEs.put("SQL_INTERVAL_YEAR", INTERVAL_YEAR); - 
odbcToEs.put("SQL_INTERVAL_YEAR_TO_MONTH", INTERVAL_YEAR_TO_MONTH); - odbcToEs.put("SQL_INTERVAL_DAY", INTERVAL_DAY); - odbcToEs.put("SQL_INTERVAL_HOUR", INTERVAL_HOUR); - odbcToEs.put("SQL_INTERVAL_MINUTE", INTERVAL_MINUTE); - odbcToEs.put("SQL_INTERVAL_SECOND", INTERVAL_SECOND); - odbcToEs.put("SQL_INTERVAL_DAY_TO_HOUR", INTERVAL_DAY_TO_HOUR); - odbcToEs.put("SQL_INTERVAL_DAY_TO_MINUTE", INTERVAL_DAY_TO_MINUTE); - odbcToEs.put("SQL_INTERVAL_DAY_TO_SECOND", INTERVAL_DAY_TO_SECOND); + ODBC_TO_ES.put("SQL_INTERVAL_HOUR_TO_MINUTE", INTERVAL_HOUR_TO_MINUTE); + ODBC_TO_ES.put("SQL_INTERVAL_HOUR_TO_SECOND", INTERVAL_HOUR_TO_SECOND); + ODBC_TO_ES.put("SQL_INTERVAL_MINUTE_TO_SECOND", INTERVAL_MINUTE_TO_SECOND); + ODBC_TO_ES.put("SQL_INTERVAL_MONTH", INTERVAL_MONTH); + ODBC_TO_ES.put("SQL_INTERVAL_YEAR", INTERVAL_YEAR); + ODBC_TO_ES.put("SQL_INTERVAL_YEAR_TO_MONTH", INTERVAL_YEAR_TO_MONTH); + ODBC_TO_ES.put("SQL_INTERVAL_DAY", INTERVAL_DAY); + ODBC_TO_ES.put("SQL_INTERVAL_HOUR", INTERVAL_HOUR); + ODBC_TO_ES.put("SQL_INTERVAL_MINUTE", INTERVAL_MINUTE); + ODBC_TO_ES.put("SQL_INTERVAL_SECOND", INTERVAL_SECOND); + ODBC_TO_ES.put("SQL_INTERVAL_DAY_TO_HOUR", INTERVAL_DAY_TO_HOUR); + ODBC_TO_ES.put("SQL_INTERVAL_DAY_TO_MINUTE", INTERVAL_DAY_TO_MINUTE); + ODBC_TO_ES.put("SQL_INTERVAL_DAY_TO_SECOND", INTERVAL_DAY_TO_SECOND); } + private static final Map SQL_TO_ES = new HashMap<>(45); + static { + // first add ES types + for (DataType type : DataType.values()) { + if (type.isPrimitive()) { + SQL_TO_ES.put(type.name(), type); + } + } + + // reuse the ODBC definition (without SQL_) + // note that this will override existing types in particular FLOAT + for (Entry entry : ODBC_TO_ES.entrySet()) { + SQL_TO_ES.put(entry.getKey().substring(4), entry.getValue()); + } + + + // special ones + SQL_TO_ES.put("BOOL", DataType.BOOLEAN); + SQL_TO_ES.put("INT", DataType.INTEGER); + SQL_TO_ES.put("STRING", DataType.KEYWORD); + } + /** * Type's name used for error messages and column info for the clients */ @@ -226,7 +247,7 @@ public boolean isString() { } public boolean isPrimitive() { - return this != OBJECT && this != NESTED; + return this != OBJECT && this != NESTED && this != UNSUPPORTED; } public boolean isDateBased() { @@ -234,9 +255,13 @@ public boolean isDateBased() { } public static DataType fromOdbcType(String odbcType) { - return odbcToEs.get(odbcType); + return ODBC_TO_ES.get(odbcType); } + public static DataType fromSqlOrEsType(String typeName) { + return SQL_TO_ES.get(typeName.toUpperCase(Locale.ROOT)); + } + /** * Creates returns DataType enum corresponding to the specified es type */ diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java index bc89b0f1e1587..c5b9e947f384f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java @@ -11,6 +11,7 @@ import org.elasticsearch.xpack.sql.util.DateUtils; import java.time.ZonedDateTime; +import java.time.format.DateTimeParseException; import java.util.Locale; import java.util.function.DoubleFunction; import java.util.function.Function; @@ -102,6 +103,17 @@ public static DataType commonType(DataType left, DataType right) { return right; } } + // Interval * integer is a valid operation + if (DataTypes.isInterval(left)) { + if (right.isInteger()) { + return left; + } + } + if 
(DataTypes.isInterval(right)) { + if (left.isInteger()) { + return right; + } + } if (DataTypes.isInterval(left)) { // intervals widening if (DataTypes.isInterval(right)) { @@ -546,8 +558,8 @@ private static Function fromString(Function conv return converter.apply(value.toString()); } catch (NumberFormatException e) { throw new SqlIllegalArgumentException(e, "cannot cast [{}] to [{}]", value, to); - } catch (IllegalArgumentException e) { - throw new SqlIllegalArgumentException(e, "cannot cast [{}] to [{}]:{}", value, to, e.getMessage()); + } catch (DateTimeParseException | IllegalArgumentException e) { + throw new SqlIllegalArgumentException(e, "cannot cast [{}] to [{}]: {}", value, to, e.getMessage()); } }; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/EsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/EsField.java index 47a2904adb7a7..52f531ba6e41e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/EsField.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/EsField.java @@ -14,6 +14,7 @@ * SQL-related information about an index field */ public class EsField { + private final DataType esDataType; private final boolean aggregatable; private final Map properties; @@ -58,7 +59,9 @@ public Map getProperties() { /** * Returns the path to the keyword version of this field if this field is text and it has a subfield that is - * indexed as keyword, null if such field is not found or the field name itself in all other cases + * indexed as keyword, throws an exception if such field is not found or the field name itself in all other cases. + * To avoid the exception {@link EsField#getExactInfo()} should be used beforehand, to check if an exact field exists + * and if not get the errorMessage which explains why is that. */ public EsField getExactField() { return this; @@ -76,13 +79,14 @@ public int getPrecision() { } /** - * True if this field name can be used in sorting, aggregations and term queries as is - *

- * This will be true for most fields except analyzed text fields that cannot be used directly and should be
- * replaced with the field returned by {@link EsField#getExactField()} instead.
+ * Returns an {@link Exact} object with all the necessary info about the field:
+ * <ul>
+ * <li>If it has an exact underlying field or not</li>
+ * <li>and if not, an error message explaining why it doesn't</li>
+ * </ul>
*/ - public boolean isExact() { - return true; + public Exact getExactInfo() { + return Exact.EXACT_FIELD; } @Override @@ -108,4 +112,25 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(esDataType, aggregatable, properties, name); } -} \ No newline at end of file + + public static final class Exact { + + private static Exact EXACT_FIELD = new Exact(true, null); + + private boolean hasExact; + private String errorMsg; + + public Exact(boolean hasExact, String errorMsg) { + this.hasExact = hasExact; + this.errorMsg = errorMsg; + } + + public boolean hasExact() { + return hasExact; + } + + public String errorMsg() { + return errorMsg; + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/InvalidMappedField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/InvalidMappedField.java index 59bb94c78c86e..79f8eb1c20c1f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/InvalidMappedField.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/InvalidMappedField.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.sql.type; -import org.elasticsearch.xpack.sql.analysis.index.MappingException; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import java.util.Objects; @@ -46,12 +46,12 @@ public boolean equals(Object obj) { @Override public EsField getExactField() { - throw new MappingException("Field [" + getName() + "] is invalid, cannot access it"); + throw new SqlIllegalArgumentException("Field [" + getName() + "] is invalid, cannot access it"); } @Override - public boolean isExact() { - return false; + public Exact getExactInfo() { + return new Exact(false, "Field [" + getName() + "] is invalid, cannot access it"); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/KeywordEsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/KeywordEsField.java index d40fa7b19af92..3b77608fc8bed 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/KeywordEsField.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/KeywordEsField.java @@ -33,8 +33,8 @@ public int getPrecision() { } @Override - public boolean isExact() { - return normalized == false; + public Exact getExactInfo() { + return new Exact(normalized == false, "Normalized keyword field cannot be used for exact match operations"); } @Override @@ -52,4 +52,4 @@ public int hashCode() { return Objects.hash(super.hashCode(), precision, normalized); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/TextEsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/TextEsField.java index f1c596a301c54..4944a472e2104 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/TextEsField.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/TextEsField.java @@ -5,9 +5,11 @@ */ package org.elasticsearch.xpack.sql.type; -import org.elasticsearch.xpack.sql.analysis.index.MappingException; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import java.util.Map; +import java.util.function.Function; /** * SQL-related information about an index field with text type @@ -20,25 +22,41 @@ public TextEsField(String name, Map properties, boolean hasDocV @Override public EsField getExactField() { + Tuple findExact 
= findExact(); + if (findExact.v1() == null) { + throw new SqlIllegalArgumentException(findExact.v2()); + } + return findExact.v1(); + } + + @Override + public Exact getExactInfo() { + return PROCESS_EXACT_FIELD.apply(findExact()); + } + + private Tuple findExact() { EsField field = null; for (EsField property : getProperties().values()) { - if (property.getDataType() == DataType.KEYWORD && property.isExact()) { + if (property.getDataType() == DataType.KEYWORD && property.getExactInfo().hasExact()) { if (field != null) { - throw new MappingException("Multiple exact keyword candidates available for [" + getName() + - "]; specify which one to use"); + return new Tuple<>(null, "Multiple exact keyword candidates available for [" + getName() + + "]; specify which one to use"); } field = property; } } if (field == null) { - throw new MappingException("No keyword/multi-field defined exact matches for [" + getName() + - "]; define one or use MATCH/QUERY instead"); + return new Tuple<>(null, "No keyword/multi-field defined exact matches for [" + getName() + + "]; define one or use MATCH/QUERY instead"); } - return field; + return new Tuple<>(field, null); } - @Override - public boolean isExact() { - return false; - } + private Function, Exact> PROCESS_EXACT_FIELD = tuple -> { + if (tuple.v1() == null) { + return new Exact(false, tuple.v2()); + } else { + return new Exact(true, null); + } + }; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/UnsupportedEsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/UnsupportedEsField.java index c88d676c223b6..2909c5f199053 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/UnsupportedEsField.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/UnsupportedEsField.java @@ -26,16 +26,21 @@ public String getOriginalType() { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - if (!super.equals(o)) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } UnsupportedEsField that = (UnsupportedEsField) o; return Objects.equals(originalType, that.originalType); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), originalType); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java index deb7b9e9703c2..b0be4ec434d60 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java @@ -6,29 +6,39 @@ package org.elasticsearch.xpack.sql.util; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Foldables; +import org.elasticsearch.xpack.sql.parser.ParsingException; import org.elasticsearch.xpack.sql.proto.StringUtils; -import org.joda.time.DateTime; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; import java.time.Instant; -import java.time.LocalDateTime; +import java.time.LocalDate; import java.time.ZoneId; -import java.time.ZoneOffset; import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; 
+import java.time.format.DateTimeFormatterBuilder; import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE; +import static java.time.format.DateTimeFormatter.ISO_LOCAL_TIME; public final class DateUtils { - private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000; - - // TODO: do we have a java.time based parser we can use instead? - private static final DateTimeFormatter UTC_DATE_FORMATTER = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC(); - public static final ZoneId UTC = ZoneId.of("Z"); public static final String DATE_PARSE_FORMAT = "epoch_millis"; + private static final DateTimeFormatter DATE_TIME_ESCAPED_LITERAL_FORMATTER = new DateTimeFormatterBuilder() + .append(ISO_LOCAL_DATE) + .appendLiteral(" ") + .append(ISO_LOCAL_TIME) + .toFormatter().withZone(UTC); + + private static final DateFormatter UTC_DATE_TIME_FORMATTER = DateFormatter.forPattern("date_optional_time").withZone(UTC); + private static final int DEFAULT_PRECISION_FOR_CURRENT_FUNCTIONS = 3; + + private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000L; + private DateUtils() {} /** @@ -56,22 +66,7 @@ public static ZonedDateTime asDateTime(long millis, ZoneId id) { * Parses the given string into a Date (SQL DATE type) using UTC as a default timezone. */ public static ZonedDateTime asDateOnly(String dateFormat) { - return asDateOnly(UTC_DATE_FORMATTER.parseDateTime(dateFormat)); - } - - public static ZonedDateTime asDateOnly(DateTime dateTime) { - LocalDateTime ldt = LocalDateTime.of( - dateTime.getYear(), - dateTime.getMonthOfYear(), - dateTime.getDayOfMonth(), - 0, - 0, - 0, - 0); - - return ZonedDateTime.ofStrict(ldt, - ZoneOffset.ofTotalSeconds(dateTime.getZone().getOffset(dateTime) / 1000), - org.elasticsearch.common.time.DateUtils.dateTimeZoneToZoneId(dateTime.getZone())); + return LocalDate.parse(dateFormat, ISO_LOCAL_DATE).atStartOfDay(UTC); } public static ZonedDateTime asDateOnly(ZonedDateTime zdt) { @@ -82,25 +77,13 @@ public static ZonedDateTime asDateOnly(ZonedDateTime zdt) { * Parses the given string into a DateTime using UTC as a default timezone. 
*/ public static ZonedDateTime asDateTime(String dateFormat) { - return asDateTime(UTC_DATE_FORMATTER.parseDateTime(dateFormat)); + return DateFormatters.from(UTC_DATE_TIME_FORMATTER.parse(dateFormat)).withZoneSameInstant(UTC); } - public static ZonedDateTime asDateTime(DateTime dateTime) { - LocalDateTime ldt = LocalDateTime.of( - dateTime.getYear(), - dateTime.getMonthOfYear(), - dateTime.getDayOfMonth(), - dateTime.getHourOfDay(), - dateTime.getMinuteOfHour(), - dateTime.getSecondOfMinute(), - dateTime.getMillisOfSecond() * 1_000_000); - - return ZonedDateTime.ofStrict(ldt, - ZoneOffset.ofTotalSeconds(dateTime.getZone().getOffset(dateTime) / 1000), - org.elasticsearch.common.time.DateUtils.dateTimeZoneToZoneId(dateTime.getZone())); + public static ZonedDateTime ofEscapedLiteral(String dateFormat) { + return ZonedDateTime.parse(dateFormat, DATE_TIME_ESCAPED_LITERAL_FORMATTER.withZone(UTC)); } - public static String toString(ZonedDateTime dateTime) { return StringUtils.toString(dateTime); } @@ -115,4 +98,25 @@ public static long minDayInterval(long l) { } return l - (l % DAY_IN_MILLIS); } + + public static int getNanoPrecision(Expression precisionExpression, int nano) { + int precision = DEFAULT_PRECISION_FOR_CURRENT_FUNCTIONS; + + if (precisionExpression != null) { + try { + precision = Foldables.intValueOf(precisionExpression); + } catch (Exception e) { + throw new ParsingException(precisionExpression.source(), "invalid precision; " + e.getMessage()); + } + } + + if (precision < 0 || precision > 9) { + throw new ParsingException(precisionExpression.source(), "precision needs to be between [0-9], received [{}]", + precisionExpression.sourceText()); + } + + // remove the remainder + nano = nano - nano % (int) Math.pow(10, (9 - precision)); + return nano; + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java index 10066e7764966..bd3bab136c679 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java @@ -239,6 +239,34 @@ public static String likeToIndexWildcard(String pattern, char escape) { return wildcard.toString(); } + public static String likeToUnescaped(String pattern, char escape) { + StringBuilder wildcard = new StringBuilder(pattern.length()); + + boolean escaped = false; + for (int i = 0; i < pattern.length(); i++) { + char curr = pattern.charAt(i); + + if (escaped == false && curr == escape && escape != 0) { + escaped = true; + } else { + if (escaped == true && (curr == '%' || curr == '_' || curr == escape)) { + wildcard.append(curr); + } else { + if (escaped) { + wildcard.append(escape); + } + wildcard.append(curr); + } + escaped = false; + } + } + // corner-case when the escape char is the last char + if (escaped == true) { + wildcard.append(escape); + } + return wildcard.toString(); + } + public static String toString(SearchSourceBuilder source) { try (XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true)) { source.toXContent(builder, ToXContent.EMPTY_PARAMS); diff --git a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt index 4e9fc1475e302..f628d1be868c3 100644 --- a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt +++ 
b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt @@ -64,7 +64,9 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalS def div(Object, Object) def mod(Object, Object) def mul(Object, Object) + Number atan2(Number, Number) Number neg(Number) + Number power(Number, Number) Number round(Number, Number) Number truncate(Number, Number) diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java index be7f42d3f0c78..ccfe97ed2fae5 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.proto.Protocol; import org.elasticsearch.xpack.sql.session.Configuration; @@ -14,6 +15,14 @@ import java.time.Clock; import java.time.Duration; import java.time.ZonedDateTime; +import java.time.ZoneId; + +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; +import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.randomIntBetween; +import static org.elasticsearch.test.ESTestCase.randomNonNegativeLong; +import static org.elasticsearch.test.ESTestCase.randomZone; + public class TestUtils { @@ -21,7 +30,7 @@ private TestUtils() {} public static final Configuration TEST_CFG = new Configuration(DateUtils.UTC, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, Mode.PLAIN, - null, null, null); + null, null, null, false); /** * Returns the current UTC date-time with milliseconds precision. 
@@ -35,4 +44,32 @@ private TestUtils() {} public static final ZonedDateTime now() { return ZonedDateTime.now(Clock.tick(Clock.system(DateUtils.UTC), Duration.ofMillis(1))); } + + public static Configuration randomConfiguration() { + return new Configuration(randomZone(), + randomIntBetween(0, 1000), + new TimeValue(randomNonNegativeLong()), + new TimeValue(randomNonNegativeLong()), + null, + randomFrom(Mode.values()), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + false); + } + + public static Configuration randomConfiguration(ZoneId providedZoneId) { + return new Configuration(providedZoneId, + randomIntBetween(0, 1000), + new TimeValue(randomNonNegativeLong()), + new TimeValue(randomNonNegativeLong()), + null, + randomFrom(Mode.values()), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + false); + } + } + diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java index 607810efc666a..bc7b85b5392e9 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java @@ -6,10 +6,10 @@ package org.elasticsearch.xpack.sql.analysis.analyzer; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.TestUtils; import org.elasticsearch.xpack.sql.analysis.index.EsIndex; import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; -import org.elasticsearch.xpack.sql.analysis.index.MappingException; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.FieldAttribute; @@ -113,9 +113,9 @@ public void testExactKeyword() { assertThat(attr.path(), is("some")); assertThat(attr.name(), is("some.string")); assertThat(attr.dataType(), is(DataType.TEXT)); - assertThat(attr.isInexact(), is(true)); + assertTrue(attr.getExactInfo().hasExact()); FieldAttribute exact = attr.exactAttribute(); - assertThat(exact.isInexact(), is(false)); + assertTrue(exact.getExactInfo().hasExact()); assertThat(exact.name(), is("some.string.typical")); assertThat(exact.dataType(), is(KEYWORD)); } @@ -125,9 +125,11 @@ public void testAmbiguousExactKeyword() { assertThat(attr.path(), is("some")); assertThat(attr.name(), is("some.ambiguous")); assertThat(attr.dataType(), is(DataType.TEXT)); - assertThat(attr.isInexact(), is(true)); - MappingException me = expectThrows(MappingException.class, () -> attr.exactAttribute()); - assertThat(me.getMessage(), + assertFalse(attr.getExactInfo().hasExact()); + assertThat(attr.getExactInfo().errorMsg(), + is("Multiple exact keyword candidates available for [ambiguous]; specify which one to use")); + SqlIllegalArgumentException e = expectThrows(SqlIllegalArgumentException.class, () -> attr.exactAttribute()); + assertThat(e.getMessage(), is("Multiple exact keyword candidates available for [ambiguous]; specify which one to use")); } @@ -136,7 +138,7 @@ public void testNormalizedKeyword() { assertThat(attr.path(), is("some.string")); assertThat(attr.name(), is("some.string.normalized")); assertThat(attr.dataType(), is(KEYWORD)); - assertThat(attr.isInexact(), is(true)); + assertFalse(attr.getExactInfo().hasExact()); } public 
void testDottedFieldPath() { @@ -197,4 +199,4 @@ public void testFieldAmbiguity() { assertThat(attribute.qualifier(), is("test")); assertThat(attribute.name(), is("test.test")); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index 415472bfe3521..ea3061761db04 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -209,6 +209,27 @@ public void testSubtractFromInterval() { error("SELECT INTERVAL 1 MONTH - CAST('2000-01-01' AS DATETIME)")); } + public void testAddIntervalAndNumberNotAllowed() { + assertEquals("1:8: [+] has arguments with incompatible types [INTERVAL_DAY] and [INTEGER]", + error("SELECT INTERVAL 1 DAY + 100")); + assertEquals("1:8: [+] has arguments with incompatible types [INTEGER] and [INTERVAL_DAY]", + error("SELECT 100 + INTERVAL 1 DAY")); + } + + public void testSubtractIntervalAndNumberNotAllowed() { + assertEquals("1:8: [-] has arguments with incompatible types [INTERVAL_MINUTE] and [DOUBLE]", + error("SELECT INTERVAL 10 MINUTE - 100.0")); + assertEquals("1:8: [-] has arguments with incompatible types [DOUBLE] and [INTERVAL_MINUTE]", + error("SELECT 100.0 - INTERVAL 10 MINUTE")); + } + + public void testMultiplyIntervalWithDecimalNotAllowed() { + assertEquals("1:8: [*] has arguments with incompatible types [INTERVAL_MONTH] and [DOUBLE]", + error("SELECT INTERVAL 1 MONTH * 1.234")); + assertEquals("1:8: [*] has arguments with incompatible types [DOUBLE] and [INTERVAL_MONTH]", + error("SELECT 1.234 * INTERVAL 1 MONTH")); + } + public void testMultipleColumns() { assertEquals("1:43: Unknown column [xxx]\nline 1:8: Unknown column [xxx]", error("SELECT xxx FROM test GROUP BY DAY_oF_YEAR(xxx)")); @@ -259,7 +280,7 @@ public void testGroupByNegativeOrdinal() { } public void testGroupByOrderByAliasedInSelectAllowed() { - LogicalPlan lp = accept("SELECT text t FROM test GROUP BY text ORDER BY t"); + LogicalPlan lp = accept("SELECT int i FROM test GROUP BY int ORDER BY i"); assertNotNull(lp); } @@ -292,6 +313,12 @@ public void testStarOnNested() { assertNotNull(accept("SELECT dep.* FROM test")); } + public void testGroupByOnInexact() { + assertEquals("1:36: Field of data type [text] cannot be used for grouping; " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT COUNT(*) FROM test GROUP BY text")); + } + public void testGroupByOnNested() { assertEquals("1:38: Grouping isn't (yet) compatible with nested fields [dep.dep_id]", error("SELECT dep.dep_id FROM test GROUP BY dep.dep_id")); @@ -322,6 +349,18 @@ public void testUnsupportedTypeInFilter() { error("SELECT * FROM test WHERE unsupported > 1")); } + public void testTermEqualitOnInexact() { + assertEquals("1:26: [text = 'value'] cannot operate on first argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT * FROM test WHERE text = 'value'")); + } + + public void testTermEqualityOnAmbiguous() { + assertEquals("1:26: [some.ambiguous = 'value'] cannot operate on first argument field of data type [text]: " + + "Multiple exact keyword candidates available for 
[ambiguous]; specify which one to use", + error("SELECT * FROM test WHERE some.ambiguous = 'value'")); + } + public void testUnsupportedTypeInFunction() { assertEquals("1:12: Cannot use field [unsupported] type [ip_range] as is unsupported", error("SELECT ABS(unsupported) FROM test")); @@ -332,6 +371,12 @@ public void testUnsupportedTypeInOrder() { error("SELECT * FROM test ORDER BY unsupported")); } + public void testInexactFieldInOrder() { + assertEquals("1:29: ORDER BY cannot be applied to field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT * FROM test ORDER BY text")); + } + public void testGroupByOrderByAggregate() { accept("SELECT AVG(int) a FROM test GROUP BY bool ORDER BY a"); } @@ -385,85 +430,143 @@ public void testInNestedWithDifferentDataTypesFromLeftValue_SelectClause() { } public void testInWithDifferentDataTypes_WhereClause() { - assertEquals("1:49: expected data type [text], value provided is of type [integer]", - error("SELECT * FROM test WHERE text IN ('foo', 'bar', 4)")); + assertEquals("1:52: expected data type [keyword], value provided is of type [integer]", + error("SELECT * FROM test WHERE keyword IN ('foo', 'bar', 4)")); } public void testInNestedWithDifferentDataTypes_WhereClause() { - assertEquals("1:60: expected data type [text], value provided is of type [integer]", - error("SELECT * FROM test WHERE int = 1 OR text IN ('foo', 'bar', 2)")); + assertEquals("1:63: expected data type [keyword], value provided is of type [integer]", + error("SELECT * FROM test WHERE int = 1 OR keyword IN ('foo', 'bar', 2)")); } public void testInWithDifferentDataTypesFromLeftValue_WhereClause() { - assertEquals("1:35: expected data type [text], value provided is of type [integer]", - error("SELECT * FROM test WHERE text IN (1, 2)")); + assertEquals("1:38: expected data type [keyword], value provided is of type [integer]", + error("SELECT * FROM test WHERE keyword IN (1, 2)")); } public void testInNestedWithDifferentDataTypesFromLeftValue_WhereClause() { - assertEquals("1:46: expected data type [text], value provided is of type [integer]", - error("SELECT * FROM test WHERE int = 1 OR text IN (1, 2)")); + assertEquals("1:49: expected data type [keyword], value provided is of type [integer]", + error("SELECT * FROM test WHERE int = 1 OR keyword IN (1, 2)")); + } + + public void testInWithFieldInListOfValues() { + assertEquals("1:26: Comparisons against variables are not (currently) supported; offender [int] in [int IN (1, int)]", + error("SELECT * FROM test WHERE int IN (1, int)")); + } + + public void testInOnFieldTextWithNoKeyword() { + assertEquals("1:26: [IN] cannot operate on field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT * FROM test WHERE text IN ('foo', 'bar')")); } public void testNotSupportedAggregateOnDate() { - assertEquals("1:8: [AVG(date)] argument must be [numeric], found value [date] type [datetime]", + assertEquals("1:8: argument of [AVG(date)] must be [numeric], found value [date] type [datetime]", error("SELECT AVG(date) FROM test")); } - public void testInvalidTypeForStringFunction_WithOneArg() { - assertEquals("1:8: [LENGTH] argument must be [string], found value [1] type [integer]", + public void testInvalidTypeForStringFunction_WithOneArgString() { + assertEquals("1:8: argument of [LENGTH(1)] must be [string], found value [1] type [integer]", error("SELECT LENGTH(1)")); } + 
public void testInvalidTypeForStringFunction_WithOneArgNumeric() { + assertEquals("1:8: argument of [CHAR('foo')] must be [integer], found value ['foo'] type [keyword]", + error("SELECT CHAR('foo')")); + } + + public void testInvalidTypeForNestedStringFunctions_WithOneArg() { + assertEquals("1:14: argument of [CHAR('foo')] must be [integer], found value ['foo'] type [keyword]", + error("SELECT ASCII(CHAR('foo'))")); + } + public void testInvalidTypeForNumericFunction_WithOneArg() { - assertEquals("1:8: [COS] argument must be [numeric], found value ['foo'] type [keyword]", + assertEquals("1:8: argument of [COS('foo')] must be [numeric], found value ['foo'] type [keyword]", error("SELECT COS('foo')")); } public void testInvalidTypeForBooleanFunction_WithOneArg() { - assertEquals("1:8: [NOT 'foo'] argument must be [boolean], found value ['foo'] type [keyword]", + assertEquals("1:8: argument of [NOT 'foo'] must be [boolean], found value ['foo'] type [keyword]", error("SELECT NOT 'foo'")); } public void testInvalidTypeForStringFunction_WithTwoArgs() { - assertEquals("1:8: [CONCAT(1, 'bar')] first argument must be [string], found value [1] type [integer]", + assertEquals("1:8: first argument of [CONCAT] must be [string], found value [1] type [integer]", error("SELECT CONCAT(1, 'bar')")); - assertEquals("1:8: [CONCAT('foo', 2)] second argument must be [string], found value [2] type [integer]", + assertEquals("1:8: second argument of [CONCAT] must be [string], found value [2] type [integer]", error("SELECT CONCAT('foo', 2)")); } public void testInvalidTypeForNumericFunction_WithTwoArgs() { - assertEquals("1:8: [TRUNCATE('foo', 2)] first argument must be [numeric], found value ['foo'] type [keyword]", + assertEquals("1:8: first argument of [TRUNCATE('foo', 2)] must be [numeric], found value ['foo'] type [keyword]", error("SELECT TRUNCATE('foo', 2)")); - assertEquals("1:8: [TRUNCATE(1.2, 'bar')] second argument must be [numeric], found value ['bar'] type [keyword]", + assertEquals("1:8: second argument of [TRUNCATE(1.2, 'bar')] must be [integer], found value ['bar'] type [keyword]", error("SELECT TRUNCATE(1.2, 'bar')")); } public void testInvalidTypeForBooleanFuntion_WithTwoArgs() { - assertEquals("1:8: [1 OR true] first argument must be [boolean], found value [1] type [integer]", + assertEquals("1:8: first argument of [1 OR true] must be [boolean], found value [1] type [integer]", error("SELECT 1 OR true")); - assertEquals("1:8: [true OR 2] second argument must be [boolean], found value [2] type [integer]", + assertEquals("1:8: second argument of [true OR 2] must be [boolean], found value [2] type [integer]", error("SELECT true OR 2")); } - public void testInvalidTypeForFunction_WithThreeArgs() { - assertEquals("1:8: [REPLACE(1, 'foo', 'bar')] first argument must be [string], found value [1] type [integer]", + public void testInvalidTypeForReplace() { + assertEquals("1:8: first argument of [REPLACE(1, 'foo', 'bar')] must be [string], found value [1] type [integer]", error("SELECT REPLACE(1, 'foo', 'bar')")); - assertEquals("1:8: [REPLACE('text', 2, 'bar')] second argument must be [string], found value [2] type [integer]", - error("SELECT REPLACE('text', 2, 'bar')")); - assertEquals("1:8: [REPLACE('text', 'foo', 3)] third argument must be [string], found value [3] type [integer]", - error("SELECT REPLACE('text', 'foo', 3)")); + assertEquals("1:8: [REPLACE(text, 'foo', 'bar')] cannot operate on first argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; 
define one or use MATCH/QUERY instead", + error("SELECT REPLACE(text, 'foo', 'bar') FROM test")); + + assertEquals("1:8: second argument of [REPLACE('foo', 2, 'bar')] must be [string], found value [2] type [integer]", + error("SELECT REPLACE('foo', 2, 'bar')")); + assertEquals("1:8: [REPLACE('foo', text, 'bar')] cannot operate on second argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT REPLACE('foo', text, 'bar') FROM test")); + + assertEquals("1:8: third argument of [REPLACE('foo', 'bar', 3)] must be [string], found value [3] type [integer]", + error("SELECT REPLACE('foo', 'bar', 3)")); + assertEquals("1:8: [REPLACE('foo', 'bar', text)] cannot operate on third argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT REPLACE('foo', 'bar', text) FROM test")); + } + + public void testInvalidTypeForSubString() { + assertEquals("1:8: first argument of [SUBSTRING(1, 2, 3)] must be [string], found value [1] type [integer]", + error("SELECT SUBSTRING(1, 2, 3)")); + assertEquals("1:8: [SUBSTRING(text, 2, 3)] cannot operate on first argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT SUBSTRING(text, 2, 3) FROM test")); + + assertEquals("1:8: second argument of [SUBSTRING('foo', 'bar', 3)] must be [integer], found value ['bar'] type [keyword]", + error("SELECT SUBSTRING('foo', 'bar', 3)")); + + assertEquals("1:8: third argument of [SUBSTRING('foo', 2, 'bar')] must be [integer], found value ['bar'] type [keyword]", + error("SELECT SUBSTRING('foo', 2, 'bar')")); } public void testInvalidTypeForFunction_WithFourArgs() { - assertEquals("1:8: [INSERT(1, 1, 2, 'new')] first argument must be [string], found value [1] type [integer]", + assertEquals("1:8: first argument of [INSERT(1, 1, 2, 'new')] must be [string], found value [1] type [integer]", error("SELECT INSERT(1, 1, 2, 'new')")); - assertEquals("1:8: [INSERT('text', 'foo', 2, 'new')] second argument must be [numeric], found value ['foo'] type [keyword]", + assertEquals("1:8: second argument of [INSERT('text', 'foo', 2, 'new')] must be [numeric], found value ['foo'] type [keyword]", error("SELECT INSERT('text', 'foo', 2, 'new')")); - assertEquals("1:8: [INSERT('text', 1, 'bar', 'new')] third argument must be [numeric], found value ['bar'] type [keyword]", + assertEquals("1:8: third argument of [INSERT('text', 1, 'bar', 'new')] must be [numeric], found value ['bar'] type [keyword]", error("SELECT INSERT('text', 1, 'bar', 'new')")); - assertEquals("1:8: [INSERT('text', 1, 2, 3)] fourth argument must be [string], found value [3] type [integer]", + assertEquals("1:8: fourth argument of [INSERT('text', 1, 2, 3)] must be [string], found value [3] type [integer]", error("SELECT INSERT('text', 1, 2, 3)")); } + + public void testInvalidTypeForLikeMatch() { + assertEquals("1:26: [text LIKE 'foo'] cannot operate on field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT * FROM test WHERE text LIKE 'foo'")); + } + + public void testInvalidTypeForRLikeMatch() { + assertEquals("1:26: [text RLIKE 'foo'] cannot operate on field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", + error("SELECT * FROM 
test WHERE text RLIKE 'foo'")); + } public void testAllowCorrectFieldsInIncompatibleMappings() { assertNotNull(incompatibleAccept("SELECT languages FROM \"*\"")); @@ -555,10 +658,20 @@ public void testGroupByScalarOnTopOfGrouping() { } public void testAggsInHistogram() { - assertEquals("1:47: Cannot use an aggregate [MAX] for grouping", - error("SELECT MAX(date) FROM test GROUP BY HISTOGRAM(MAX(int), 1)")); + assertEquals("1:37: Cannot use an aggregate [MAX] for grouping", + error("SELECT MAX(date) FROM test GROUP BY MAX(int)")); } - + + public void testGroupingsInHistogram() { + assertEquals( + "1:47: Cannot embed grouping functions within each other, found [HISTOGRAM(int, 1)] in [HISTOGRAM(HISTOGRAM(int, 1), 1)]", + error("SELECT MAX(date) FROM test GROUP BY HISTOGRAM(HISTOGRAM(int, 1), 1)")); + } + + public void testCastInHistogram() { + accept("SELECT MAX(date) FROM test GROUP BY HISTOGRAM(CAST(int AS LONG), 1)"); + } + public void testHistogramNotInGrouping() { assertEquals("1:8: [HISTOGRAM(date, INTERVAL 1 MONTH)] needs to be part of the grouping", error("SELECT HISTOGRAM(date, INTERVAL 1 MONTH) AS h FROM test")); @@ -595,32 +708,34 @@ public void testScalarOfHistogramNotInGrouping() { } public void testErrorMessageForPercentileWithSecondArgBasedOnAField() { - assertEquals("1:8: Second argument of PERCENTILE must be a constant, received [ABS(int)]", + assertEquals("1:8: second argument of [PERCENTILE(int, ABS(int))] must be a constant, received [ABS(int)]", error("SELECT PERCENTILE(int, ABS(int)) FROM test")); } public void testErrorMessageForPercentileRankWithSecondArgBasedOnAField() { - assertEquals("1:8: Second argument of PERCENTILE_RANK must be a constant, received [ABS(int)]", + assertEquals("1:8: second argument of [PERCENTILE_RANK(int, ABS(int))] must be a constant, received [ABS(int)]", error("SELECT PERCENTILE_RANK(int, ABS(int)) FROM test")); } public void testTopHitsFirstArgConstant() { - assertEquals("1:8: First argument of [FIRST] must be a table column, found constant ['foo']", + assertEquals("1:8: first argument of [FIRST('foo', int)] must be a table column, found constant ['foo']", error("SELECT FIRST('foo', int) FROM test")); } public void testTopHitsSecondArgConstant() { - assertEquals("1:8: Second argument of [LAST] must be a table column, found constant [10]", + assertEquals("1:8: second argument of [LAST(int, 10)] must be a table column, found constant [10]", error("SELECT LAST(int, 10) FROM test")); } public void testTopHitsFirstArgTextWithNoKeyword() { - assertEquals("1:8: [FIRST] cannot operate on first argument field of data type [text]", + assertEquals("1:8: [FIRST(text)] cannot operate on first argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", error("SELECT FIRST(text) FROM test")); } public void testTopHitsSecondArgTextWithNoKeyword() { - assertEquals("1:8: [LAST] cannot operate on second argument field of data type [text]", + assertEquals("1:8: [LAST(keyword, text)] cannot operate on second argument field of data type [text]: " + + "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead", error("SELECT LAST(keyword, text) FROM test")); } @@ -629,6 +744,18 @@ public void testTopHitsGroupByHavingUnsupported() { error("SELECT FIRST(int) FROM test GROUP BY text HAVING FIRST(int) > 10")); } + public void testMinOnInexactUnsupported() { + assertEquals("1:8: [MIN(text)] cannot operate on field of data type [text]: " + + "No 
keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead",
+            error("SELECT MIN(text) FROM test"));
+    }
+
+    public void testMaxOnInexactUnsupported() {
+        assertEquals("1:8: [MAX(text)] cannot operate on field of data type [text]: " +
+                "No keyword/multi-field defined exact matches for [text]; define one or use MATCH/QUERY instead",
+            error("SELECT MAX(text) FROM test"));
+    }
+
     public void testMinOnKeywordGroupByHavingUnsupported() {
         assertEquals("1:52: HAVING filter is unsupported for function [MIN(keyword)]",
             error("SELECT MIN(keyword) FROM test GROUP BY text HAVING MIN(keyword) > 10"));
@@ -650,4 +777,4 @@ public void testAggregateAliasInFilter() {
     public void testProjectUnresolvedAliasInFilter() {
         assertEquals("1:8: Unknown column [tni]", error("SELECT tni AS i FROM test WHERE i > 10 GROUP BY i"));
     }
-}
\ No newline at end of file
+}
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java
index 6123bdf5d8fbb..0f4f8f030506c 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java
@@ -133,6 +133,48 @@ public void testMetaFieldsAreIgnored() throws Exception {
         assertEquals(DataType.KEYWORD, esIndex.mapping().get("text").getDataType());
     }
 
+    public void testMergeIncompatibleCapabilitiesOfObjectFields() throws Exception {
+        Map<String, Map<String, FieldCapabilities>> fieldCaps = new HashMap<>();
+
+        int depth = randomInt(5);
+
+        List<String> level = new ArrayList<>();
+        String fieldName = randomAlphaOfLength(3);
+        level.add(fieldName);
+        for (int i = 0; i <= depth; i++) {
+            String l = randomAlphaOfLength(3);
+            level.add(l);
+            fieldName += "." + l;
+        }
+
+        // define a sub-field
+        addFieldCaps(fieldCaps, fieldName + ".keyword", "keyword", true, true);
+
+        Map<String, FieldCapabilities> multi = new HashMap<>();
+        multi.put("long", new FieldCapabilities(fieldName, "long", true, true, new String[] { "one-index" }, null, null));
+        multi.put("text", new FieldCapabilities(fieldName, "text", true, false, new String[] { "another-index" }, null, null));
+        fieldCaps.put(fieldName, multi);
+
+
+        String wildcard = "*";
+        IndexResolution resolution = IndexResolver.mergedMapping(wildcard, fieldCaps);
+
+        assertTrue(resolution.isValid());
+
+        EsIndex esIndex = resolution.get();
+        assertEquals(wildcard, esIndex.name());
+        EsField esField = null;
+        Map<String, EsField> props = esIndex.mapping();
+        for (String lvl : level) {
+            esField = props.get(lvl);
+            props = esField.getProperties();
+        }
+        assertEquals(InvalidMappedField.class, esField.getClass());
+        assertEquals("mapped as [2] incompatible types: [text] in [another-index], [long] in [one-index]",
+            ((InvalidMappedField) esField).errorMessage());
+    }
+
+
     public static IndexResolution merge(EsIndex... indices) {
         return IndexResolver.mergedMapping("*", fromMappings(indices));
     }
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java
index f2dccc396dbd3..8905a17e0e504 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java
@@ -81,4 +81,4 @@ static BitSet randomBitSet(int size) {
         }
         return mask;
     }
-}
\ No newline at end of file
+}
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/QuerierTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/QuerierTests.java
new file mode 100644
index 0000000000000..a6caad899dd89
--- /dev/null
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/QuerierTests.java
@@ -0,0 +1,109 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.execution.search;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.sql.execution.search.Querier.AggSortingQueue;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+public class QuerierTests extends ESTestCase {
+
+    @SuppressWarnings("rawtypes")
+    public void testAggSortingAscending() {
+        Tuple<Integer, Comparator<Integer>> tuple = new Tuple<>(0, Comparator.naturalOrder());
+        Querier.AggSortingQueue queue = new AggSortingQueue(10, Collections.singletonList(tuple));
+        for (int i = 50; i >= 0; i--) {
+            queue.insertWithOverflow(new Tuple<>(Collections.singletonList(i), i));
+        }
+        List<List<?>> results = queue.asList();
+
+        assertEquals(10, results.size());
+        for (int i = 0; i < 10; i ++) {
+            assertEquals(i, results.get(i).get(0));
+        }
+    }
+
+    @SuppressWarnings("rawtypes")
+    public void testAggSortingDescending() {
+        Tuple<Integer, Comparator<Integer>> tuple = new Tuple<>(0, Comparator.reverseOrder());
+        Querier.AggSortingQueue queue = new AggSortingQueue(10, Collections.singletonList(tuple));
+        for (int i = 0; i <= 50; i++) {
+            queue.insertWithOverflow(new Tuple<>(Collections.singletonList(i), i));
+        }
+        List<List<?>> results = queue.asList();
+
+        assertEquals(10, results.size());
+        for (int i = 0; i < 10; i ++) {
+            assertEquals(50 - i, results.get(i).get(0));
+        }
+    }
+
+    @SuppressWarnings("rawtypes")
+    public void testAggSorting_TwoFields() {
+        List<Tuple<Integer, Comparator<Integer>>> tuples = new ArrayList<>(2);
+        tuples.add(new Tuple<>(0, Comparator.reverseOrder()));
+        tuples.add(new Tuple<>(1, Comparator.naturalOrder()));
+        Querier.AggSortingQueue queue = new AggSortingQueue(10, tuples);
+
+        for (int i = 1; i <= 100; i++) {
+            queue.insertWithOverflow(new Tuple<>(Arrays.asList(i % 50 + 1, i), i));
+        }
+        List<List<?>> results = queue.asList();
+
+        assertEquals(10, results.size());
+        for (int i = 0; i < 10; i++) {
+            assertEquals(50 - (i / 2), results.get(i).get(0));
+            assertEquals(49 - (i / 2) + ((i % 2) * 50), results.get(i).get(1));
+        }
+    }
+
+    @SuppressWarnings("rawtypes")
+    public void testAggSorting_Randomized() {
+        // Initialize comparators for fields (columns)
+        int noColumns = randomIntBetween(3, 10);
+        List<Tuple<Integer, Comparator<Integer>>> tuples = new ArrayList<>(noColumns);
+        boolean[] ordering = new boolean[noColumns];
+        for (int j = 0; j < noColumns; j++) {
+            boolean order = randomBoolean();
+            ordering[j] = order;
+            tuples.add(new Tuple<>(j, order ? Comparator.naturalOrder() : Comparator.reverseOrder()));
+        }
+
+        // Insert random no of documents (rows) with random 0/1 values for each field
+        int noDocs = randomIntBetween(10, 50);
+        int queueSize = randomIntBetween(4, noDocs / 2);
+        List<List<Integer>> expected = new ArrayList<>(noDocs);
+        Querier.AggSortingQueue queue = new AggSortingQueue(queueSize, tuples);
+        for (int i = 0; i < noDocs; i++) {
+            List<Integer> values = new ArrayList<>(noColumns);
+            for (int j = 0; j < noColumns; j++) {
+                values.add(randomBoolean() ? 1 : 0);
+            }
+            queue.insertWithOverflow(new Tuple<>(values, i));
+            expected.add(values);
+        }
+
+        List<List<?>> results = queue.asList();
+        assertEquals(queueSize, results.size());
+        expected.sort((o1, o2) -> {
+            for (int j = 0; j < noColumns; j++) {
+                if (o1.get(j) < o2.get(j)) {
+                    return ordering[j] ? -1 : 1;
+                } else if (o1.get(j) > o2.get(j)) {
+                    return ordering[j] ? 1 : -1;
+                }
+            }
+            return 0;
+        });
+        assertEquals(expected.subList(0, queueSize), results);
+    }
+}
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java
index 0c56d7783f8b6..fce24758a3b4d 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
 import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.sort.FieldSortBuilder;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.ESTestCase;
@@ -111,6 +112,13 @@ public void testNoSort() {
         assertEquals(singletonList(fieldSort("_doc").order(SortOrder.ASC)), sourceBuilder.sorts());
     }
 
+    public void testTrackHits() {
+        SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(new QueryContainer().withTrackHits(), null,
+            randomIntBetween(1, 10));
+        assertEquals("Should have tracked hits", Integer.valueOf(SearchContext.TRACK_TOTAL_HITS_ACCURATE),
+            sourceBuilder.trackTotalHitsUpTo());
+    }
+
     public void testNoSortIfAgg() {
         QueryContainer container = new QueryContainer()
             .addGroups(singletonList(new GroupByValue("group_id", "group_column")))
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java
index 0561b6820641d..0491c77070e4d 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java
@@ -9,22 +9,23 @@
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
 import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import 
org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef.Property; import org.elasticsearch.xpack.sql.util.DateUtils; -import java.io.IOException; import java.time.ZoneId; import static java.util.Arrays.asList; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.sql.util.DateUtils.UTC; public class CompositeKeyExtractorTests extends AbstractWireSerializingTestCase { public static CompositeKeyExtractor randomCompositeKeyExtractor() { - return new CompositeKeyExtractor(randomAlphaOfLength(16), randomFrom(asList(Property.values())), randomSafeZone()); + return new CompositeKeyExtractor(randomAlphaOfLength(16), randomFrom(asList(Property.values())), randomSafeZone(), randomBoolean()); } @Override @@ -38,19 +39,23 @@ protected Reader instanceReader() { } @Override - protected CompositeKeyExtractor mutateInstance(CompositeKeyExtractor instance) throws IOException { - return new CompositeKeyExtractor(instance.key() + "mutated", instance.property(), instance.zoneId()); + protected CompositeKeyExtractor mutateInstance(CompositeKeyExtractor instance) { + return new CompositeKeyExtractor( + instance.key() + "mutated", + randomValueOtherThan(instance.property(), () -> randomFrom(Property.values())), + randomValueOtherThan(instance.zoneId(), ESTestCase::randomZone), + !instance.isDateTimeBased()); } public void testExtractBucketCount() { Bucket bucket = new TestBucket(emptyMap(), randomLong(), new Aggregations(emptyList())); CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.COUNT, - randomZone()); + randomZone(), false); assertEquals(bucket.getDocCount(), extractor.extract(bucket)); } public void testExtractKey() { - CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, null); + CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, UTC, false); Object value = new Object(); Bucket bucket = new TestBucket(singletonMap(extractor.key(), value), randomLong(), new Aggregations(emptyList())); @@ -58,7 +63,7 @@ public void testExtractKey() { } public void testExtractDate() { - CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, randomSafeZone()); + CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, randomSafeZone(), true); long millis = System.currentTimeMillis(); Bucket bucket = new TestBucket(singletonMap(extractor.key(), millis), randomLong(), new Aggregations(emptyList())); @@ -66,7 +71,7 @@ public void testExtractDate() { } public void testExtractIncorrectDateKey() { - CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, randomZone()); + CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, randomZone(), true); Object value = new Object(); Bucket bucket = new TestBucket(singletonMap(extractor.key(), value), randomLong(), new Aggregations(emptyList())); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java index c628b090df2a6..6db6c5abd166b 100644 --- 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java @@ -29,6 +29,7 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine; +import static org.elasticsearch.xpack.sql.util.DateUtils.UTC; public class ComputingExtractorTests extends AbstractWireSerializingTestCase { public static ComputingExtractor randomComputingExtractor() { @@ -70,7 +71,7 @@ protected ComputingExtractor mutateInstance(ComputingExtractor instance) throws public void testGet() { String fieldName = randomAlphaOfLength(5); ChainingProcessor extractor = new ChainingProcessor( - new HitExtractorProcessor(new FieldHitExtractor(fieldName, null, true)), + new HitExtractorProcessor(new FieldHitExtractor(fieldName, null, UTC, true, false)), new MathProcessor(MathOperation.LOG)); int times = between(1, 1000); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index 2e66192fbcbfc..a1c87f1734c92 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -18,6 +18,9 @@ import org.elasticsearch.xpack.sql.util.DateUtils; import java.io.IOException; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -28,14 +31,17 @@ import java.util.function.Supplier; import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.sql.util.DateUtils.UTC; import static org.hamcrest.Matchers.is; public class FieldHitExtractorTests extends AbstractWireSerializingTestCase { + public static FieldHitExtractor randomFieldHitExtractor() { String hitName = randomAlphaOfLength(5); String name = randomAlphaOfLength(5) + "." + hitName; - return new FieldHitExtractor(name, null, randomBoolean(), hitName); + return new FieldHitExtractor(name, null, randomZone(), randomBoolean(), hitName, false); } @Override @@ -50,7 +56,13 @@ protected Reader instanceReader() { @Override protected FieldHitExtractor mutateInstance(FieldHitExtractor instance) { - return new FieldHitExtractor(instance.fieldName() + "mutated", null, true, instance.hitName()); + return new FieldHitExtractor( + instance.fieldName() + "mutated", + randomValueOtherThan(instance.dataType(), () -> randomFrom(DataType.values())), + randomValueOtherThan(instance.zoneId(), ESTestCase::randomZone), + randomBoolean(), + instance.hitName() + "mutated", + randomBoolean()); } public void testGetDottedValueWithDocValues() { @@ -59,7 +71,7 @@ public void testGetDottedValueWithDocValues() { String child = randomAlphaOfLength(5); String fieldName = grandparent + "." + parent + "." 
+ child; - FieldHitExtractor extractor = new FieldHitExtractor(fieldName, null, true); + FieldHitExtractor extractor = getFieldHitExtractor(fieldName, true); int times = between(1, 1000); for (int i = 0; i < times; i++) { @@ -83,7 +95,7 @@ public void testGetDottedValueWithSource() throws Exception { String child = randomAlphaOfLength(5); String fieldName = grandparent + "." + parent + "." + child; - FieldHitExtractor extractor = new FieldHitExtractor(fieldName, null, false); + FieldHitExtractor extractor = getFieldHitExtractor(fieldName, false); int times = between(1, 1000); for (int i = 0; i < times; i++) { @@ -116,13 +128,13 @@ public void testGetDottedValueWithSource() throws Exception { BytesReference sourceRef = BytesReference.bytes(source); hit.sourceRef(sourceRef); Object extract = extractor.extract(hit); - assertEquals(hasSource ? value : null, extract); + assertFieldHitEquals(hasSource ? value : null, extract); } } public void testGetDocValue() { String fieldName = randomAlphaOfLength(5); - FieldHitExtractor extractor = new FieldHitExtractor(fieldName, null, true); + FieldHitExtractor extractor = getFieldHitExtractor(fieldName, true); int times = between(1, 1000); for (int i = 0; i < times; i++) { @@ -139,18 +151,19 @@ public void testGetDocValue() { } public void testGetDate() { + ZoneId zoneId = randomZone(); long millis = 1526467911780L; List documentFieldValues = Collections.singletonList(Long.toString(millis)); SearchHit hit = new SearchHit(1); DocumentField field = new DocumentField("my_date_field", documentFieldValues); hit.fields(singletonMap("my_date_field", field)); - FieldHitExtractor extractor = new FieldHitExtractor("my_date_field", DataType.DATETIME, true); - assertEquals(DateUtils.asDateTime(millis), extractor.extract(hit)); + FieldHitExtractor extractor = new FieldHitExtractor("my_date_field", DataType.DATETIME, zoneId, true); + assertEquals(DateUtils.asDateTime(millis, zoneId), extractor.extract(hit)); } public void testGetSource() throws IOException { String fieldName = randomAlphaOfLength(5); - FieldHitExtractor extractor = new FieldHitExtractor(fieldName, null, false); + FieldHitExtractor extractor = getFieldHitExtractor(fieldName, false); int times = between(1, 1000); for (int i = 0; i < times; i++) { @@ -168,17 +181,18 @@ public void testGetSource() throws IOException { source.endObject(); BytesReference sourceRef = BytesReference.bytes(source); hit.sourceRef(sourceRef); - assertEquals(value, extractor.extract(hit)); + assertFieldHitEquals(value, extractor.extract(hit)); } } public void testToString() { - assertEquals("hit.field@hit", new FieldHitExtractor("hit.field", null, true, "hit").toString()); + assertEquals("hit.field@hit@Europe/Berlin", + new FieldHitExtractor("hit.field", null, ZoneId.of("Europe/Berlin"), true, "hit", false).toString()); } public void testMultiValuedDocValue() { String fieldName = randomAlphaOfLength(5); - FieldHitExtractor fe = new FieldHitExtractor(fieldName, null, true); + FieldHitExtractor fe = getFieldHitExtractor(fieldName, true); SearchHit hit = new SearchHit(1); DocumentField field = new DocumentField(fieldName, asList("a", "b")); hit.fields(singletonMap(fieldName, field)); @@ -188,7 +202,7 @@ public void testMultiValuedDocValue() { public void testMultiValuedSourceValue() throws IOException { String fieldName = randomAlphaOfLength(5); - FieldHitExtractor fe = new FieldHitExtractor(fieldName, null, false); + FieldHitExtractor fe = getFieldHitExtractor(fieldName, false); SearchHit hit = new SearchHit(1); XContentBuilder 
source = JsonXContent.contentBuilder(); source.startObject(); { @@ -203,7 +217,7 @@ public void testMultiValuedSourceValue() throws IOException { public void testSingleValueArrayInSource() throws IOException { String fieldName = randomAlphaOfLength(5); - FieldHitExtractor fe = new FieldHitExtractor(fieldName, null, false); + FieldHitExtractor fe = getFieldHitExtractor(fieldName, false); SearchHit hit = new SearchHit(1); XContentBuilder source = JsonXContent.contentBuilder(); Object value = randomValue(); @@ -213,18 +227,18 @@ public void testSingleValueArrayInSource() throws IOException { source.endObject(); BytesReference sourceRef = BytesReference.bytes(source); hit.sourceRef(sourceRef); - assertEquals(value, fe.extract(hit)); + assertFieldHitEquals(value, fe.extract(hit)); } public void testExtractSourcePath() { - FieldHitExtractor fe = new FieldHitExtractor("a.b.c", null, false); + FieldHitExtractor fe = getFieldHitExtractor("a.b.c", false); Object value = randomValue(); Map map = singletonMap("a", singletonMap("b", singletonMap("c", value))); assertThat(fe.extractFromSource(map), is(value)); } public void testExtractSourceIncorrectPath() { - FieldHitExtractor fe = new FieldHitExtractor("a.b.c.d", null, false); + FieldHitExtractor fe = getFieldHitExtractor("a.b.c.d", false); Object value = randomNonNullValue(); Map map = singletonMap("a", singletonMap("b", singletonMap("c", value))); SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map)); @@ -232,41 +246,56 @@ public void testExtractSourceIncorrectPath() { } public void testMultiValuedSource() { - FieldHitExtractor fe = new FieldHitExtractor("a", null, false); + FieldHitExtractor fe = getFieldHitExtractor("a", false); Object value = randomValue(); Map map = singletonMap("a", asList(value, value)); SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map)); assertThat(ex.getMessage(), is("Arrays (returned by [a]) are not supported")); } + public void testMultiValuedSourceAllowed() { + FieldHitExtractor fe = new FieldHitExtractor("a", null, UTC, false, true); + Object valueA = randomValue(); + Object valueB = randomValue(); + Map map = singletonMap("a", asList(valueA, valueB)); + assertEquals(valueA, fe.extractFromSource(map)); + } + public void testFieldWithDots() { - FieldHitExtractor fe = new FieldHitExtractor("a.b", null, false); + FieldHitExtractor fe = getFieldHitExtractor("a.b", false); Object value = randomValue(); Map map = singletonMap("a.b", value); assertEquals(value, fe.extractFromSource(map)); } public void testNestedFieldWithDots() { - FieldHitExtractor fe = new FieldHitExtractor("a.b.c", null, false); + FieldHitExtractor fe = getFieldHitExtractor("a.b.c", false); Object value = randomValue(); Map map = singletonMap("a", singletonMap("b.c", value)); assertEquals(value, fe.extractFromSource(map)); } public void testNestedFieldWithDotsWithNestedField() { - FieldHitExtractor fe = new FieldHitExtractor("a.b.c.d", null, false); + FieldHitExtractor fe = getFieldHitExtractor("a.b.c.d", false); Object value = randomValue(); Map map = singletonMap("a", singletonMap("b.c", singletonMap("d", value))); assertEquals(value, fe.extractFromSource(map)); } public void testNestedFieldWithDotsWithNestedFieldWithDots() { - FieldHitExtractor fe = new FieldHitExtractor("a.b.c.d.e", null, false); + FieldHitExtractor fe = getFieldHitExtractor("a.b.c.d.e", false); Object value = randomValue(); Map map = singletonMap("a", singletonMap("b.c", singletonMap("d.e", value))); assertEquals(value, 
fe.extractFromSource(map)); } + public void testEmptyArrayOfValues() { + FieldHitExtractor fe = new FieldHitExtractor("test_field", null, UTC, false, randomBoolean()); + Map map = singletonMap("test_field", Collections.emptyList()); + assertNull(fe.extractFromSource(map)); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) public void testNestedFieldsWithDotsAndRandomHiearachy() { String[] path = new String[100]; StringJoiner sj = new StringJoiner("."); @@ -274,7 +303,8 @@ public void testNestedFieldsWithDotsAndRandomHiearachy() { path[i] = randomAlphaOfLength(randomIntBetween(1, 10)); sj.add(path[i]); } - FieldHitExtractor fe = new FieldHitExtractor(sj.toString(), null, false); + boolean arrayLeniency = randomBoolean(); + FieldHitExtractor fe = new FieldHitExtractor(sj.toString(), null, UTC, false, arrayLeniency); List paths = new ArrayList<>(path.length); int start = 0; @@ -288,16 +318,46 @@ public void testNestedFieldsWithDotsAndRandomHiearachy() { start = end; } + /* + * Randomize how many values the field to look for will have (1 - 3). It's not really relevant how many values there are in the list + * but that the list has one element or more than one. + * If it has one value, then randomize the way it's indexed: as a single-value array or not e.g.: "a":"value" or "a":["value"]. + * If it has more than one value, it will always be an array e.g.: "a":["v1","v2","v3"]. + */ + int valuesCount = randomIntBetween(1, 3); Object value = randomValue(); + if (valuesCount == 1) { + value = randomBoolean() ? singletonList(value) : value; + } else { + value = new ArrayList(valuesCount); + for(int i = 0; i < valuesCount; i++) { + ((List) value).add(randomValue()); + } + } + + // the path to the randomly generated fields path + StringBuilder expected = new StringBuilder(paths.get(paths.size() - 1)); + // the actual value we will be looking for in the test at the end Map map = singletonMap(paths.get(paths.size() - 1), value); + // build the rest of the path and the expected path to check against in the error message for (int i = paths.size() - 2; i >= 0; i--) { - map = singletonMap(paths.get(i), map); + map = singletonMap(paths.get(i), randomBoolean() ? singletonList(map) : map); + expected.insert(0, paths.get(i) + "."); + } + + if (valuesCount == 1 || arrayLeniency) { + // if the number of generated values is 1, just check we return the correct value + assertEquals(value instanceof List ? 
((List) value).get(0) : value, fe.extractFromSource(map)); + } else { + // if we have an array with more than one value in it, check that we throw the correct exception and exception message + final Map map2 = Collections.unmodifiableMap(map); + SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map2)); + assertThat(ex.getMessage(), is("Arrays (returned by [" + expected + "]) are not supported")); } - assertEquals(value, fe.extractFromSource(map)); } public void testExtractSourceIncorrectPathWithFieldWithDots() { - FieldHitExtractor fe = new FieldHitExtractor("a.b.c.d.e", null, false); + FieldHitExtractor fe = getFieldHitExtractor("a.b.c.d.e", false); Object value = randomNonNullValue(); Map map = singletonMap("a", singletonMap("b.c", singletonMap("d", value))); SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map)); @@ -305,8 +365,8 @@ public void testExtractSourceIncorrectPathWithFieldWithDots() { } public void testFieldWithDotsAndCommonPrefix() { - FieldHitExtractor fe1 = new FieldHitExtractor("a.d", null, false); - FieldHitExtractor fe2 = new FieldHitExtractor("a.b.c", null, false); + FieldHitExtractor fe1 = getFieldHitExtractor("a.d", false); + FieldHitExtractor fe2 = getFieldHitExtractor("a.b.c", false); Object value = randomNonNullValue(); Map map = new HashMap<>(); map.put("a", singletonMap("d", value)); @@ -316,8 +376,8 @@ public void testFieldWithDotsAndCommonPrefix() { } public void testFieldWithDotsAndCommonPrefixes() { - FieldHitExtractor fe1 = new FieldHitExtractor("a1.b.c.d1.e.f.g1", null, false); - FieldHitExtractor fe2 = new FieldHitExtractor("a2.b.c.d2.e.f.g2", null, false); + FieldHitExtractor fe1 = getFieldHitExtractor("a1.b.c.d1.e.f.g1", false); + FieldHitExtractor fe2 = getFieldHitExtractor("a2.b.c.d2.e.f.g2", false); Object value = randomNonNullValue(); Map map = new HashMap<>(); map.put("a1", singletonMap("b.c", singletonMap("d1", singletonMap("e.f", singletonMap("g1", value))))); @@ -327,7 +387,7 @@ public void testFieldWithDotsAndCommonPrefixes() { } public void testFieldWithDotsAndSamePathButDifferentHierarchy() { - FieldHitExtractor fe = new FieldHitExtractor("a.b.c.d.e.f.g", null, false); + FieldHitExtractor fe = getFieldHitExtractor("a.b.c.d.e.f.g", false); Object value = randomNonNullValue(); Map map = new HashMap<>(); map.put("a.b", singletonMap("c", singletonMap("d.e", singletonMap("f.g", value)))); @@ -335,10 +395,55 @@ public void testFieldWithDotsAndSamePathButDifferentHierarchy() { SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map)); assertThat(ex.getMessage(), is("Multiple values (returned by [a.b.c.d.e.f.g]) are not supported")); } + + public void testFieldsWithSingleValueArrayAsSubfield() { + FieldHitExtractor fe = getFieldHitExtractor("a.b", false); + Object value = randomNonNullValue(); + Map map = new HashMap<>(); + // "a" : [{"b" : "value"}] + map.put("a", singletonList(singletonMap("b", value))); + assertEquals(value, fe.extractFromSource(map)); + } + + public void testFieldsWithMultiValueArrayAsSubfield() { + FieldHitExtractor fe = getFieldHitExtractor("a.b", false); + Map map = new HashMap<>(); + // "a" : [{"b" : "value1"}, {"b" : "value2"}] + map.put("a", asList(singletonMap("b", randomNonNullValue()), singletonMap("b", randomNonNullValue()))); + SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map)); + assertThat(ex.getMessage(), is("Arrays (returned by [a.b]) are not supported")); + } + + public void 
testFieldsWithSingleValueArrayAsSubfield_TwoNestedLists() { + FieldHitExtractor fe = getFieldHitExtractor("a.b.c", false); + Object value = randomNonNullValue(); + Map map = new HashMap<>(); + // "a" : [{"b" : [{"c" : "value"}]}] + map.put("a", singletonList(singletonMap("b", singletonList(singletonMap("c", value))))); + assertEquals(value, fe.extractFromSource(map)); + } + + public void testFieldsWithMultiValueArrayAsSubfield_ThreeNestedLists() { + FieldHitExtractor fe = getFieldHitExtractor("a.b.c", false); + Map map = new HashMap<>(); + // "a" : [{"b" : [{"c" : ["value1", "value2"]}]}] + map.put("a", singletonList(singletonMap("b", singletonList(singletonMap("c", asList("value1", "value2")))))); + SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map)); + assertThat(ex.getMessage(), is("Arrays (returned by [a.b.c]) are not supported")); + } + + public void testFieldsWithSingleValueArrayAsSubfield_TwoNestedLists2() { + FieldHitExtractor fe = getFieldHitExtractor("a.b.c", false); + Object value = randomNonNullValue(); + Map map = new HashMap<>(); + // "a" : [{"b" : {"c" : ["value"]}]}] + map.put("a", singletonList(singletonMap("b", singletonMap("c", singletonList(value))))); + assertEquals(value, fe.extractFromSource(map)); + } public void testObjectsForSourceValue() throws IOException { String fieldName = randomAlphaOfLength(5); - FieldHitExtractor fe = new FieldHitExtractor(fieldName, null, false); + FieldHitExtractor fe = getFieldHitExtractor(fieldName, false); SearchHit hit = new SearchHit(1); XContentBuilder source = JsonXContent.contentBuilder(); source.startObject(); { @@ -354,11 +459,18 @@ public void testObjectsForSourceValue() throws IOException { assertThat(ex.getMessage(), is("Objects (returned by [" + fieldName + "]) are not supported")); } + private FieldHitExtractor getFieldHitExtractor(String fieldName, boolean useDocValue) { + return new FieldHitExtractor(fieldName, null, UTC, useDocValue); + } + private Object randomValue() { Supplier value = randomFrom(Arrays.asList( () -> randomAlphaOfLength(10), ESTestCase::randomLong, ESTestCase::randomDouble, + ESTestCase::randomInt, + () -> BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE), + () -> new BigDecimal("20012312345621343256123456254.20012312345621343256123456254"), () -> null)); return value.get(); } @@ -367,7 +479,20 @@ private Object randomNonNullValue() { Supplier value = randomFrom(Arrays.asList( () -> randomAlphaOfLength(10), ESTestCase::randomLong, - ESTestCase::randomDouble)); + ESTestCase::randomDouble, + ESTestCase::randomInt, + () -> BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE), + () -> new BigDecimal("20012312345621343256123456254.20012312345621343256123456254"))); return value.get(); } + + private void assertFieldHitEquals(Object expected, Object actual) { + if (expected instanceof BigDecimal) { + // parsing will, by default, build a Double even if the initial value is BigDecimal + // Elasticsearch does this the same when returning the results + assertEquals(((BigDecimal) expected).doubleValue(), actual); + } else { + assertEquals(expected, actual); + } + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractorTests.java index 12a8dd0420f0f..673899d98f3a8 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractorTests.java +++ 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractorTests.java @@ -10,9 +10,12 @@ import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlException; +import org.elasticsearch.xpack.sql.util.DateUtils; import java.io.IOException; +import java.time.ZoneId; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -22,7 +25,8 @@ public class MetricAggExtractorTests extends AbstractWireSerializingTestCase { public static MetricAggExtractor randomMetricAggExtractor() { - return new MetricAggExtractor(randomAlphaOfLength(16), randomAlphaOfLength(16), randomAlphaOfLength(16)); + return new MetricAggExtractor(randomAlphaOfLength(16), randomAlphaOfLength(16), randomAlphaOfLength(16), + randomZone(), randomBoolean()); } @Override @@ -37,7 +41,12 @@ protected Reader instanceReader() { @Override protected MetricAggExtractor mutateInstance(MetricAggExtractor instance) throws IOException { - return new MetricAggExtractor(instance.name() + "mutated", instance.property(), instance.innerKey()); + return new MetricAggExtractor( + instance.name() + "mutated", + instance.property() + "mutated", + instance.innerKey() + "mutated", + randomValueOtherThan(instance.zoneId(), + ESTestCase::randomZone), randomBoolean()); } public void testNoAggs() { @@ -48,7 +57,7 @@ public void testNoAggs() { } public void testSingleValueProperty() { - MetricAggExtractor extractor = randomMetricAggExtractor(); + MetricAggExtractor extractor = new MetricAggExtractor("field", "property", "innerKey", null, false); double value = randomDouble(); Aggregation agg = new TestSingleValueAggregation(extractor.name(), singletonList(extractor.property()), value); @@ -56,8 +65,18 @@ public void testSingleValueProperty() { assertEquals(value, extractor.extract(bucket)); } + public void testSingleValuePropertyDate() { + ZoneId zoneId = randomZone(); + MetricAggExtractor extractor = new MetricAggExtractor("my_date_field", "property", "innerKey", zoneId, true); + + double value = randomDouble(); + Aggregation agg = new TestSingleValueAggregation(extractor.name(), singletonList(extractor.property()), value); + Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(singletonList(agg))); + assertEquals(DateUtils.asDateTime((long) value , zoneId), extractor.extract(bucket)); + } + public void testSingleValueInnerKey() { - MetricAggExtractor extractor = randomMetricAggExtractor(); + MetricAggExtractor extractor = new MetricAggExtractor("field", "property", "innerKey", null, false); double innerValue = randomDouble(); Aggregation agg = new TestSingleValueAggregation(extractor.name(), singletonList(extractor.property()), singletonMap(extractor.innerKey(), innerValue)); @@ -65,12 +84,33 @@ public void testSingleValueInnerKey() { assertEquals(innerValue, extractor.extract(bucket)); } + public void testSingleValueInnerKeyDate() { + ZoneId zoneId = randomZone(); + MetricAggExtractor extractor = new MetricAggExtractor("field", "property", "innerKey", zoneId, true); + + double innerValue = randomDouble(); + Aggregation agg = new TestSingleValueAggregation(extractor.name(), singletonList(extractor.property()), + singletonMap(extractor.innerKey(), innerValue)); + Bucket bucket = new TestBucket(emptyMap(), 0, new 
Aggregations(singletonList(agg))); + assertEquals(DateUtils.asDateTime((long) innerValue , zoneId), extractor.extract(bucket)); + } + public void testMultiValueProperty() { - MetricAggExtractor extractor = randomMetricAggExtractor(); + MetricAggExtractor extractor = new MetricAggExtractor("field", "property", "innerKey", null, false); double value = randomDouble(); Aggregation agg = new TestMultiValueAggregation(extractor.name(), singletonMap(extractor.property(), value)); Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(singletonList(agg))); assertEquals(value, extractor.extract(bucket)); } + + public void testMultiValuePropertyDate() { + ZoneId zoneId = randomZone(); + MetricAggExtractor extractor = new MetricAggExtractor("field", "property", "innerKey", zoneId, true); + + double value = randomDouble(); + Aggregation agg = new TestMultiValueAggregation(extractor.name(), singletonMap(extractor.property(), value)); + Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(singletonList(agg))); + assertEquals(DateUtils.asDateTime((long) value , zoneId), extractor.extract(bucket)); + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java index 741fd5413be9c..821bfb9b19544 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java @@ -15,20 +15,23 @@ import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.metrics.InternalTopHits; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlException; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.DateUtils; +import java.time.ZoneId; import java.util.Collections; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.sql.util.DateUtils.UTC; public class TopHitsAggExtractorTests extends AbstractWireSerializingTestCase { public static TopHitsAggExtractor randomTopHitsAggExtractor() { - return new TopHitsAggExtractor(randomAlphaOfLength(16), randomFrom(DataType.values())); + return new TopHitsAggExtractor(randomAlphaOfLength(16), randomFrom(DataType.values()), randomZone()); } @Override @@ -43,7 +46,10 @@ protected Reader instanceReader() { @Override protected TopHitsAggExtractor mutateInstance(TopHitsAggExtractor instance) { - return new TopHitsAggExtractor(instance.name() + "mutated", randomFrom(DataType.values())); + return new TopHitsAggExtractor( + instance.name() + "mutated", + randomValueOtherThan(instance.fieldDataType(), () -> randomFrom(DataType.values())), + randomValueOtherThan(instance.zoneId(), ESTestCase::randomZone)); } public void testNoAggs() { @@ -63,7 +69,7 @@ public void testZeroNullValue() { } public void testExtractValue() { - TopHitsAggExtractor extractor = new TopHitsAggExtractor("topHitsAgg", DataType.KEYWORD); + TopHitsAggExtractor extractor = new TopHitsAggExtractor("topHitsAgg", DataType.KEYWORD, UTC); String value = "Str_Value"; Aggregation agg = new InternalTopHits(extractor.name(), 0, 1, 
null, searchHitsOf(value), null, null); @@ -72,12 +78,13 @@ public void testExtractValue() { } public void testExtractDateValue() { - TopHitsAggExtractor extractor = new TopHitsAggExtractor("topHitsAgg", DataType.DATETIME); + ZoneId zoneId = randomZone(); + TopHitsAggExtractor extractor = new TopHitsAggExtractor("topHitsAgg", DataType.DATETIME, zoneId); long value = 123456789L; Aggregation agg = new InternalTopHits(extractor.name(), 0, 1, null, searchHitsOf(value), null, null); Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(singletonList(agg))); - assertEquals(DateUtils.asDateTime(value), extractor.extract(bucket)); + assertEquals(DateUtils.asDateTime(value, zoneId), extractor.extract(bucket)); } private SearchHits searchHitsOf(Object value) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistryTests.java index 101f4dfe78c4e..a810dac501ef5 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistryTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistryTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.expression.function; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.Expression; @@ -13,7 +12,6 @@ import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; import org.elasticsearch.xpack.sql.parser.ParsingException; -import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; @@ -25,6 +23,7 @@ import java.util.List; import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.sql.TestUtils.randomConfiguration; import static org.elasticsearch.xpack.sql.expression.function.FunctionRegistry.def; import static org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction.ResolutionType.DISTINCT; import static org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction.ResolutionType.EXTRACT; @@ -231,30 +230,6 @@ private UnresolvedFunction uf(UnresolvedFunction.ResolutionType resolutionType, return new UnresolvedFunction(SourceTests.randomSource(), "DUMMY_FUNCTION", resolutionType, Arrays.asList(children)); } - private Configuration randomConfiguration() { - return new Configuration(randomZone(), - randomIntBetween(0, 1000), - new TimeValue(randomNonNegativeLong()), - new TimeValue(randomNonNegativeLong()), - null, - randomFrom(Mode.values()), - randomAlphaOfLength(10), - randomAlphaOfLength(10), - randomAlphaOfLength(10)); - } - - private Configuration randomConfiguration(ZoneId providedZoneId) { - return new Configuration(providedZoneId, - randomIntBetween(0, 1000), - new TimeValue(randomNonNegativeLong()), - new TimeValue(randomNonNegativeLong()), - null, - randomFrom(Mode.values()), - randomAlphaOfLength(10), - randomAlphaOfLength(10), - randomAlphaOfLength(10)); - } - public static class DummyFunction extends ScalarFunction { public DummyFunction(Source source) { super(source, emptyList()); diff --git 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/DatabaseFunctionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/DatabaseFunctionTests.java index 86e4baf9fdc06..6581781c70072 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/DatabaseFunctionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/DatabaseFunctionTests.java @@ -31,7 +31,7 @@ public void testDatabaseFunctionOutput() { new Configuration(DateUtils.UTC, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, randomFrom(Mode.values()), randomAlphaOfLength(10), - null, clusterName), + null, clusterName, randomBoolean()), new FunctionRegistry(), IndexResolution.valid(test), new Verifier(new Metrics()) diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/UserFunctionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/UserFunctionTests.java index f4f48cb735045..190bc273d7a5e 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/UserFunctionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/UserFunctionTests.java @@ -30,7 +30,7 @@ public void testNoUsernameFunctionOutput() { new Configuration(DateUtils.UTC, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, randomFrom(Mode.values()), randomAlphaOfLength(10), - null, randomAlphaOfLengthBetween(1, 15)), + null, randomAlphaOfLengthBetween(1, 15), randomBoolean()), new FunctionRegistry(), IndexResolution.valid(test), new Verifier(new Metrics()) diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTests.java index 5eaa9ccd6c268..ff65414bf58ca 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTests.java @@ -6,18 +6,19 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; +import org.elasticsearch.xpack.sql.TestUtils; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.proto.Mode; -import org.elasticsearch.xpack.sql.proto.Protocol; -import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.tree.AbstractNodeTestCase; import org.elasticsearch.xpack.sql.tree.Source; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.util.Objects; + public class CurrentDateTests extends AbstractNodeTestCase { public static CurrentDate randomCurrentDate() { - return new CurrentDate(Source.EMPTY, new Configuration(randomZone(), Protocol.FETCH_SIZE, - Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, Mode.PLAIN, null, null, null)); + return new CurrentDate(Source.EMPTY, TestUtils.randomConfiguration()); } @Override @@ -32,8 +33,10 @@ protected CurrentDate copy(CurrentDate instance) { @Override protected CurrentDate mutate(CurrentDate instance) { - return new CurrentDate(instance.source(), new Configuration(randomZone(), Protocol.FETCH_SIZE, - Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, 
Mode.PLAIN, null, null, null)); + ZonedDateTime now = instance.configuration().now(); + ZoneId mutatedZoneId = randomValueOtherThanMany(o -> Objects.equals(now.getOffset(), o.getRules().getOffset(now.toInstant())), + () -> randomZone()); + return new CurrentDate(instance.source(), TestUtils.randomConfiguration(mutatedZoneId)); } @Override diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTimeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTimeTests.java index 28b99c1863486..166490699bb97 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTimeTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/CurrentDateTimeTests.java @@ -6,23 +6,33 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.TestUtils; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Literal; -import org.elasticsearch.xpack.sql.proto.Mode; -import org.elasticsearch.xpack.sql.proto.Protocol; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.parser.ParsingException; +import org.elasticsearch.xpack.sql.parser.SqlParser; import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.stats.Metrics; import org.elasticsearch.xpack.sql.tree.AbstractNodeTestCase; +import org.elasticsearch.xpack.sql.type.TypesTests; +import java.time.ZoneId; import java.time.ZonedDateTime; +import java.time.temporal.ChronoField; +import java.util.Objects; import static org.elasticsearch.xpack.sql.tree.Source.EMPTY; public class CurrentDateTimeTests extends AbstractNodeTestCase { public static CurrentDateTime randomCurrentDateTime() { - return new CurrentDateTime(EMPTY, Literal.of(EMPTY, randomInt(10)), - new Configuration(randomZone(), Protocol.FETCH_SIZE, - Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, Mode.PLAIN, null, null, null)); + return new CurrentDateTime(EMPTY, Literal.of(EMPTY, randomInt(9)), TestUtils.randomConfiguration()); } @Override @@ -37,9 +47,10 @@ protected CurrentDateTime copy(CurrentDateTime instance) { @Override protected CurrentDateTime mutate(CurrentDateTime instance) { - return new CurrentDateTime(instance.source(), Literal.of(EMPTY, randomInt(10)), - new Configuration(randomZone(), Protocol.FETCH_SIZE, - Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, Mode.PLAIN, null, null, null)); + ZonedDateTime now = instance.configuration().now(); + ZoneId mutatedZoneId = randomValueOtherThanMany(o -> Objects.equals(now.getOffset(), o.getRules().getOffset(now.toInstant())), + ESTestCase::randomZone); + return new CurrentDateTime(instance.source(), Literal.of(EMPTY, randomInt(9)), TestUtils.randomConfiguration(mutatedZoneId)); } @Override @@ -63,4 +74,30 @@ public void testNanoPrecision() { assertEquals(123_456_780, CurrentDateTime.nanoPrecision(zdt, Literal.of(EMPTY, 8)).getNano()); assertEquals(123_456_789, CurrentDateTime.nanoPrecision(zdt, Literal.of(EMPTY, 
9)).getNano()); } + + public void testDefaultPrecision() { + Configuration configuration = TestUtils.randomConfiguration(); + // null precision means default precision + CurrentDateTime cdt = new CurrentDateTime(EMPTY, null, configuration); + ZonedDateTime now = configuration.now(); + assertEquals(now.get(ChronoField.MILLI_OF_SECOND), ((ZonedDateTime) cdt.fold()).get(ChronoField.MILLI_OF_SECOND)); + + ZonedDateTime zdt = ZonedDateTime.parse("2019-02-26T12:34:56.123456789Z"); + assertEquals(123_000_000, CurrentDateTime.nanoPrecision(zdt, null).getNano()); + } + + public void testInvalidPrecision() { + SqlParser parser = new SqlParser(); + IndexResolution indexResolution = IndexResolution.valid(new EsIndex("test", + TypesTests.loadMapping("mapping-multi-field-with-nested.json"))); + + Analyzer analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), indexResolution, new Verifier(new Metrics())); + ParsingException e = expectThrows(ParsingException.class, () -> + analyzer.analyze(parser.createStatement("SELECT CURRENT_TIMESTAMP(100000000000000)"), true)); + assertEquals("line 1:27: invalid precision; [100000000000000] out of [integer] range", e.getMessage()); + + e = expectThrows(ParsingException.class, () -> + analyzer.analyze(parser.createStatement("SELECT CURRENT_TIMESTAMP(100)"), true)); + assertEquals("line 1:27: precision needs to be between [0-9], received [100]", e.getMessage()); + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java index 2ae6e571ac9d2..4323cce234c54 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java @@ -7,22 +7,15 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.xpack.sql.util.DateUtils; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.time.ZonedDateTime; -import static org.junit.Assert.assertEquals; - public class DateTimeTestUtils { private DateTimeTestUtils() {} public static ZonedDateTime dateTime(int year, int month, int day, int hour, int minute) { - DateTime dateTime = new DateTime(year, month, day, hour, minute, DateTimeZone.UTC); - ZonedDateTime zdt = ZonedDateTime.of(year, month, day, hour, minute, 0, 0, DateUtils.UTC); - assertEquals(dateTime.getMillis() / 1000, zdt.toEpochSecond()); - return zdt; + return ZonedDateTime.of(year, month, day, hour, minute, 0, 0, DateUtils.UTC); } public static ZonedDateTime dateTime(long millisSinceEpoch) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorTests.java index 66ab6146ec0eb..ab1a9bf1f5cea 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorTests.java @@ -71,10 +71,10 @@ public void testRoundFunctionWithEdgeCasesInputs() { public void testRoundInputValidation() { SqlIllegalArgumentException siae = 
expectThrows(SqlIllegalArgumentException.class, () -> new Round(EMPTY, l(5), l("foobarbar")).makePipe().asProcessor().process(null)); - assertEquals("A number is required; received foobarbar", siae.getMessage()); + assertEquals("A number is required; received [foobarbar]", siae.getMessage()); siae = expectThrows(SqlIllegalArgumentException.class, () -> new Round(EMPTY, l("bla"), l(0)).makePipe().asProcessor().process(null)); - assertEquals("A number is required; received bla", siae.getMessage()); + assertEquals("A number is required; received [bla]", siae.getMessage()); siae = expectThrows(SqlIllegalArgumentException.class, () -> new Round(EMPTY, l(123.34), l(0.1)).makePipe().asProcessor().process(null)); assertEquals("An integer number is required; received [0.1] as second parameter", siae.getMessage()); @@ -103,10 +103,10 @@ public void testTruncateFunctionWithEdgeCasesInputs() { public void testTruncateInputValidation() { SqlIllegalArgumentException siae = expectThrows(SqlIllegalArgumentException.class, () -> new Truncate(EMPTY, l(5), l("foobarbar")).makePipe().asProcessor().process(null)); - assertEquals("A number is required; received foobarbar", siae.getMessage()); + assertEquals("A number is required; received [foobarbar]", siae.getMessage()); siae = expectThrows(SqlIllegalArgumentException.class, () -> new Truncate(EMPTY, l("bla"), l(0)).makePipe().asProcessor().process(null)); - assertEquals("A number is required; received bla", siae.getMessage()); + assertEquals("A number is required; received [bla]", siae.getMessage()); siae = expectThrows(SqlIllegalArgumentException.class, () -> new Truncate(EMPTY, l(123.34), l(0.1)).makePipe().asProcessor().process(null)); assertEquals("An integer number is required; received [0.1] as second parameter", siae.getMessage()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/gen/processor/ConstantProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/gen/processor/ConstantProcessorTests.java index 5354dbd9ed116..06daed00a537c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/gen/processor/ConstantProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/gen/processor/ConstantProcessorTests.java @@ -7,9 +7,12 @@ import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.sql.expression.gen.processor.ConstantProcessor; +import org.elasticsearch.xpack.sql.expression.literal.IntervalDayTime; +import org.elasticsearch.xpack.sql.type.DataType; import java.io.IOException; +import java.time.Duration; +import java.util.concurrent.TimeUnit; public class ConstantProcessorTests extends AbstractWireSerializingTestCase { public static ConstantProcessor randomConstantProcessor() { @@ -28,7 +31,10 @@ protected Reader instanceReader() { @Override protected ConstantProcessor mutateInstance(ConstantProcessor instance) throws IOException { - return new ConstantProcessor(randomValueOtherThan(instance.process(null), () -> randomAlphaOfLength(5))); + return new ConstantProcessor(randomValueOtherThan(instance.process(null), + () -> new IntervalDayTime(Duration.ofSeconds( + randomLongBetween(TimeUnit.SECONDS.convert(3, TimeUnit.HOURS), TimeUnit.SECONDS.convert(23, TimeUnit.HOURS))), + DataType.INTERVAL_DAY_TO_SECOND))); } public void testApply() { diff --git 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java index 286524518e960..07f36d3f124d8 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java @@ -175,9 +175,12 @@ private static Literal L(Object value) { } private static FieldAttribute getFieldAttribute() { - return new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true)); + return getFieldAttribute("a"); } + private static FieldAttribute getFieldAttribute(String name) { + return new FieldAttribute(EMPTY, name, new EsField(name + "f", DataType.INTEGER, emptyMap(), true)); + } public void testPruneSubqueryAliases() { ShowTables s = new ShowTables(EMPTY, null, null); @@ -478,6 +481,7 @@ public void testSimplifyCoalesceRandomNullsWithValue() { randomListOfNulls()))); assertEquals(1, e.children().size()); assertEquals(Literal.TRUE, e.children().get(0)); + assertEquals(DataType.BOOLEAN, e.dataType()); } private List randomListOfNulls() { @@ -491,6 +495,7 @@ public void testSimplifyCoalesceFirstLiteral() { assertEquals(Coalesce.class, e.getClass()); assertEquals(1, e.children().size()); assertEquals(Literal.TRUE, e.children().get(0)); + assertEquals(DataType.BOOLEAN, e.dataType()); } public void testSimplifyIfNullNulls() { @@ -504,11 +509,13 @@ public void testSimplifyIfNullWithNullAndValue() { assertEquals(IfNull.class, e.getClass()); assertEquals(1, e.children().size()); assertEquals(ONE, e.children().get(0)); + assertEquals(DataType.INTEGER, e.dataType()); e = new SimplifyConditional().rule(new IfNull(EMPTY, ONE, Literal.NULL)); assertEquals(IfNull.class, e.getClass()); assertEquals(1, e.children().size()); assertEquals(ONE, e.children().get(0)); + assertEquals(DataType.INTEGER, e.dataType()); } public void testFoldNullNotAppliedOnNullIf() { @@ -536,6 +543,7 @@ public void testSimplifyGreatestRandomNullsWithValue() { assertEquals(2, e.children().size()); assertEquals(ONE, e.children().get(0)); assertEquals(TWO, e.children().get(1)); + assertEquals(DataType.INTEGER, e.dataType()); } public void testSimplifyLeastNulls() { @@ -557,6 +565,7 @@ public void testSimplifyLeastRandomNullsWithValue() { assertEquals(2, e.children().size()); assertEquals(ONE, e.children().get(0)); assertEquals(TWO, e.children().get(1)); + assertEquals(DataType.INTEGER, e.dataType()); } public void testConcatFoldingIsNotNull() { @@ -1125,6 +1134,23 @@ public void testCombineBinaryComparisonsWithDifferentUpperLimitInclusion() { assertEquals(or, exp); } + // (a = 1 AND b = 3 AND c = 4) OR (a = 2 AND b = 3 AND c = 4) -> (b = 3 AND c = 4) AND (a = 1 OR a = 2) + public void testBooleanSimplificationCommonExpressionSubstraction() { + FieldAttribute fa = getFieldAttribute("a"); + FieldAttribute fb = getFieldAttribute("b"); + FieldAttribute fc = getFieldAttribute("c"); + + Expression a1 = new Equals(EMPTY, fa, ONE); + Expression a2 = new Equals(EMPTY, fa, TWO); + And common = new And(EMPTY, new Equals(EMPTY, fb, THREE), new Equals(EMPTY, fc, FOUR)); + And left = new And(EMPTY, a1, common); + And right = new And(EMPTY, a2, common); + Or or = new Or(EMPTY, left, right); + + Expression exp = new BooleanSimplification().rule(or); + assertEquals(new And(EMPTY, common, new Or(EMPTY, a1, a2)), exp); + } + // (0 < a <= 1) OR (0 < a < 2) -> 0 < a < 2 public void 
testRangesOverlappingNoLowerBoundary() { FieldAttribute fa = getFieldAttribute(); @@ -1226,7 +1252,7 @@ public void testTranslateMinToFirst() { List order = ((OrderBy) result).order(); assertEquals(2, order.size()); assertEquals(First.class, order.get(0).child().getClass()); - assertEquals(min2, order.get(1).child());; + assertEquals(min2, order.get(1).child()); First first = (First) order.get(0).child(); assertTrue(((OrderBy) result).child() instanceof Aggregate); @@ -1249,7 +1275,7 @@ public void testTranslateMaxToLast() { assertTrue(result instanceof OrderBy); List order = ((OrderBy) result).order(); assertEquals(Last.class, order.get(0).child().getClass()); - assertEquals(max2, order.get(1).child());; + assertEquals(max2, order.get(1).child()); Last last = (Last) order.get(0).child(); assertTrue(((OrderBy) result).child() instanceof Aggregate); @@ -1259,4 +1285,68 @@ public void testTranslateMaxToLast() { assertSame(last, aggregates.get(0)); assertEquals(max2, aggregates.get(1)); } + + public void testSortAggregateOnOrderByWithTwoFields() { + FieldAttribute firstField = new FieldAttribute(EMPTY, "first_field", new EsField("first_field", DataType.BYTE, emptyMap(), true)); + FieldAttribute secondField = new FieldAttribute(EMPTY, "second_field", + new EsField("second_field", DataType.BYTE, emptyMap(), true)); + Alias firstAlias = new Alias(EMPTY, "first_alias", firstField); + Alias secondAlias = new Alias(EMPTY, "second_alias", secondField); + Order firstOrderBy = new Order(EMPTY, firstField, OrderDirection.ASC, Order.NullsPosition.LAST); + Order secondOrderBy = new Order(EMPTY, secondField, OrderDirection.ASC, Order.NullsPosition.LAST); + + OrderBy orderByPlan = new OrderBy(EMPTY, + new Aggregate(EMPTY, FROM(), Arrays.asList(secondField, firstField), Arrays.asList(secondAlias, firstAlias)), + Arrays.asList(firstOrderBy, secondOrderBy)); + LogicalPlan result = new Optimizer.SortAggregateOnOrderBy().apply(orderByPlan); + + assertTrue(result instanceof OrderBy); + List order = ((OrderBy) result).order(); + assertEquals(2, order.size()); + assertTrue(order.get(0).child() instanceof FieldAttribute); + assertTrue(order.get(1).child() instanceof FieldAttribute); + assertEquals("first_field", ((FieldAttribute) order.get(0).child()).name()); + assertEquals("second_field", ((FieldAttribute) order.get(1).child()).name()); + + assertTrue(((OrderBy) result).child() instanceof Aggregate); + Aggregate agg = (Aggregate) ((OrderBy) result).child(); + List groupings = agg.groupings(); + assertEquals(2, groupings.size()); + assertTrue(groupings.get(0) instanceof FieldAttribute); + assertTrue(groupings.get(1) instanceof FieldAttribute); + assertEquals(firstField, groupings.get(0)); + assertEquals(secondField, groupings.get(1)); + } + + public void testSortAggregateOnOrderByOnlyAliases() { + FieldAttribute firstField = new FieldAttribute(EMPTY, "first_field", new EsField("first_field", DataType.BYTE, emptyMap(), true)); + FieldAttribute secondField = new FieldAttribute(EMPTY, "second_field", + new EsField("second_field", DataType.BYTE, emptyMap(), true)); + Alias firstAlias = new Alias(EMPTY, "first_alias", firstField); + Alias secondAlias = new Alias(EMPTY, "second_alias", secondField); + Order firstOrderBy = new Order(EMPTY, firstAlias, OrderDirection.ASC, Order.NullsPosition.LAST); + Order secondOrderBy = new Order(EMPTY, secondAlias, OrderDirection.ASC, Order.NullsPosition.LAST); + + OrderBy orderByPlan = new OrderBy(EMPTY, + new Aggregate(EMPTY, FROM(), Arrays.asList(secondAlias, firstAlias), 
Arrays.asList(secondAlias, firstAlias)), + Arrays.asList(firstOrderBy, secondOrderBy)); + LogicalPlan result = new Optimizer.SortAggregateOnOrderBy().apply(orderByPlan); + + assertTrue(result instanceof OrderBy); + List order = ((OrderBy) result).order(); + assertEquals(2, order.size()); + assertTrue(order.get(0).child() instanceof Alias); + assertTrue(order.get(1).child() instanceof Alias); + assertEquals("first_alias", ((Alias) order.get(0).child()).name()); + assertEquals("second_alias", ((Alias) order.get(1).child()).name()); + + assertTrue(((OrderBy) result).child() instanceof Aggregate); + Aggregate agg = (Aggregate) ((OrderBy) result).child(); + List groupings = agg.groupings(); + assertEquals(2, groupings.size()); + assertTrue(groupings.get(0) instanceof Alias); + assertTrue(groupings.get(1) instanceof Alias); + assertEquals(firstAlias, groupings.get(0)); + assertEquals(secondAlias, groupings.get(1)); + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java index 8cbb0b528e9a6..a289c39a1865e 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java @@ -175,7 +175,8 @@ public void testDateLiteral() { public void testDateLiteralValidation() { ParsingException ex = expectThrows(ParsingException.class, () -> dateLiteral("2012-13-01")); - assertEquals("line 1:2: Invalid date received; Cannot parse \"2012-13-01\": Value 13 for monthOfYear must be in the range [1,12]", + assertEquals("line 1:2: Invalid date received; Text '2012-13-01' could not be parsed: " + + "Invalid value for MonthOfYear (valid values 1 - 12): 13", ex.getMessage()); } @@ -186,7 +187,8 @@ public void testTimeLiteralUnsupported() { public void testTimeLiteralValidation() { ParsingException ex = expectThrows(ParsingException.class, () -> timeLiteral("10:10:65")); - assertEquals("line 1:2: Invalid time received; Cannot parse \"10:10:65\": Value 65 for secondOfMinute must be in the range [0,59]", + assertEquals("line 1:2: Invalid time received; Text '10:10:65' could not be parsed: " + + "Invalid value for SecondOfMinute (valid values 0 - 59): 65", ex.getMessage()); } @@ -198,7 +200,7 @@ public void testTimestampLiteral() { public void testTimestampLiteralValidation() { ParsingException ex = expectThrows(ParsingException.class, () -> timestampLiteral("2012-01-01T10:01:02.3456")); assertEquals( - "line 1:2: Invalid timestamp received; Invalid format: \"2012-01-01T10:01:02.3456\" is malformed at \"T10:01:02.3456\"", + "line 1:2: Invalid timestamp received; Text '2012-01-01T10:01:02.3456' could not be parsed at index 10", ex.getMessage()); } @@ -225,6 +227,21 @@ public void testGUIDValidationLength() { assertEquals("line 1:8: Invalid GUID, too short", ex.getMessage()); } + public void testCurrentTimestampAsEscapedExpression() { + Expression expr = parser.createExpression("{fn CURRENT_TIMESTAMP(2)}"); + assertEquals(UnresolvedFunction.class, expr.getClass()); + UnresolvedFunction ur = (UnresolvedFunction) expr; + assertEquals("{fn CURRENT_TIMESTAMP(2)}", ur.sourceText()); + assertEquals(1, ur.children().size()); + } + + public void testCurrentDateAsEscapedExpression() { + Expression expr = parser.createExpression("{fn CURRENT_DATE()}"); + assertEquals(UnresolvedFunction.class, expr.getClass()); + UnresolvedFunction ur = 
(UnresolvedFunction) expr; + assertEquals("{fn CURRENT_DATE()}", ur.sourceText()); + assertEquals(0, ur.children().size()); + } public void testLimit() { Limit limit = limit(10); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java index 229d39705cd82..3d00a40aedbe7 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java @@ -279,7 +279,7 @@ public void testCastWithQuotedDataType() { public void testCastWithInvalidDataType() { ParsingException ex = expectThrows(ParsingException.class, () -> parser.createExpression("CAST(1 AS INVALID)")); - assertEquals("line 1:12: Does not recognize type invalid", ex.getMessage()); + assertEquals("line 1:12: Does not recognize type [INVALID]", ex.getMessage()); } public void testConvertWithUnquotedDataType() { @@ -338,22 +338,6 @@ public void testConvertWithInvalidESDataType() { assertEquals("line 1:13: Invalid data type [INVALID] provided", ex.getMessage()); } - public void testCurrentDate() { - Expression expr = parser.createExpression("CURRENT_DATE"); - assertEquals(UnresolvedFunction.class, expr.getClass()); - UnresolvedFunction ur = (UnresolvedFunction) expr; - assertEquals("CURRENT_DATE", ur.sourceText()); - assertEquals(0, ur.children().size()); - } - - public void testCurrentDateWithParentheses() { - Expression expr = parser.createExpression("CURRENT_DATE( )"); - assertEquals(UnresolvedFunction.class, expr.getClass()); - UnresolvedFunction ur = (UnresolvedFunction) expr; - assertEquals("CURRENT_DATE( )", ur.sourceText()); - assertEquals(0, ur.children().size()); - } - public void testCurrentTimestamp() { Expression expr = parser.createExpression("CURRENT_TIMESTAMP"); assertEquals(UnresolvedFunction.class, expr.getClass()); @@ -370,12 +354,23 @@ public void testCurrentTimestampPrecision() { assertEquals(1, ur.children().size()); Expression child = ur.children().get(0); assertEquals(Literal.class, child.getClass()); - assertEquals(Short.valueOf((short) 4), child.fold()); + assertEquals(4, child.fold()); } - public void testCurrentTimestampInvalidPrecision() { - ParsingException ex = expectThrows(ParsingException.class, () -> parser.createExpression("CURRENT_TIMESTAMP(100)")); - assertEquals("line 1:20: Precision needs to be between [0-9], received [100]", ex.getMessage()); + public void testCurrentDate() { + Expression expr = parser.createExpression("CURRENT_DATE"); + assertEquals(UnresolvedFunction.class, expr.getClass()); + UnresolvedFunction ur = (UnresolvedFunction) expr; + assertEquals("CURRENT_DATE", ur.sourceText()); + assertEquals(0, ur.children().size()); + } + + public void testCurrentDateWithParentheses() { + Expression expr = parser.createExpression("CURRENT_DATE( )"); + assertEquals(UnresolvedFunction.class, expr.getClass()); + UnresolvedFunction ur = (UnresolvedFunction) expr; + assertEquals("CURRENT_DATE( )", ur.sourceText()); + assertEquals(0, ur.children().size()); } public void testSourceKeyword() throws Exception { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java index dd44a8e464ae4..095040a8859f5 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java +++ 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java @@ -6,30 +6,21 @@ package org.elasticsearch.xpack.sql.parser; import com.google.common.base.Joiner; - import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.expression.NamedExpression; import org.elasticsearch.xpack.sql.expression.Order; import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute; import org.elasticsearch.xpack.sql.expression.UnresolvedStar; import org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.StringQueryPredicate; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Add; -import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.In; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.BooleanExpressionContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryPrimaryDefaultContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryTermContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StatementContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StatementDefaultContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ValueExpressionContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ValueExpressionDefaultContext; import org.elasticsearch.xpack.sql.plan.logical.Filter; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.plan.logical.OrderBy; import org.elasticsearch.xpack.sql.plan.logical.Project; -import org.elasticsearch.xpack.sql.plan.logical.With; import java.util.ArrayList; import java.util.List; @@ -65,6 +56,21 @@ public void testSelectScore() { assertEquals("SCORE()", f.sourceText()); } + public void testSelectCast() { + Cast f = singleProjection(project(parseStatement("SELECT CAST(POWER(languages, 2) AS DOUBLE) FROM foo")), Cast.class); + assertEquals("CAST(POWER(languages, 2) AS DOUBLE)", f.sourceText()); + } + + public void testSelectCastWithSQLOperator() { + Cast f = singleProjection(project(parseStatement("SELECT CONVERT(POWER(languages, 2), SQL_DOUBLE) FROM foo")), Cast.class); + assertEquals("CONVERT(POWER(languages, 2), SQL_DOUBLE)", f.sourceText()); + } + + public void testSelectCastToEsType() { + Cast f = singleProjection(project(parseStatement("SELECT CAST('0.' AS SCALED_FLOAT)")), Cast.class); + assertEquals("CAST('0.' AS SCALED_FLOAT)", f.sourceText()); + } + public void testSelectAddWithParanthesis() { Add f = singleProjection(project(parseStatement("SELECT (1 + 2)")), Add.class); assertEquals("1 + 2", f.sourceText()); @@ -177,86 +183,44 @@ public void testMultiMatchQuery() { assertThat(mmqp.optionMap(), hasEntry("fuzzy_rewrite", "scoring_boolean")); } - public void testLimitToPreventStackOverflowFromLongListOfQuotedIdentifiers() { - // Create expression in the form of "t"."field","t"."field", ... 
- - // 200 elements is ok - new SqlParser().createStatement("SELECT " + - Joiner.on(",").join(nCopies(200, "\"t\".\"field\"")) + " FROM t"); - - // 201 elements parser's "circuit breaker" is triggered - ParsingException e = expectThrows(ParsingException.class, () -> new SqlParser().createStatement("SELECT " + - Joiner.on(",").join(nCopies(201, "\"t\".\"field\"")) + " FROM t")); - assertEquals("line 1:2409: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); - } - - public void testLimitToPreventStackOverflowFromLongListOfUnQuotedIdentifiers() { - // Create expression in the form of t.field,t.field, ... - - // 250 elements is ok - new SqlParser().createStatement("SELECT " + - Joiner.on(",").join(nCopies(200, "t.field")) + " FROM t"); - - // 251 elements parser's "circuit breaker" is triggered - ParsingException e = expectThrows(ParsingException.class, () -> new SqlParser().createStatement("SELECT " + - Joiner.on(",").join(nCopies(201, "t.field")) + " FROM t")); - assertEquals("line 1:1609: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); - } - - public void testLimitToPreventStackOverflowFromLargeUnaryBooleanExpression() { - // Create expression in the form of NOT(NOT(NOT ... (b) ...) - - // 99 elements is ok - new SqlParser().createExpression( - Joiner.on("").join(nCopies(99, "NOT(")).concat("b").concat(Joiner.on("").join(nCopies(99, ")")))); - - // 100 elements parser's "circuit breaker" is triggered - ParsingException e = expectThrows(ParsingException.class, () -> new SqlParser().createExpression( - Joiner.on("").join(nCopies(100, "NOT(")).concat("b").concat(Joiner.on("").join(nCopies(100, ")"))))); - assertEquals("line 1:402: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); - } - public void testLimitToPreventStackOverflowFromLargeBinaryBooleanExpression() { // Create expression in the form of a = b OR a = b OR ... a = b - // 100 elements is ok - new SqlParser().createExpression(Joiner.on(" OR ").join(nCopies(100, "a = b"))); + // 1000 elements is ok + new SqlParser().createExpression(Joiner.on(" OR ").join(nCopies(1000, "a = b"))); - // 101 elements parser's "circuit breaker" is triggered + // 5000 elements cause stack overflow ParsingException e = expectThrows(ParsingException.class, () -> - new SqlParser().createExpression(Joiner.on(" OR ").join(nCopies(101, "a = b")))); - assertEquals("line 1:902: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); + new SqlParser().createExpression(Joiner.on(" OR ").join(nCopies(5000, "a = b")))); + assertThat(e.getMessage(), + startsWith("line -1:0: SQL statement is too large, causing stack overflow when generating the parsing tree: [")); } public void testLimitToPreventStackOverflowFromLargeUnaryArithmeticExpression() { // Create expression in the form of abs(abs(abs ... (i) ...) 
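[Editor's aside, not part of the diff: the rewritten limit tests no longer expect the depth-counting "circuit breaker" message but instead a message starting with "SQL statement is too large, causing stack overflow when generating the parsing tree: [". A minimal sketch of that technique follows - wrap tree construction and convert a StackOverflowError into a ParsingException. The class and method names are assumptions, not the actual SqlParser internals.]

import java.util.function.Supplier;

final class OverflowGuardSketch {

    static class ParsingException extends RuntimeException {
        ParsingException(String message) {
            super(message);
        }
    }

    // Run the (potentially deeply recursive) tree builder and translate overflow into a parse error.
    static <T> T buildTree(String sql, Supplier<T> builder) {
        try {
            return builder.get();
        } catch (StackOverflowError e) {
            throw new ParsingException(
                "SQL statement is too large, causing stack overflow when generating the parsing tree: [" + sql + "]");
        }
    }
}

[End of aside.]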
- // 199 elements is ok + // 200 elements is ok new SqlParser().createExpression( - Joiner.on("").join(nCopies(199, "abs(")).concat("i").concat(Joiner.on("").join(nCopies(199, ")")))); + Joiner.on("").join(nCopies(200, "abs(")).concat("i").concat(Joiner.on("").join(nCopies(200, ")")))); - // 200 elements parser's "circuit breaker" is triggered + // 5000 elements cause stack overflow ParsingException e = expectThrows(ParsingException.class, () -> new SqlParser().createExpression( - Joiner.on("").join(nCopies(200, "abs(")).concat("i").concat(Joiner.on("").join(nCopies(200, ")"))))); - assertEquals("line 1:802: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); + Joiner.on("").join(nCopies(1000, "abs(")).concat("i").concat(Joiner.on("").join(nCopies(1000, ")"))))); + assertThat(e.getMessage(), + startsWith("line -1:0: SQL statement is too large, causing stack overflow when generating the parsing tree: [")); } public void testLimitToPreventStackOverflowFromLargeBinaryArithmeticExpression() { // Create expression in the form of a + a + a + ... + a - // 200 elements is ok - new SqlParser().createExpression(Joiner.on(" + ").join(nCopies(200, "a"))); + // 1000 elements is ok + new SqlParser().createExpression(Joiner.on(" + ").join(nCopies(1000, "a"))); - // 201 elements parser's "circuit breaker" is triggered + // 5000 elements cause stack overflow ParsingException e = expectThrows(ParsingException.class, () -> - new SqlParser().createExpression(Joiner.on(" + ").join(nCopies(201, "a")))); - assertEquals("line 1:802: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); + new SqlParser().createExpression(Joiner.on(" + ").join(nCopies(5000, "a")))); + assertThat(e.getMessage(), + startsWith("line -1:0: SQL statement is too large, causing stack overflow when generating the parsing tree: [")); } public void testLimitToPreventStackOverflowFromLargeSubselectTree() { @@ -268,74 +232,13 @@ public void testLimitToPreventStackOverflowFromLargeSubselectTree() { .concat("t") .concat(Joiner.on("").join(nCopies(199, ")")))); - // 201 elements parser's "circuit breaker" is triggered + // 500 elements cause stack overflow ParsingException e = expectThrows(ParsingException.class, () -> new SqlParser().createStatement( - Joiner.on(" (").join(nCopies(201, "SELECT * FROM")) + Joiner.on(" (").join(nCopies(500, "SELECT * FROM")) .concat("t") - .concat(Joiner.on("").join(nCopies(200, ")"))))); - assertEquals("line 1:3002: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); - } - - public void testLimitToPreventStackOverflowFromLargeComplexSubselectTree() { - // Test with queries in the form of `SELECT true OR true OR .. FROM (SELECT true OR true OR... FROM (... t) ...) - - new SqlParser().createStatement( - Joiner.on(" (").join(nCopies(20, "SELECT ")). - concat(Joiner.on(" OR ").join(nCopies(180, "true"))).concat(" FROM") - .concat("t").concat(Joiner.on("").join(nCopies(19, ")")))); - - ParsingException e = expectThrows(ParsingException.class, () -> new SqlParser().createStatement( - Joiner.on(" (").join(nCopies(20, "SELECT ")). 
- concat(Joiner.on(" OR ").join(nCopies(190, "true"))).concat(" FROM") - .concat("t").concat(Joiner.on("").join(nCopies(19, ")"))))); - assertEquals("line 1:1628: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); - } - - public void testLimitStackOverflowForInAndLiteralsIsNotApplied() { - int noChildren = 100_000; - LogicalPlan plan = parseStatement("SELECT * FROM t WHERE a IN(" + - Joiner.on(",").join(nCopies(noChildren, "a + b")) + ")"); - - assertEquals(With.class, plan.getClass()); - assertEquals(Project.class, ((With) plan).child().getClass()); - assertEquals(Filter.class, ((Project) ((With) plan).child()).child().getClass()); - Filter filter = (Filter) ((Project) ((With) plan).child()).child(); - assertEquals(In.class, filter.condition().getClass()); - In in = (In) filter.condition(); - assertEquals("?a", in.value().toString()); - assertEquals(noChildren, in.list().size()); - assertThat(in.list().get(0).toString(), startsWith("Add[?a,?b]")); - } - - public void testDecrementOfDepthCounter() { - SqlParser.CircuitBreakerListener cbl = new SqlParser.CircuitBreakerListener(); - StatementContext sc = new StatementContext(); - QueryTermContext qtc = new QueryTermContext(); - ValueExpressionContext vec = new ValueExpressionContext(); - BooleanExpressionContext bec = new BooleanExpressionContext(); - - cbl.enterEveryRule(sc); - cbl.enterEveryRule(sc); - cbl.enterEveryRule(qtc); - cbl.enterEveryRule(qtc); - cbl.enterEveryRule(qtc); - cbl.enterEveryRule(vec); - cbl.enterEveryRule(bec); - cbl.enterEveryRule(bec); - - cbl.exitEveryRule(new StatementDefaultContext(sc)); - cbl.exitEveryRule(new StatementDefaultContext(sc)); - cbl.exitEveryRule(new QueryPrimaryDefaultContext(qtc)); - cbl.exitEveryRule(new QueryPrimaryDefaultContext(qtc)); - cbl.exitEveryRule(new ValueExpressionDefaultContext(vec)); - cbl.exitEveryRule(new SqlBaseParser.BooleanDefaultContext(bec)); - - assertEquals(0, cbl.depthCounts().get(SqlBaseParser.StatementContext.class.getSimpleName())); - assertEquals(1, cbl.depthCounts().get(SqlBaseParser.QueryTermContext.class.getSimpleName())); - assertEquals(0, cbl.depthCounts().get(SqlBaseParser.ValueExpressionContext.class.getSimpleName())); - assertEquals(1, cbl.depthCounts().get(SqlBaseParser.BooleanExpressionContext.class.getSimpleName())); + .concat(Joiner.on("").join(nCopies(499, ")"))))); + assertThat(e.getMessage(), + startsWith("line -1:0: SQL statement is too large, causing stack overflow when generating the parsing tree: [")); } private LogicalPlan parseStatement(String sql) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java index be656411656f4..bb4fb02ea7e85 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java @@ -5,19 +5,58 @@ */ package org.elasticsearch.xpack.sql.plan.logical.command.sys; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.TestUtils; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import 
org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexInfo; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.stats.Metrics; +import org.elasticsearch.xpack.sql.type.EsField; import org.elasticsearch.xpack.sql.type.TypesTests; import java.sql.Types; import java.util.ArrayList; import java.util.List; +import java.util.Map; +import java.util.function.Consumer; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.elasticsearch.action.ActionListener.wrap; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class SysColumnsTests extends ESTestCase { + static final String CLUSTER_NAME = "cluster"; + + private final SqlParser parser = new SqlParser(); + private final Map mapping = TypesTests.loadMapping("mapping-multi-field-with-nested.json", true); + private final IndexInfo index = new IndexInfo("test_emp", IndexType.INDEX); + private final IndexInfo alias = new IndexInfo("alias", IndexType.ALIAS); + + public void testSysColumns() { List> rows = new ArrayList<>(); - SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, false); - assertEquals(16, rows.size()); + SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, + randomValueOtherThanMany(Mode::isDriver, () -> randomFrom(Mode.values()))); + // nested fields are ignored + assertEquals(13, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -37,21 +76,63 @@ public void testSysColumns() { assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); + + row = rows.get(3); + assertEquals("keyword", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); row = rows.get(4); assertEquals("date", name(row)); assertEquals(Types.TIMESTAMP, sqlType(row)); assertEquals(null, radix(row)); - assertEquals(24, precision(row)); + assertEquals(29, precision(row)); assertEquals(8, bufferLength(row)); + row = rows.get(5); + assertEquals("some.dotted.field", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + + row = rows.get(6); + assertEquals("some.string", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + row = rows.get(7); - assertEquals("some.dotted", name(row)); - assertEquals(Types.STRUCT, sqlType(row)); + assertEquals("some.string.normalized", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + 
assertEquals(Integer.MAX_VALUE, bufferLength(row)); + + row = rows.get(8); + assertEquals("some.string.typical", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + + row = rows.get(9); + assertEquals("some.ambiguous", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); - assertEquals(-1, bufferLength(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + + row = rows.get(10); + assertEquals("some.ambiguous.one", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + + row = rows.get(11); + assertEquals("some.ambiguous.two", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(15); + row = rows.get(12); assertEquals("some.ambiguous.normalized", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); @@ -60,8 +141,9 @@ public void testSysColumns() { public void testSysColumnsInOdbcMode() { List> rows = new ArrayList<>(); - SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, true); - assertEquals(14, rows.size()); + SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, + Mode.ODBC); + assertEquals(13, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -104,7 +186,7 @@ public void testSysColumnsInOdbcMode() { assertEquals("date", name(row)); assertEquals((short) Types.TIMESTAMP, sqlType(row)); assertEquals(null, radix(row)); - assertEquals(24, precision(row)); + assertEquals(29, precision(row)); assertEquals(8, bufferLength(row)); assertNull(decimalPrecision(row)); assertEquals(Short.class, nullable(row).getClass()); @@ -112,18 +194,17 @@ public void testSysColumnsInOdbcMode() { assertEquals(Short.class, sqlDataTypeSub(row).getClass()); row = rows.get(5); - assertEquals("unsupported", name(row)); - assertEquals((short) Types.OTHER, sqlType(row)); + assertEquals("some.dotted.field", name(row)); + assertEquals((short) Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); - assertEquals(0, precision(row)); - assertEquals(0, bufferLength(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); assertNull(decimalPrecision(row)); assertEquals(Short.class, nullable(row).getClass()); assertEquals(Short.class, sqlDataType(row).getClass()); assertEquals(Short.class, sqlDataTypeSub(row).getClass()); row = rows.get(6); - assertEquals("some.dotted.field", name(row)); + assertEquals("some.string", name(row)); assertEquals((short) Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); @@ -133,7 +214,7 @@ public void testSysColumnsInOdbcMode() { assertEquals(Short.class, sqlDataTypeSub(row).getClass()); row = rows.get(7); - assertEquals("some.string", name(row)); + assertEquals("some.string.normalized", name(row)); assertEquals((short) Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); @@ -143,7 +224,7 @@ public void testSysColumnsInOdbcMode() { assertEquals(Short.class, sqlDataTypeSub(row).getClass()); row = rows.get(8); - assertEquals("some.string.normalized", name(row)); + assertEquals("some.string.typical", name(row)); assertEquals((short) 
Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); @@ -151,9 +232,29 @@ public void testSysColumnsInOdbcMode() { assertEquals(Short.class, nullable(row).getClass()); assertEquals(Short.class, sqlDataType(row).getClass()); assertEquals(Short.class, sqlDataTypeSub(row).getClass()); - + row = rows.get(9); - assertEquals("some.string.typical", name(row)); + assertEquals("some.ambiguous", name(row)); + assertEquals((short) Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Short.class, nullable(row).getClass()); + assertEquals(Short.class, sqlDataType(row).getClass()); + assertEquals(Short.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(10); + assertEquals("some.ambiguous.one", name(row)); + assertEquals((short) Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Short.class, nullable(row).getClass()); + assertEquals(Short.class, sqlDataType(row).getClass()); + assertEquals(Short.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(11); + assertEquals("some.ambiguous.two", name(row)); assertEquals((short) Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); @@ -162,7 +263,7 @@ public void testSysColumnsInOdbcMode() { assertEquals(Short.class, sqlDataType(row).getClass()); assertEquals(Short.class, sqlDataTypeSub(row).getClass()); - row = rows.get(13); + row = rows.get(12); assertEquals("some.ambiguous.normalized", name(row)); assertEquals((short) Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); @@ -172,6 +273,141 @@ public void testSysColumnsInOdbcMode() { assertEquals(Short.class, sqlDataType(row).getClass()); assertEquals(Short.class, sqlDataTypeSub(row).getClass()); } + + public void testSysColumnsInJdbcMode() { + List> rows = new ArrayList<>(); + SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, + Mode.JDBC); + assertEquals(13, rows.size()); + assertEquals(24, rows.get(0).size()); + + List row = rows.get(0); + assertEquals("bool", name(row)); + assertEquals(Types.BOOLEAN, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(1, bufferLength(row)); + + row = rows.get(1); + assertEquals("int", name(row)); + assertEquals(Types.INTEGER, sqlType(row)); + assertEquals(Integer.class, radix(row).getClass()); + assertEquals(4, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(2); + assertEquals("text", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(3); + assertEquals("keyword", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, 
nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(4); + assertEquals("date", name(row)); + assertEquals(Types.TIMESTAMP, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(29, precision(row)); + assertEquals(8, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(5); + assertEquals("some.dotted.field", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(6); + assertEquals("some.string", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(7); + assertEquals("some.string.normalized", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(8); + assertEquals("some.string.typical", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(9); + assertEquals("some.ambiguous", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(10); + assertEquals("some.ambiguous.one", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(11); + assertEquals("some.ambiguous.two", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(12); + 
assertEquals("some.ambiguous.normalized", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Integer.class, nullable(row).getClass()); + assertEquals(Integer.class, sqlDataType(row).getClass()); + assertEquals(Integer.class, sqlDataTypeSub(row).getClass()); + } private static Object name(List list) { return list.get(3); @@ -208,4 +444,88 @@ private static Object sqlDataType(List list) { private static Object sqlDataTypeSub(List list) { return list.get(14); } + + public void testSysColumnsNoArg() throws Exception { + executeCommand("SYS COLUMNS", emptyList(), r -> { + assertEquals(13, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + // no index specified + assertEquals("", r.column(2)); + assertEquals("bool", r.column(3)); + r.advanceRow(); + assertEquals(CLUSTER_NAME, r.column(0)); + // no index specified + assertEquals("", r.column(2)); + assertEquals("int", r.column(3)); + }, mapping); + } + + public void testSysColumnsWithCatalogWildcard() throws Exception { + executeCommand("SYS COLUMNS CATALOG 'cluster' TABLE LIKE 'test' LIKE '%'", emptyList(), r -> { + assertEquals(13, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("bool", r.column(3)); + r.advanceRow(); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("int", r.column(3)); + }, mapping); + } + + public void testSysColumnsWithMissingCatalog() throws Exception { + executeCommand("SYS COLUMNS TABLE LIKE 'test' LIKE '%'", emptyList(), r -> { + assertEquals(13, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("bool", r.column(3)); + r.advanceRow(); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("int", r.column(3)); + }, mapping); + } + + public void testSysColumnsWithNullCatalog() throws Exception { + executeCommand("SYS COLUMNS CATALOG ? 
TABLE LIKE 'test' LIKE '%'", singletonList(new SqlTypedParamValue("keyword", null)), r -> { + assertEquals(13, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("bool", r.column(3)); + r.advanceRow(); + assertEquals(CLUSTER_NAME, r.column(0)); + assertEquals("test", r.column(2)); + assertEquals("int", r.column(3)); + }, mapping); + } + + @SuppressWarnings({ "unchecked" }) + private void executeCommand(String sql, List params, Consumer consumer, Map mapping) + throws Exception { + Tuple tuple = sql(sql, params, mapping); + + IndexResolver resolver = tuple.v2().indexResolver(); + + EsIndex test = new EsIndex("test", mapping); + + doAnswer(invocation -> { + ((ActionListener) invocation.getArguments()[2]).onResponse(IndexResolution.valid(test)); + return Void.TYPE; + }).when(resolver).resolveAsMergedMapping(any(), any(), any()); + + tuple.v1().execute(tuple.v2(), wrap(consumer::accept, ex -> fail(ex.getMessage()))); + } + + private Tuple sql(String sql, List params, Map mapping) { + EsIndex test = new EsIndex("test", mapping); + Analyzer analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), IndexResolution.valid(test), + new Verifier(new Metrics())); + Command cmd = (Command) analyzer.analyze(parser.createStatement(sql, params), true); + + IndexResolver resolver = mock(IndexResolver.class); + when(resolver.clusterName()).thenReturn(CLUSTER_NAME); + + SqlSession session = new SqlSession(TestUtils.TEST_CFG, null, null, resolver, null, null, null, null, null); + return new Tuple<>(cmd, session); + } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java deleted file mode 100644 index a1accd28ab4d9..0000000000000 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.sql.plan.logical.command.sys; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.sql.TestUtils; -import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; -import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier; -import org.elasticsearch.xpack.sql.analysis.index.EsIndex; -import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; -import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; -import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; -import org.elasticsearch.xpack.sql.parser.SqlParser; -import org.elasticsearch.xpack.sql.plan.logical.command.Command; -import org.elasticsearch.xpack.sql.session.SqlSession; -import org.elasticsearch.xpack.sql.stats.Metrics; -import org.elasticsearch.xpack.sql.type.DataType; -import org.elasticsearch.xpack.sql.type.EsField; -import org.elasticsearch.xpack.sql.type.TypesTests; - -import java.util.List; -import java.util.Map; - -import static java.util.Arrays.asList; -import static java.util.Collections.singletonList; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class SysParserTests extends ESTestCase { - - private final SqlParser parser = new SqlParser(); - private final Map mapping = TypesTests.loadMapping("mapping-multi-field-with-nested.json", true); - - @SuppressWarnings({ "rawtypes", "unchecked" }) - private Tuple sql(String sql) { - EsIndex test = new EsIndex("test", mapping); - Analyzer analyzer = new Analyzer(TestUtils.TEST_CFG, new FunctionRegistry(), IndexResolution.valid(test), - new Verifier(new Metrics())); - Command cmd = (Command) analyzer.analyze(parser.createStatement(sql), true); - - IndexResolver resolver = mock(IndexResolver.class); - when(resolver.clusterName()).thenReturn("cluster"); - - doAnswer(invocation -> { - ((ActionListener) invocation.getArguments()[2]).onResponse(singletonList(test)); - return Void.TYPE; - }).when(resolver).resolveAsSeparateMappings(any(), any(), any()); - - SqlSession session = new SqlSession(TestUtils.TEST_CFG, null, null, resolver, null, null, null, null, null); - return new Tuple<>(cmd, session); - } - - public void testSysTypes() throws Exception { - Command cmd = sql("SYS TYPES").v1(); - - List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "SCALED_FLOAT", "FLOAT", "DOUBLE", - "KEYWORD", "TEXT", "IP", "BOOLEAN", "DATE", "DATETIME", - "INTERVAL_YEAR", "INTERVAL_MONTH", "INTERVAL_DAY", "INTERVAL_HOUR", "INTERVAL_MINUTE", "INTERVAL_SECOND", - "INTERVAL_YEAR_TO_MONTH", "INTERVAL_DAY_TO_HOUR", "INTERVAL_DAY_TO_MINUTE", "INTERVAL_DAY_TO_SECOND", - "INTERVAL_HOUR_TO_MINUTE", "INTERVAL_HOUR_TO_SECOND", "INTERVAL_MINUTE_TO_SECOND", - "UNSUPPORTED", "OBJECT", "NESTED"); - - cmd.execute(null, ActionListener.wrap(r -> { - assertEquals(19, r.columnCount()); - assertEquals(DataType.values().length, r.size()); - assertFalse(r.schema().types().contains(DataType.NULL)); - // test numeric as signed - assertFalse(r.column(9, Boolean.class)); - // make sure precision is returned as boolean (not int) - assertFalse(r.column(10, Boolean.class)); - // no auto-increment - assertFalse(r.column(11, Boolean.class)); - - for (int i = 0; i < r.size(); i++) { - assertEquals(names.get(i), r.column(0)); - r.advanceRow(); - } - - }, ex -> fail(ex.getMessage()))); 
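[Editor's aside, not part of the diff: the new executeCommand helper in SysColumnsTests above stubs IndexResolver.resolveAsMergedMapping with Mockito's doAnswer so the ActionListener argument is completed synchronously. The generic sketch below shows that callback-stubbing pattern in isolation; AsyncService and Listener are illustrative stand-ins, not Elasticsearch classes.]

import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

final class CallbackStubSketch {

    interface Listener<T> { void onResponse(T value); }

    interface AsyncService { void resolve(String name, Listener<String> listener); }

    static AsyncService stubbedService() {
        AsyncService service = mock(AsyncService.class);
        doAnswer(invocation -> {
            @SuppressWarnings("unchecked")
            Listener<String> listener = (Listener<String>) invocation.getArguments()[1];
            listener.onResponse("resolved"); // complete the callback immediately, no async machinery needed
            return null;
        }).when(service).resolve(any(), any());
        return service;
    }
}

[End of aside.]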
- } - - public void testSysColsNoArgs() throws Exception { - runSysColumns("SYS COLUMNS"); - } - - public void testSysColumnEmptyCatalog() throws Exception { - Tuple sql = sql("SYS COLUMNS CATALOG '' TABLE LIKE '%' LIKE '%'"); - - sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { - assertEquals(24, r.columnCount()); - assertEquals(22, r.size()); - }, ex -> fail(ex.getMessage()))); - } - - public void testSysColsTableOnlyCatalog() throws Exception { - Tuple sql = sql("SYS COLUMNS CATALOG 'catalog'"); - - sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { - assertEquals(24, r.columnCount()); - assertEquals(0, r.size()); - }, ex -> fail(ex.getMessage()))); - } - - public void testSysColsTableOnlyPattern() throws Exception { - runSysColumns("SYS COLUMNS TABLE LIKE 'test'"); - } - - public void testSysColsColOnlyPattern() throws Exception { - runSysColumns("SYS COLUMNS LIKE '%'"); - } - - public void testSysColsTableAndColsPattern() throws Exception { - runSysColumns("SYS COLUMNS TABLE LIKE 'test' LIKE '%'"); - } - - - private void runSysColumns(String commandVariation) throws Exception { - Tuple sql = sql(commandVariation); - List names = asList("bool", - "int", - "text", - "keyword", - "unsupported", - "date", - "some", - "some.dotted", - "some.dotted.field", - "some.string", - "some.string.normalized", - "some.string.typical", - "some.ambiguous", - "some.ambiguous.one", - "some.ambiguous.two", - "some.ambiguous.normalized", - "dep", - "dep.dep_name", - "dep.dep_id", - "dep.dep_id.keyword", - "dep.end_date", - "dep.start_date"); - - sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { - assertEquals(24, r.columnCount()); - assertEquals(22, r.size()); - - for (int i = 0; i < r.size(); i++) { - assertEquals("cluster", r.column(0)); - assertNull(r.column(1)); - assertEquals("test", r.column(2)); - assertEquals(names.get(i), r.column(3)); - r.advanceRow(); - } - - }, ex -> fail(ex.getMessage()))); - } -} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java index d319fdb2a8feb..769bff8ee2d7b 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java @@ -51,20 +51,84 @@ public class SysTablesTests extends ESTestCase { private final IndexInfo index = new IndexInfo("test", IndexType.INDEX); private final IndexInfo alias = new IndexInfo("alias", IndexType.ALIAS); - public void testSysTablesEnumerateCatalog() throws Exception { - executeCommand("SYS TABLES CATALOG LIKE '%'", r -> { + // + // catalog enumeration + // + public void testSysTablesCatalogEnumerationWithEmptyType() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '%' LIKE '' TYPE ''", r -> { assertEquals(1, r.size()); assertEquals(CLUSTER_NAME, r.column(0)); - }); + // everything else should be null + for (int i = 1; i < 10; i++) { + assertNull(r.column(i)); + } + }, index); + } + + public void testSysTablesCatalogAllTypes() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '%' LIKE '' TYPE '%'", r -> { + assertEquals(1, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + // everything else should be null + for (int i = 1; i < 10; i++) { + assertNull(r.column(i)); + } + }, new IndexInfo[0]); + } + + // when types are null, consider them equivalent to '' for compatibility 
reasons + public void testSysTablesCatalogNoTypes() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '%' LIKE ''", r -> { + assertEquals(1, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + // everything else should be null + for (int i = 1; i < 10; i++) { + assertNull(r.column(i)); + } + }, index); } - public void testSysTablesEnumerateTypes() throws Exception { - executeCommand("SYS TABLES TYPE '%'", r -> { + + // + // table types enumeration + // + + // missing type means pattern + public void testSysTablesTypesEnumerationWoString() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> { assertEquals(2, r.size()); assertEquals("BASE TABLE", r.column(3)); assertTrue(r.advanceRow()); assertEquals("VIEW", r.column(3)); - }); + }, alias, index); + } + + public void testSysTablesTypesEnumeration() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> { + assertEquals(2, r.size()); + + Iterator it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator(); + + for (int t = 0; t < r.size(); t++) { + assertEquals(it.next().toSql(), r.column(3)); + + // everything else should be null + for (int i = 0; i < 10; i++) { + if (i != 3) { + assertNull(r.column(i)); + } + } + + r.advanceRow(); + } + }, new IndexInfo[0]); + } + + // when a type is specified, apply filtering + public void testSysTablesTypesEnumerationAllCatalogsAndSpecifiedView() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '%' LIKE '' TYPE 'VIEW'", r -> { + assertEquals(0, r.size()); + }, new IndexInfo[0]); } public void testSysTablesDifferentCatalog() throws Exception { @@ -77,17 +141,42 @@ public void testSysTablesDifferentCatalog() throws Exception { public void testSysTablesNoTypes() throws Exception { executeCommand("SYS TABLES", r -> { assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertEquals("BASE TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + assertEquals("VIEW", r.column(3)); + }, index, alias); + } + + public void testSysTablesWithLegacyTypes() throws Exception { + executeCommand("SYS TABLES TYPE 'TABLE', 'ALIAS'", r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertEquals("TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + assertEquals("VIEW", r.column(3)); + }, index, alias); + } + + public void testSysTablesWithProperTypes() throws Exception { + executeCommand("SYS TABLES TYPE 'BASE TABLE', 'ALIAS'", r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); assertEquals("BASE TABLE", r.column(3)); assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); assertEquals("VIEW", r.column(3)); }, index, alias); } public void testSysTablesPattern() throws Exception { executeCommand("SYS TABLES LIKE '%'", r -> { + assertEquals(2, r.size()); assertEquals("test", r.column(2)); + assertEquals("BASE TABLE", r.column(3)); assertTrue(r.advanceRow()); - assertEquals(2, r.size()); assertEquals("alias", r.column(2)); }, index, alias); } @@ -130,7 +219,18 @@ public void testSysTablesOnlyIndicesInLegacyMode() throws Exception { assertEquals("test", r.column(2)); assertEquals("TABLE", r.column(3)); }, index); + } + public void testSysTablesNoPatternWithTypesSpecifiedInLegacyMode() throws Exception { + executeCommand("SYS TABLES TYPE 'TABLE','VIEW'", r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertEquals("TABLE", r.column(3)); + 
assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + assertEquals("VIEW", r.column(3)); + + }, index, alias); } public void testSysTablesOnlyIndicesLegacyModeParameterized() throws Exception { @@ -186,50 +286,19 @@ public void testSysTablesWithCatalogOnlyAliases() throws Exception { }, alias); } - public void testSysTablesWithInvalidType() throws Exception { - executeCommand("SYS TABLES LIKE 'test' TYPE 'QUE HORA ES'", r -> { + public void testSysTablesWithEmptyCatalogOnlyAliases() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '' LIKE 'test' TYPE 'VIEW'", r -> { assertEquals(0, r.size()); - }, new IndexInfo[0]); - } - - public void testSysTablesCatalogEnumeration() throws Exception { - executeCommand("SYS TABLES CATALOG LIKE '%' LIKE ''", r -> { - assertEquals(1, r.size()); - assertEquals(CLUSTER_NAME, r.column(0)); - // everything else should be null - for (int i = 1; i < 10; i++) { - assertNull(r.column(i)); - } - }, new IndexInfo[0]); - } - - public void testSysTablesTypesEnumeration() throws Exception { - executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> { - assertEquals(2, r.size()); - - Iterator it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator(); - - for (int t = 0; t < r.size(); t++) { - assertEquals(it.next().toSql(), r.column(3)); - - // everything else should be null - for (int i = 0; i < 10; i++) { - if (i != 3) { - assertNull(r.column(i)); - } - } - - r.advanceRow(); - } - }, new IndexInfo[0]); + }, alias); } - public void testSysTablesTypesEnumerationWoString() throws Exception { - executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> { + public void testSysTablesWithInvalidType() throws Exception { + executeCommand("SYS TABLES LIKE 'test' TYPE 'QUE HORA ES'", r -> { assertEquals(0, r.size()); }, new IndexInfo[0]); } + private SqlTypedParamValue param(Object value) { return new SqlTypedParamValue(DataTypes.fromJava(value).typeName, value); } @@ -243,7 +312,7 @@ private Tuple sql(String sql, List para IndexResolver resolver = mock(IndexResolver.class); when(resolver.clusterName()).thenReturn(CLUSTER_NAME); - SqlSession session = new SqlSession(null, null, null, resolver, null, null, null, null, null); + SqlSession session = new SqlSession(TestUtils.TEST_CFG, null, null, resolver, null, null, null, null, null); return new Tuple<>(cmd, session); } @@ -265,4 +334,4 @@ private void executeCommand(String sql, List params, Consume tuple.v1().execute(tuple.v2(), wrap(consumer::accept, ex -> fail(ex.getMessage()))); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java index e6061f197149b..4e428846dc2f4 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java @@ -36,11 +36,11 @@ private Tuple sql(String sql) { Command cmd = (Command) analyzer.analyze(parser.createStatement(sql), false); IndexResolver resolver = mock(IndexResolver.class); - SqlSession session = new SqlSession(null, null, null, resolver, null, null, null, null, null); + SqlSession session = new SqlSession(TestUtils.TEST_CFG, null, null, resolver, null, null, null, null, null); return new Tuple<>(cmd, session); } - public void testSysTypes() throws Exception { + 
public void testSysTypes() { Command cmd = sql("SYS TYPES").v1(); List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "SCALED_FLOAT", "FLOAT", "DOUBLE", @@ -69,7 +69,7 @@ public void testSysTypes() throws Exception { }, ex -> fail(ex.getMessage()))); } - public void testSysTypesDefaultFiltering() throws Exception { + public void testSysTypesDefaultFiltering() { Command cmd = sql("SYS TYPES 0").v1(); cmd.execute(null, wrap(r -> { @@ -77,7 +77,7 @@ public void testSysTypesDefaultFiltering() throws Exception { }, ex -> fail(ex.getMessage()))); } - public void testSysTypesPositiveFiltering() throws Exception { + public void testSysTypesPositiveFiltering() { // boolean = 16 Command cmd = sql("SYS TYPES " + JDBCType.BOOLEAN.getVendorTypeNumber()).v1(); @@ -87,7 +87,7 @@ public void testSysTypesPositiveFiltering() throws Exception { }, ex -> fail(ex.getMessage()))); } - public void testSysTypesNegativeFiltering() throws Exception { + public void testSysTypesNegativeFiltering() { Command cmd = sql("SYS TYPES " + JDBCType.TINYINT.getVendorTypeNumber()).v1(); cmd.execute(null, wrap(r -> { @@ -96,7 +96,7 @@ public void testSysTypesNegativeFiltering() throws Exception { }, ex -> fail(ex.getMessage()))); } - public void testSysTypesMultipleMatches() throws Exception { + public void testSysTypesMultipleMatches() { Command cmd = sql("SYS TYPES " + JDBCType.VARCHAR.getVendorTypeNumber()).v1(); cmd.execute(null, wrap(r -> { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java index 17b1eedf06d93..c94da6621515b 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryFolderTests.java @@ -70,6 +70,36 @@ public void testFoldingToLocalExecWithProject() { assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); } + public void testFoldingToLocalExecWithProjectAndLimit() { + PhysicalPlan p = plan("SELECT keyword FROM test WHERE 1 = 2 LIMIT 10"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(EmptyExecutable.class, le.executable().getClass()); + EmptyExecutable ee = (EmptyExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); + } + + public void testFoldingToLocalExecWithProjectAndOrderBy() { + PhysicalPlan p = plan("SELECT keyword FROM test WHERE 1 = 2 ORDER BY 1"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(EmptyExecutable.class, le.executable().getClass()); + EmptyExecutable ee = (EmptyExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); + } + + public void testFoldingToLocalExecWithProjectAndOrderByAndLimit() { + PhysicalPlan p = plan("SELECT keyword FROM test WHERE 1 = 2 ORDER BY 1 LIMIT 10"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(EmptyExecutable.class, le.executable().getClass()); + EmptyExecutable ee = (EmptyExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("keyword{f}#")); + } + public void testLocalExecWithPrunedFilterWithFunction() { PhysicalPlan p = plan("SELECT E() FROM test WHERE PI() = 5"); 
assertEquals(LocalExec.class, p.getClass()); @@ -90,6 +120,36 @@ public void testLocalExecWithPrunedFilterWithFunctionAndAggregation() { assertThat(ee.output().get(0).toString(), startsWith("E(){c}#")); } + public void testFoldingToLocalExecWithAggregationAndLimit() { + PhysicalPlan p = plan("SELECT 'foo' FROM test GROUP BY 1 LIMIT 10"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(SingletonExecutable.class, le.executable().getClass()); + SingletonExecutable ee = (SingletonExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("'foo'{c}#")); + } + + public void testFoldingToLocalExecWithAggregationAndOrderBy() { + PhysicalPlan p = plan("SELECT 'foo' FROM test GROUP BY 1 ORDER BY 1"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(SingletonExecutable.class, le.executable().getClass()); + SingletonExecutable ee = (SingletonExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("'foo'{c}#")); + } + + public void testFoldingToLocalExecWithAggregationAndOrderByAndLimit() { + PhysicalPlan p = plan("SELECT 'foo' FROM test GROUP BY 1 ORDER BY 1 LIMIT 10"); + assertEquals(LocalExec.class, p.getClass()); + LocalExec le = (LocalExec) p; + assertEquals(SingletonExecutable.class, le.executable().getClass()); + SingletonExecutable ee = (SingletonExecutable) le.executable(); + assertEquals(1, ee.output().size()); + assertThat(ee.output().get(0).toString(), startsWith("'foo'{c}#")); + } + public void testLocalExecWithoutFromClause() { PhysicalPlan p = plan("SELECT E(), 'foo', abs(10)"); assertEquals(LocalExec.class, p.getClass()); @@ -302,7 +362,7 @@ public void testGroupKeyTypes_DateTime() { "\"source\":\"InternalSqlScriptUtils.add(InternalSqlScriptUtils.docValue(doc,params.v0)," + "InternalSqlScriptUtils.intervalYearMonth(params.v1,params.v2))\",\"lang\":\"painless\",\"params\":{" + "\"v0\":\"date\",\"v1\":\"P1Y2M\",\"v2\":\"INTERVAL_YEAR_TO_MONTH\"}},\"missing_bucket\":true," + - "\"value_type\":\"date\",\"order\":\"asc\"}}}]}}}")); + "\"value_type\":\"long\",\"order\":\"asc\"}}}]}}}")); assertEquals(2, ee.output().size()); assertThat(ee.output().get(0).toString(), startsWith("count(*){a->")); assertThat(ee.output().get(1).toString(), startsWith("a{s->")); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index ef7cdf54b89ab..b77fb5a8a76c1 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -17,13 +17,14 @@ import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier; import org.elasticsearch.xpack.sql.analysis.index.EsIndex; import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; -import org.elasticsearch.xpack.sql.analysis.index.MappingException; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; import org.elasticsearch.xpack.sql.expression.function.grouping.Histogram; import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; import 
org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Round; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; import org.elasticsearch.xpack.sql.optimizer.Optimizer; import org.elasticsearch.xpack.sql.parser.SqlParser; @@ -36,13 +37,16 @@ import org.elasticsearch.xpack.sql.planner.QueryTranslator.QueryTranslation; import org.elasticsearch.xpack.sql.querydsl.agg.AggFilter; import org.elasticsearch.xpack.sql.querydsl.agg.GroupByDateHistogram; +import org.elasticsearch.xpack.sql.querydsl.query.BoolQuery; import org.elasticsearch.xpack.sql.querydsl.query.ExistsQuery; import org.elasticsearch.xpack.sql.querydsl.query.NotQuery; import org.elasticsearch.xpack.sql.querydsl.query.Query; import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery; +import org.elasticsearch.xpack.sql.querydsl.query.RegexQuery; import org.elasticsearch.xpack.sql.querydsl.query.ScriptQuery; import org.elasticsearch.xpack.sql.querydsl.query.TermQuery; import org.elasticsearch.xpack.sql.querydsl.query.TermsQuery; +import org.elasticsearch.xpack.sql.querydsl.query.WildcardQuery; import org.elasticsearch.xpack.sql.stats.Metrics; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.EsField; @@ -59,6 +63,7 @@ import static org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation.E; import static org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation.PI; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.startsWith; @@ -109,16 +114,6 @@ public void testTermEqualityAnalyzer() { assertEquals("value", tq.value()); } - public void testTermEqualityAnalyzerAmbiguous() { - LogicalPlan p = plan("SELECT some.string FROM test WHERE some.ambiguous = 'value'"); - assertTrue(p instanceof Project); - p = ((Project) p).child(); - assertTrue(p instanceof Filter); - Expression condition = ((Filter) p).condition(); - // the message is checked elsewhere (in FieldAttributeTests) - expectThrows(MappingException.class, () -> QueryTranslator.toQuery(condition, false)); - } - public void testTermEqualityNotAnalyzed() { LogicalPlan p = plan("SELECT some.string FROM test WHERE int = 5"); assertTrue(p instanceof Project); @@ -184,15 +179,107 @@ public void testDateRangeCast() { assertEquals("date", rq.field()); assertEquals(DateUtils.asDateTime("1969-05-13T12:34:56Z"), rq.lower()); } + + public void testLikeOnInexact() { + LogicalPlan p = plan("SELECT * FROM test WHERE some.string LIKE '%a%'"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + QueryTranslation qt = QueryTranslator.toQuery(condition, false); + assertEquals(WildcardQuery.class, qt.query.getClass()); + WildcardQuery qsq = ((WildcardQuery) qt.query); + assertEquals("some.string.typical", qsq.field()); + } + + public void testRLikeOnInexact() { + LogicalPlan p = plan("SELECT * FROM test WHERE some.string RLIKE '.*a.*'"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + QueryTranslation qt = QueryTranslator.toQuery(condition, false); + assertEquals(RegexQuery.class, qt.query.getClass()); + RegexQuery qsq = ((RegexQuery) qt.query); + assertEquals("some.string.typical", 
qsq.field()); + } public void testLikeConstructsNotSupported() { - LogicalPlan p = plan("SELECT LTRIM(keyword) lt FROM test WHERE LTRIM(keyword) LIKE '%a%'"); + LogicalPlan p = plan("SELECT LTRIM(keyword) lt FROM test WHERE LTRIM(keyword) like '%a%'"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + SqlIllegalArgumentException ex = expectThrows(SqlIllegalArgumentException.class, () -> QueryTranslator.toQuery(condition, false)); + assertEquals("Scalar function [LTRIM(keyword)] not allowed (yet) as argument for LIKE", ex.getMessage()); + } + + public void testRLikeConstructsNotSupported() { + LogicalPlan p = plan("SELECT LTRIM(keyword) lt FROM test WHERE LTRIM(keyword) RLIKE '.*a.*'"); assertTrue(p instanceof Project); p = ((Project) p).child(); assertTrue(p instanceof Filter); Expression condition = ((Filter) p).condition(); SqlIllegalArgumentException ex = expectThrows(SqlIllegalArgumentException.class, () -> QueryTranslator.toQuery(condition, false)); - assertEquals("Scalar function (LTRIM(keyword)) not allowed (yet) as arguments for LIKE", ex.getMessage()); + assertEquals("Scalar function [LTRIM(keyword)] not allowed (yet) as argument for RLIKE", ex.getMessage()); + } + + public void testDifferentLikeAndNotLikePatterns() { + LogicalPlan p = plan("SELECT keyword k FROM test WHERE k LIKE 'X%' AND k NOT LIKE 'Y%'"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + + Expression condition = ((Filter) p).condition(); + QueryTranslation qt = QueryTranslator.toQuery(condition, false); + assertEquals(BoolQuery.class, qt.query.getClass()); + BoolQuery bq = ((BoolQuery) qt.query); + assertTrue(bq.isAnd()); + assertTrue(bq.left() instanceof WildcardQuery); + assertTrue(bq.right() instanceof NotQuery); + + NotQuery nq = (NotQuery) bq.right(); + assertTrue(nq.child() instanceof WildcardQuery); + WildcardQuery lqsq = (WildcardQuery) bq.left(); + WildcardQuery rqsq = (WildcardQuery) nq.child(); + + assertEquals("X*", lqsq.query()); + assertEquals("keyword", lqsq.field()); + assertEquals("Y*", rqsq.query()); + assertEquals("keyword", rqsq.field()); + } + + public void testRLikePatterns() { + String[] patterns = new String[] {"(...)+", "abab(ab)?", "(ab){1,2}", "(ab){3}", "aabb|bbaa", "a+b+|b+a+", "aa(cc|bb)", + "a{4,6}b{4,6}", ".{3}.{3}", "aaa*bbb*", "a+.+", "a.c.e", "[^abc\\-]"}; + for (int i = 0; i < 5; i++) { + assertDifferentRLikeAndNotRLikePatterns(randomFrom(patterns), randomFrom(patterns)); + } + } + + private void assertDifferentRLikeAndNotRLikePatterns(String firstPattern, String secondPattern) { + LogicalPlan p = plan("SELECT keyword k FROM test WHERE k RLIKE '" + firstPattern + "' AND k NOT RLIKE '" + secondPattern + "'"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + + Expression condition = ((Filter) p).condition(); + QueryTranslation qt = QueryTranslator.toQuery(condition, false); + assertEquals(BoolQuery.class, qt.query.getClass()); + BoolQuery bq = ((BoolQuery) qt.query); + assertTrue(bq.isAnd()); + assertTrue(bq.left() instanceof RegexQuery); + assertTrue(bq.right() instanceof NotQuery); + + NotQuery nq = (NotQuery) bq.right(); + assertTrue(nq.child() instanceof RegexQuery); + RegexQuery lqsq = (RegexQuery) bq.left(); + RegexQuery rqsq = (RegexQuery) nq.child(); + + assertEquals(firstPattern, lqsq.regex()); + assertEquals("keyword", lqsq.field()); + assertEquals(secondPattern, 
rqsq.regex()); + assertEquals("keyword", rqsq.field()); } public void testTranslateNotExpression_WhereClause_Painless() { @@ -309,8 +396,8 @@ public void testTranslateInExpression_WhereClause() { tq.asBuilder().toString().replaceAll("\\s", "")); } - public void testTranslateInExpression_WhereClauseAndNullHandling() { - LogicalPlan p = plan("SELECT * FROM test WHERE keyword IN ('foo', null, 'lala', null, 'foo', concat('la', 'la'))"); + public void testTranslateInExpression_WhereClause_TextFieldWithKeyword() { + LogicalPlan p = plan("SELECT * FROM test WHERE some.string IN ('foo', 'bar', 'lala', 'foo', concat('la', 'la'))"); assertTrue(p instanceof Project); assertTrue(p.children().get(0) instanceof Filter); Expression condition = ((Filter) p.children().get(0)).condition(); @@ -319,21 +406,22 @@ public void testTranslateInExpression_WhereClauseAndNullHandling() { Query query = translation.query; assertTrue(query instanceof TermsQuery); TermsQuery tq = (TermsQuery) query; - assertEquals("{\"terms\":{\"keyword\":[\"foo\",\"lala\"],\"boost\":1.0}}", + assertEquals("{\"terms\":{\"some.string.typical\":[\"foo\",\"bar\",\"lala\"],\"boost\":1.0}}", tq.asBuilder().toString().replaceAll("\\s", "")); } - public void testTranslateInExpressionInvalidValues_WhereClause() { - LogicalPlan p = plan("SELECT * FROM test WHERE keyword IN ('foo', 'bar', keyword)"); + public void testTranslateInExpression_WhereClauseAndNullHandling() { + LogicalPlan p = plan("SELECT * FROM test WHERE keyword IN ('foo', null, 'lala', null, 'foo', concat('la', 'la'))"); assertTrue(p instanceof Project); assertTrue(p.children().get(0) instanceof Filter); Expression condition = ((Filter) p.children().get(0)).condition(); assertFalse(condition.foldable()); - SqlIllegalArgumentException ex = expectThrows(SqlIllegalArgumentException.class, () -> QueryTranslator.toQuery(condition, false)); - assertEquals( - "Line 1:52: Comparisons against variables are not (currently) supported; " - + "offender [keyword] in [keyword IN ('foo', 'bar', keyword)]", - ex.getMessage()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + Query query = translation.query; + assertTrue(query instanceof TermsQuery); + TermsQuery tq = (TermsQuery) query; + assertEquals("{\"terms\":{\"keyword\":[\"foo\",\"lala\"],\"boost\":1.0}}", + tq.asBuilder().toString().replaceAll("\\s", "")); } public void testTranslateInExpression_WhereClause_Painless() { @@ -413,6 +501,50 @@ public void testTranslateMathFunction_HavingClause_Painless() { assertThat(aggFilter.scriptTemplate().params().toString(), startsWith("[{a=max(int){a->")); assertThat(aggFilter.scriptTemplate().params().toString(), endsWith(", {v=10}]")); } + + public void testTranslateRoundWithOneParameter() { + LogicalPlan p = plan("SELECT ROUND(YEAR(date)) FROM test GROUP BY ROUND(YEAR(date))"); + + assertTrue(p instanceof Aggregate); + assertEquals(1, ((Aggregate) p).groupings().size()); + assertEquals(1, ((Aggregate) p).aggregates().size()); + assertTrue(((Aggregate) p).groupings().get(0) instanceof Round); + assertTrue(((Aggregate) p).aggregates().get(0) instanceof Round); + + Round groupingRound = (Round) ((Aggregate) p).groupings().get(0); + assertEquals(1, groupingRound.children().size()); + + QueryTranslator.GroupingContext groupingContext = QueryTranslator.groupBy(((Aggregate) p).groupings()); + assertNotNull(groupingContext); + ScriptTemplate scriptTemplate = groupingContext.tail.script(); + 
assertEquals("InternalSqlScriptUtils.round(InternalSqlScriptUtils.dateTimeChrono(InternalSqlScriptUtils.docValue(doc,params.v0), " + + "params.v1, params.v2),params.v3)", + scriptTemplate.toString()); + assertEquals("[{v=date}, {v=Z}, {v=YEAR}, {v=null}]", scriptTemplate.params().toString()); + } + + public void testTranslateRoundWithTwoParameters() { + LogicalPlan p = plan("SELECT ROUND(YEAR(date), -2) FROM test GROUP BY ROUND(YEAR(date), -2)"); + + assertTrue(p instanceof Aggregate); + assertEquals(1, ((Aggregate) p).groupings().size()); + assertEquals(1, ((Aggregate) p).aggregates().size()); + assertTrue(((Aggregate) p).groupings().get(0) instanceof Round); + assertTrue(((Aggregate) p).aggregates().get(0) instanceof Round); + + Round groupingRound = (Round) ((Aggregate) p).groupings().get(0); + assertEquals(2, groupingRound.children().size()); + assertTrue(groupingRound.children().get(1) instanceof Literal); + assertEquals(-2, ((Literal) groupingRound.children().get(1)).value()); + + QueryTranslator.GroupingContext groupingContext = QueryTranslator.groupBy(((Aggregate) p).groupings()); + assertNotNull(groupingContext); + ScriptTemplate scriptTemplate = groupingContext.tail.script(); + assertEquals("InternalSqlScriptUtils.round(InternalSqlScriptUtils.dateTimeChrono(InternalSqlScriptUtils.docValue(doc,params.v0), " + + "params.v1, params.v2),params.v3)", + scriptTemplate.toString()); + assertEquals("[{v=date}, {v=Z}, {v=YEAR}, {v=-2}]", scriptTemplate.params().toString()); + } public void testGroupByAndHavingWithFunctionOnTopOfAggregation() { LogicalPlan p = plan("SELECT keyword, MAX(int) FROM test GROUP BY 1 HAVING ABS(MAX(int)) > 10"); @@ -625,24 +757,48 @@ public void testTopHitsAggregationWithOneArg() { EsQueryExec eqe = (EsQueryExec) p; assertEquals(1, eqe.output().size()); assertEquals("FIRST(keyword)", eqe.output().get(0).qualifiedName()); - assertTrue(eqe.output().get(0).dataType() == DataType.KEYWORD); + assertEquals(DataType.KEYWORD, eqe.output().get(0).dataType()); assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""), endsWith("\"top_hits\":{\"from\":0,\"size\":1,\"version\":false,\"seq_no_primary_term\":false," + "\"explain\":false,\"docvalue_fields\":[{\"field\":\"keyword\"}]," + "\"sort\":[{\"keyword\":{\"order\":\"asc\",\"missing\":\"_last\",\"unmapped_type\":\"keyword\"}}]}}}}}")); } + { + PhysicalPlan p = optimizeAndPlan("SELECT MIN(keyword) FROM test"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertEquals(1, eqe.output().size()); + assertEquals("MIN(keyword)", eqe.output().get(0).qualifiedName()); + assertEquals(DataType.KEYWORD, eqe.output().get(0).dataType()); + assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""), + endsWith("\"top_hits\":{\"from\":0,\"size\":1,\"version\":false,\"seq_no_primary_term\":false," + + "\"explain\":false,\"docvalue_fields\":[{\"field\":\"keyword\"}]," + + "\"sort\":[{\"keyword\":{\"order\":\"asc\",\"missing\":\"_last\",\"unmapped_type\":\"keyword\"}}]}}}}}")); + } { PhysicalPlan p = optimizeAndPlan("SELECT LAST(date) FROM test"); assertEquals(EsQueryExec.class, p.getClass()); EsQueryExec eqe = (EsQueryExec) p; assertEquals(1, eqe.output().size()); assertEquals("LAST(date)", eqe.output().get(0).qualifiedName()); - assertTrue(eqe.output().get(0).dataType() == DataType.DATETIME); + assertEquals(DataType.DATETIME, eqe.output().get(0).dataType()); 
assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""), endsWith("\"top_hits\":{\"from\":0,\"size\":1,\"version\":false,\"seq_no_primary_term\":false," + "\"explain\":false,\"docvalue_fields\":[{\"field\":\"date\",\"format\":\"epoch_millis\"}]," + "\"sort\":[{\"date\":{\"order\":\"desc\",\"missing\":\"_last\",\"unmapped_type\":\"date\"}}]}}}}}")); } + { + PhysicalPlan p = optimizeAndPlan("SELECT MAX(keyword) FROM test"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertEquals(1, eqe.output().size()); + assertEquals("MAX(keyword)", eqe.output().get(0).qualifiedName()); + assertEquals(DataType.KEYWORD, eqe.output().get(0).dataType()); + assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""), + endsWith("\"top_hits\":{\"from\":0,\"size\":1,\"version\":false,\"seq_no_primary_term\":false," + + "\"explain\":false,\"docvalue_fields\":[{\"field\":\"keyword\"}]," + + "\"sort\":[{\"keyword\":{\"order\":\"desc\",\"missing\":\"_last\",\"unmapped_type\":\"keyword\"}}]}}}}}")); + } } public void testTopHitsAggregationWithTwoArgs() { @@ -652,7 +808,7 @@ public void testTopHitsAggregationWithTwoArgs() { EsQueryExec eqe = (EsQueryExec) p; assertEquals(1, eqe.output().size()); assertEquals("FIRST(keyword, int)", eqe.output().get(0).qualifiedName()); - assertTrue(eqe.output().get(0).dataType() == DataType.KEYWORD); + assertEquals(DataType.KEYWORD, eqe.output().get(0).dataType()); assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""), endsWith("\"top_hits\":{\"from\":0,\"size\":1,\"version\":false,\"seq_no_primary_term\":false," + "\"explain\":false,\"docvalue_fields\":[{\"field\":\"keyword\"}]," + @@ -666,7 +822,7 @@ public void testTopHitsAggregationWithTwoArgs() { EsQueryExec eqe = (EsQueryExec) p; assertEquals(1, eqe.output().size()); assertEquals("LAST(date, int)", eqe.output().get(0).qualifiedName()); - assertTrue(eqe.output().get(0).dataType() == DataType.DATETIME); + assertEquals(DataType.DATETIME, eqe.output().get(0).dataType()); assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""), endsWith("\"top_hits\":{\"from\":0,\"size\":1,\"version\":false,\"seq_no_primary_term\":false," + "\"explain\":false,\"docvalue_fields\":[{\"field\":\"date\",\"format\":\"epoch_millis\"}]," + @@ -674,4 +830,67 @@ public void testTopHitsAggregationWithTwoArgs() { "{\"date\":{\"order\":\"desc\",\"missing\":\"_last\",\"unmapped_type\":\"date\"}}]}}}}}")); } } + + + public void testGlobalCountInImplicitGroupByForcesTrackHits() throws Exception { + PhysicalPlan p = optimizeAndPlan("SELECT COUNT(*) FROM test"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertTrue("Should be tracking hits", eqe.queryContainer().shouldTrackHits()); + } + + public void testGlobalCountAllInImplicitGroupByForcesTrackHits() throws Exception { + PhysicalPlan p = optimizeAndPlan("SELECT COUNT(ALL *) FROM test"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertTrue("Should be tracking hits", eqe.queryContainer().shouldTrackHits()); + } + + public void testGlobalCountInSpecificGroupByDoesNotForceTrackHits() throws Exception { + PhysicalPlan p = optimizeAndPlan("SELECT COUNT(*) FROM test GROUP BY int"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertFalse("Should NOT be tracking hits", eqe.queryContainer().shouldTrackHits()); + } + + public 
void testFieldAllCountDoesNotTrackHits() throws Exception { + PhysicalPlan p = optimizeAndPlan("SELECT COUNT(ALL int) FROM test"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertFalse("Should NOT be tracking hits", eqe.queryContainer().shouldTrackHits()); + } + + public void testFieldCountDoesNotTrackHits() throws Exception { + PhysicalPlan p = optimizeAndPlan("SELECT COUNT(int) FROM test"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertFalse("Should NOT be tracking hits", eqe.queryContainer().shouldTrackHits()); + } + + public void testDistinctCountDoesNotTrackHits() throws Exception { + PhysicalPlan p = optimizeAndPlan("SELECT COUNT(DISTINCT int) FROM test"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertFalse("Should NOT be tracking hits", eqe.queryContainer().shouldTrackHits()); + } + + public void testNoCountDoesNotTrackHits() throws Exception { + PhysicalPlan p = optimizeAndPlan("SELECT int FROM test"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertFalse("Should NOT be tracking hits", eqe.queryContainer().shouldTrackHits()); + } + + public void testZonedDateTimeInScripts() throws Exception { + PhysicalPlan p = optimizeAndPlan( + "SELECT date FROM test WHERE date + INTERVAL 1 YEAR > CAST('2019-03-11T12:34:56.000Z' AS DATETIME)"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertThat(eqe.queryContainer().toString().replaceAll("\\s+", ""), containsString( + "\"script\":{\"script\":{\"source\":\"InternalSqlScriptUtils.nullSafeFilter(" + + "InternalSqlScriptUtils.gt(InternalSqlScriptUtils.add(InternalSqlScriptUtils.docValue(doc,params.v0)," + + "InternalSqlScriptUtils.intervalYearMonth(params.v1,params.v2)),InternalSqlScriptUtils.asDateTime(params.v3)))\"," + + "\"lang\":\"painless\"," + + "\"params\":{\"v0\":\"date\",\"v1\":\"P1Y\",\"v2\":\"INTERVAL_YEAR\",\"v3\":\"2019-03-11T12:34:56.000Z\"}},")); + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java index 73b4ea8fa8daa..b5563b4d6b831 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -151,10 +151,10 @@ public void testConversionToDate() { Conversion conversion = conversionFor(KEYWORD, to); assertNull(conversion.convert(null)); - assertEquals(date(0L), conversion.convert("1970-01-01T00:10:01Z")); - assertEquals(date(1483228800000L), conversion.convert("2017-01-01T00:11:00Z")); - assertEquals(date(-1672531200000L), conversion.convert("1917-01-01T00:11:00Z")); - assertEquals(date(18000000L), conversion.convert("1970-01-01T03:10:20-05:00")); + assertEquals(date(0L), conversion.convert("1970-01-01")); + assertEquals(date(1483228800000L), conversion.convert("2017-01-01")); + assertEquals(date(-1672531200000L), conversion.convert("1917-01-01")); + assertEquals(date(18000000L), conversion.convert("1970-01-01")); // double check back and forth conversion ZonedDateTime zdt = TestUtils.now(); @@ -162,7 +162,7 @@ public void testConversionToDate() { Conversion back = conversionFor(KEYWORD, DATE); assertEquals(DateUtils.asDateOnly(zdt), back.convert(forward.convert(zdt))); Exception e = 
expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); - assertEquals("cannot cast [0xff] to [date]:Invalid format: \"0xff\" is malformed at \"xff\"", e.getMessage()); + assertEquals("cannot cast [0xff] to [date]: Text '0xff' could not be parsed at index 0", e.getMessage()); } } @@ -199,6 +199,7 @@ public void testConversionToDateTime() { Conversion conversion = conversionFor(KEYWORD, to); assertNull(conversion.convert(null)); + assertEquals(dateTime(0L), conversion.convert("1970-01-01")); assertEquals(dateTime(1000L), conversion.convert("1970-01-01T00:00:01Z")); assertEquals(dateTime(1483228800000L), conversion.convert("2017-01-01T00:00:00Z")); assertEquals(dateTime(1483228800000L), conversion.convert("2017-01-01T00:00:00Z")); @@ -210,7 +211,8 @@ public void testConversionToDateTime() { Conversion back = conversionFor(KEYWORD, DATETIME); assertEquals(dt, back.convert(forward.convert(dt))); Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); - assertEquals("cannot cast [0xff] to [datetime]:Invalid format: \"0xff\" is malformed at \"xff\"", e.getMessage()); + assertEquals("cannot cast [0xff] to [datetime]: failed to parse date field [0xff] with format [date_optional_time]", + e.getMessage()); } } @@ -447,6 +449,10 @@ public void testCommonType() { assertEquals(FLOAT, commonType(FLOAT, INTEGER)); assertEquals(DOUBLE, commonType(DOUBLE, FLOAT)); + // numeric and intervals + assertEquals(INTERVAL_YEAR_TO_MONTH, commonType(INTERVAL_YEAR_TO_MONTH, LONG)); + assertEquals(INTERVAL_HOUR_TO_MINUTE, commonType(INTEGER, INTERVAL_HOUR_TO_MINUTE)); + // dates/datetimes and intervals assertEquals(DATETIME, commonType(DATE, DATETIME)); assertEquals(DATETIME, commonType(DATETIME, DATE)); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java index 7b38718dad794..47f01be917867 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java @@ -7,8 +7,13 @@ import org.elasticsearch.test.ESTestCase; +import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumSet; +import java.util.List; +import java.util.stream.Stream; +import static java.util.stream.Collectors.toList; import static org.elasticsearch.xpack.sql.type.DataType.DATETIME; import static org.elasticsearch.xpack.sql.type.DataType.FLOAT; import static org.elasticsearch.xpack.sql.type.DataType.INTERVAL_DAY; @@ -108,6 +113,34 @@ public void testIncompatibleInterval() throws Exception { assertNull(compatibleInterval(INTERVAL_MINUTE_TO_SECOND, INTERVAL_MONTH)); } + public void testEsToDataType() throws Exception { + List types = new ArrayList<>(Arrays.asList("null", "boolean", "bool", + "byte", "tinyint", + "short", "smallint", + "integer", + "long", "bigint", + "double", "real", + "half_float", "scaled_float", "float", + "decimal", "numeric", + "keyword", "text", "varchar", + "date", "datetime", "timestamp", + "binary", "varbinary", + "ip", + "interval_year", "interval_month", "interval_year_to_month", + "interval_day", "interval_hour", "interval_minute", "interval_second", + "interval_day_to_hour", "interval_day_to_minute", "interval_day_to_second", + "interval_hour_to_minute", "interval_hour_to_second", + "interval_minute_to_second")); + + types.addAll(Stream.of(DataType.values()) + .filter(DataType::isPrimitive) + 
.map(DataType::name) + .collect(toList())); + String type = randomFrom(types.toArray(new String[0])); + DataType dataType = DataType.fromSqlOrEsType(type); + assertNotNull(dataType); + } + private DataType randomDataTypeNoDateTime() { return randomValueOtherThan(DataType.DATETIME, () -> randomFrom(DataType.values())); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java index 2a2488dda722f..5cfc6bac113f9 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java @@ -81,7 +81,7 @@ public void testDateField() { EsField field = mapping.get("date"); assertThat(field.getDataType(), is(DATETIME)); assertThat(field.isAggregatable(), is(true)); - assertThat(field.getPrecision(), is(24)); + assertThat(field.getPrecision(), is(29)); } public void testDateNoFormat() { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java index 19a544c14e50b..29cbb9b985ffd 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java @@ -9,6 +9,7 @@ import static org.elasticsearch.xpack.sql.util.StringUtils.likeToJavaPattern; import static org.elasticsearch.xpack.sql.util.StringUtils.likeToLuceneWildcard; +import static org.elasticsearch.xpack.sql.util.StringUtils.likeToUnescaped; public class LikeConversionTests extends ESTestCase { @@ -20,6 +21,10 @@ private static String wildcard(String pattern) { return likeToLuceneWildcard(pattern, '|'); } + private static String unescape(String pattern) { + return likeToUnescaped(pattern, '|'); + } + public void testNoRegex() { assertEquals("^fooBar$", regex("fooBar")); } @@ -103,4 +108,25 @@ public void testWildcardTripleEscaping() { public void testWildcardIgnoreDoubleEscapedButSkipEscapingOfSql() { assertEquals("foo\\\\\\*bar\\\\?\\?", wildcard("foo\\*bar\\_?")); } -} + + public void testUnescapeLiteral() { + assertEquals("foo", unescape("foo")); + } + + public void testUnescapeEscaped() { + assertEquals("foo_bar", unescape("foo|_bar")); + } + + public void testUnescapeEscapedEscape() { + assertEquals("foo|_bar", unescape("foo||_bar")); + } + + public void testUnescapeLastCharEscape() { + assertEquals("foo_bar|", unescape("foo|_bar|")); + } + + public void testUnescapeMultipleEscapes() { + assertEquals("foo|_bar|", unescape("foo|||_bar||")); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json index 13c9f62b2136e..d93633f7aced0 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json @@ -43,6 +43,7 @@ } } } - } + }, + "foo_type" : { "type" : "foo" } } } diff --git a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java index 336ddadea4c32..ba63034c170d0 100644 --- a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java +++ 
b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java @@ -105,13 +105,13 @@ private void waitForWatcher() throws Exception { if (isWatcherTest()) { assertBusy(() -> { ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); switch (state) { case "stopped": ClientYamlTestResponse startResponse = - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); throw new AssertionError("waiting until stopped state reached started state"); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_stats.json index e860a590b5247..de4bb66056bb3 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_stats.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_stats.json @@ -8,6 +8,7 @@ "parts": { "index": { "type": "list", + "required" : true, "description": "A comma-separated list of index patterns; use `_all` to perform the operation on all indices" } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.forget_follower.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.forget_follower.json new file mode 100644 index 0000000000000..92d38e5e999e8 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.forget_follower.json @@ -0,0 +1,21 @@ +{ + "ccr.forget_follower": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "methods": [ "POST" ], + "url": { + "path": "/{index}/_ccr/forget_follower", + "paths": [ "/{index}/_ccr/forget_follower" ], + "parts": { + "index": { + "type": "string", + "required": true, + "description": "the name of the leader index for which specified follower retention leases should be removed" + } + } + }, + "body": { + "description" : "the name and UUID of the follower index, the name of the cluster containing the follower index, and the alias from the perspective of that cluster for the remote cluster containing the leader index", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.graph.explore.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/graph.explore.json similarity index 97% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.graph.explore.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/graph.explore.json index a092ffb1582eb..293694d0ae8a0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.graph.explore.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/graph.explore.json @@ -1,5 +1,5 @@ { - "xpack.graph.explore": { + "graph.explore": { "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html", "methods": ["GET", "POST"], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json index fc55f40962903..2441e64264809 100644 --- 
a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json @@ -11,13 +11,7 @@ "description" : "The name of the index to explain" } }, - "params": { - "human": { - "type" : "boolean", - "default" : "false", - "description" : "Return data such as dates in a human readable format" - } - } + "params": {} }, "body": null } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.delete.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.delete.json similarity index 53% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.delete.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.delete.json index 72229bafdbe04..a85552e557597 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.delete.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.delete.json @@ -1,6 +1,6 @@ { - "xpack.license.delete": { - "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "license.delete": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-license.html", "methods": ["DELETE"], "url": { "path": "/_license", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get.json similarity index 74% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.get.json index aa425d3b12d8e..07a570935a6b7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get.json @@ -1,6 +1,6 @@ { - "xpack.license.get": { - "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "license.get": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html", "methods": ["GET"], "url": { "path": "/_license", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_basic_status.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_basic_status.json similarity index 59% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_basic_status.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_basic_status.json index d5ae7be328718..cfb5608ac4063 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_basic_status.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_basic_status.json @@ -1,6 +1,6 @@ { - "xpack.license.get_basic_status": { - "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "license.get_basic_status": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-basic-status.html", "methods": ["GET"], "url": { "path": "/_license/basic_status", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_trial_status.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_trial_status.json similarity index 59% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_trial_status.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_trial_status.json index dd867ae6e79a5..daeb4913ad9eb 100644 --- 
a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_trial_status.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.get_trial_status.json @@ -1,6 +1,6 @@ { - "xpack.license.get_trial_status": { - "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "license.get_trial_status": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trial-status.html", "methods": ["GET"], "url": { "path": "/_license/trial_status", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post.json similarity index 76% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.post.json index 5c58f55004217..1bd78a1fb6af5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post.json @@ -1,6 +1,6 @@ { - "xpack.license.post": { - "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "license.post": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-license.html", "methods": ["PUT", "POST"], "url": { "path": "/_license", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_basic.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_basic.json similarity index 73% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_basic.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_basic.json index 4b4610973f9bc..91da4de9ff0a2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_basic.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_basic.json @@ -1,6 +1,6 @@ { - "xpack.license.post_start_basic": { - "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "license.post_start_basic": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/start-basic.html", "methods": ["POST"], "url": { "path": "/_license/start_basic", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_trial.json similarity index 79% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_trial.json index 8c8b19b0506ba..55a73df4946b5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/license.post_start_trial.json @@ -1,6 +1,6 @@ { - "xpack.license.post_start_trial": { - "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "license.post_start_trial": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trial.html", "methods": ["POST"], "url": { "path": "/_license/start_trial", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.deprecations.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/migration.deprecations.json similarity index 70% rename from 
x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.deprecations.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/migration.deprecations.json index 9ca2d5fd75ad2..7c7ad54f095a6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.deprecations.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/migration.deprecations.json @@ -1,6 +1,6 @@ { - "xpack.migration.deprecations": { - "documentation": "http://www.elastic.co/guide/en/migration/current/migration-api-deprecation.html", + "migration.deprecations": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-deprecation.html", "methods": [ "GET" ], "url": { "path": "/{index}/_migration/deprecations", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.find_file_structure.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.find_file_structure.json index 94b69951e32ec..4e5550ae824a9 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.find_file_structure.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.find_file_structure.json @@ -1,6 +1,6 @@ { "ml.find_file_structure": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-file-structure.html", + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-find-file-structure.html", "methods": [ "POST" ], "url": { "path": "/_ml/find_file_structure", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.get_calendars.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.get_calendars.json index 29d0067b5f989..de2e192d7e072 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.get_calendars.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.get_calendars.json @@ -24,6 +24,8 @@ } } }, - "body": null + "body": { + "description": "The from and size parameters optionally sent in the body" + } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/monitoring.bulk.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/monitoring.bulk.json index 55ce7b9ba6170..a299e2adc4bc7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/monitoring.bulk.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/monitoring.bulk.json @@ -1,6 +1,6 @@ { "monitoring.bulk": { - "documentation": "http://www.elastic.co/guide/en/monitoring/current/appendix-api-bulk.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/es-monitoring.html", "methods": ["POST", "PUT"], "url": { "path": "/_monitoring/bulk", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.delete_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.delete_job.json similarity index 91% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.delete_job.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.delete_job.json index 8046667f889e1..e03cd2ae977b9 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.delete_job.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.delete_job.json @@ -1,5 +1,5 @@ { - "xpack.rollup.delete_job": { + "rollup.delete_job": { "documentation": "", "methods": [ "DELETE" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_jobs.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_jobs.json similarity index 93% rename from 
x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_jobs.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_jobs.json index fc2f49f8415e8..aa5d56e590910 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_jobs.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_jobs.json @@ -1,5 +1,5 @@ { - "xpack.rollup.get_jobs": { + "rollup.get_jobs": { "documentation": "", "methods": [ "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_caps.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_rollup_caps.json similarity index 91% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_caps.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_rollup_caps.json index f21bdf26bbf09..0fd8aa3168222 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_caps.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_rollup_caps.json @@ -1,5 +1,5 @@ { - "xpack.rollup.get_rollup_caps": { + "rollup.get_rollup_caps": { "documentation": "", "methods": [ "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_index_caps.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_rollup_index_caps.json similarity index 89% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_index_caps.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_rollup_index_caps.json index f2db0e93dce77..c446f29e7591b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_index_caps.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.get_rollup_index_caps.json @@ -1,5 +1,5 @@ { - "xpack.rollup.get_rollup_index_caps": { + "rollup.get_rollup_index_caps": { "documentation": "", "methods": [ "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.put_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.put_job.json similarity index 93% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.put_job.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.put_job.json index 5b5d59b1dd3c3..ca33affd7d8a6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.put_job.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.put_job.json @@ -1,5 +1,5 @@ { - "xpack.rollup.put_job": { + "rollup.put_job": { "documentation": "", "methods": [ "PUT" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.rollup_search.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.rollup_search.json similarity index 84% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.rollup_search.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.rollup_search.json index 0858e3260f822..5a63dfa3b42aa 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.rollup_search.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.rollup_search.json @@ -1,5 +1,5 @@ { - "xpack.rollup.rollup_search": { + "rollup.rollup_search": { "documentation": "", "methods": [ "GET", "POST" ], "url": { @@ -7,9 +7,9 @@ "paths": [ "{index}/_rollup_search", "{index}/{type}/_rollup_search" ], "parts": { "index": { - "type": "string", + 
"type": "list", "required": true, - "description": "The index or index-pattern (containing rollup or regular data) that should be searched" + "description": "The indices or index-pattern(s) (containing rollup or regular data) that should be searched" }, "type": { "type": "string", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.start_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.start_job.json similarity index 91% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.start_job.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.start_job.json index 6fad8ef9c35c5..8ee505b195b22 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.start_job.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.start_job.json @@ -1,5 +1,5 @@ { - "xpack.rollup.start_job": { + "rollup.start_job": { "documentation": "", "methods": [ "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.stop_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.stop_job.json similarity index 96% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.stop_job.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.stop_job.json index b42087208e202..152b72945800d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.stop_job.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/rollup.stop_job.json @@ -1,5 +1,5 @@ { - "xpack.rollup.stop_job": { + "rollup.stop_job": { "documentation": "", "methods": [ "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.disable_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.disable_user.json index cfc04b52ad38d..3aa50467fb546 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.disable_user.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.disable_user.json @@ -11,7 +11,7 @@ "username": { "type" : "string", "description" : "The username of the user to disable", - "required" : false + "required" : true } }, "params": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.enable_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.enable_user.json index aa50ddce9ac0a..950a2b54939cd 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.enable_user.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.enable_user.json @@ -11,7 +11,7 @@ "username": { "type" : "string", "description" : "The username of the user to enable", - "required" : false + "required" : true } }, "params": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.get_privileges.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.get_privileges.json index 95a39bd313a2b..2cf10a872d519 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.get_privileges.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.get_privileges.json @@ -5,6 +5,8 @@ "url": { "path": "/_security/privilege/{application}/{name}", "paths": [ + "/_security/privilege", + "/_security/privilege/{application}", "/_security/privilege/{application}/{name}" ], "parts": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.get_user_privileges.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.get_user_privileges.json index 
45f1e4a08c6d8..b60298f89e017 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.get_user_privileges.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.get_user_privileges.json @@ -1,6 +1,6 @@ { "security.get_user_privileges": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user-privileges.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-privileges.html", "methods": [ "GET" ], "url": { "path": "/_security/user/_privileges", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.clear_cursor.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/sql.clear_cursor.json similarity index 91% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.clear_cursor.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/sql.clear_cursor.json index 2d2ce3519b239..ec84f9543bfe0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.clear_cursor.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/sql.clear_cursor.json @@ -1,5 +1,5 @@ { - "xpack.sql.clear_cursor": { + "sql.clear_cursor": { "documentation": "Clear SQL cursor", "methods": [ "POST"], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.query.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/sql.query.json similarity index 95% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.query.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/sql.query.json index b95aa509772fd..c12a876e8cd32 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.query.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/sql.query.json @@ -1,5 +1,5 @@ { - "xpack.sql.query": { + "sql.query": { "documentation": "Execute SQL", "methods": [ "POST", "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.translate.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/sql.translate.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.translate.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/sql.translate.json index 29a522ceb31c7..2200a61be66b2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.translate.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/sql.translate.json @@ -1,5 +1,5 @@ { - "xpack.sql.translate": { + "sql.translate": { "documentation": "Translate SQL into Elasticsearch queries", "methods": [ "POST", "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.ack_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.ack_watch.json similarity index 95% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.ack_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.ack_watch.json index 5f1ed7f860f97..4920c986a042f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.ack_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.ack_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.ack_watch": { + "watcher.ack_watch": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html", "methods": [ "PUT", "POST" ], "url": { diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.activate_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.activate_watch.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.activate_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.activate_watch.json index 12c38ce1bebf8..49fb169dede77 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.activate_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.activate_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.activate_watch": { + "watcher.activate_watch": { "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html", "methods": [ "PUT", "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.deactivate_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.deactivate_watch.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.deactivate_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.deactivate_watch.json index d9cb9d653bc01..ddc68b439395e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.deactivate_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.deactivate_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.deactivate_watch": { + "watcher.deactivate_watch": { "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html", "methods": [ "PUT", "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.delete_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.delete_watch.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.delete_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.delete_watch.json index a243315c91a62..cdf61ad52023f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.delete_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.delete_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.delete_watch": { + "watcher.delete_watch": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html", "methods": [ "DELETE" ], diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.execute_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.execute_watch.json similarity index 95% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.execute_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.execute_watch.json index 0456eef5f49ab..6db8f3ae115f5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.execute_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.execute_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.execute_watch": { + "watcher.execute_watch": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html", "methods": [ "PUT", "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.get_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.get_watch.json similarity index 93% rename from 
x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.get_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.get_watch.json index b0587301ec425..81f21b4b0c1e5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.get_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.get_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.get_watch": { + "watcher.get_watch": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html", "methods": [ "GET" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.put_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.put_watch.json similarity index 97% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.put_watch.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.put_watch.json index 438f2e4ee7637..24f020a7b90b4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.put_watch.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.put_watch.json @@ -1,5 +1,5 @@ { - "xpack.watcher.put_watch": { + "watcher.put_watch": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html", "methods": [ "PUT", "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.start.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.start.json similarity index 91% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.start.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.start.json index eceb2a8628517..649b21c7db3f1 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.start.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.start.json @@ -1,5 +1,5 @@ { - "xpack.watcher.start": { + "watcher.start": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html", "methods": [ "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.stats.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.stats.json index 13857f1791019..01241848ab9f3 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.stats.json @@ -1,5 +1,5 @@ { - "xpack.watcher.stats": { + "watcher.stats": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html", "methods": [ "GET" ], "url": { @@ -7,14 +7,14 @@ "paths": [ "/_watcher/stats", "/_watcher/stats/{metric}" ], "parts": { "metric": { - "type" : "enum", + "type" : "list", "options" : ["_all", "queued_watches", "current_watches", "pending_watches"], "description" : "Controls what additional stat metrics should be included in the response" } }, "params": { "metric": { - "type" : "enum", + "type" : "list", "options" : ["_all", "queued_watches", "current_watches", "pending_watches"], "description" : "Controls what additional stat metrics should be included in the response" }, diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stop.json 
b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.stop.json similarity index 92% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stop.json rename to x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.stop.json index 1a14947b4fb11..4deee79436e2d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stop.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/watcher.stop.json @@ -1,5 +1,5 @@ { - "xpack.watcher.stop": { + "watcher.stop": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html", "methods": [ "POST" ], "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.get_assistance.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.get_assistance.json deleted file mode 100644 index cfa7d949efed4..0000000000000 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.get_assistance.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "xpack.migration.get_assistance": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-assistance.html", - "methods": [ "GET" ], - "url": { - "path": "/_migration/assistance", - "paths": [ - "/_migration/assistance", - "/_migration/assistance/{index}" - ], - "parts": { - "index": { - "type" : "list", - "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices" - } - }, - "params": { - "allow_no_indices": { - "type" : "boolean", - "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards": { - "type" : "enum", - "options" : ["open","closed","none","all"], - "default" : "open", - "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." 
- }, - "ignore_unavailable": { - "type" : "boolean", - "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" - } - } - } - } -} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.upgrade.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.upgrade.json deleted file mode 100644 index d134b27d257a9..0000000000000 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.upgrade.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "xpack.migration.upgrade": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-upgrade.html", - "methods": [ "POST" ], - "url": { - "path": "/_migration/upgrade/{index}", - "paths": [ - "/_migration/upgrade/{index}" - ], - "parts": { - "index": { - "type" : "string", - "required" : true, - "description" : "The name of the index" - } - }, - "params": { - "wait_for_completion": { - "type" : "boolean", - "default": true, - "description" : "Should the request block until the upgrade operation is completed" - } - } - } - } -} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/api_key/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/api_key/10_basic.yml index def779cc7024e..77c655207ba81 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/api_key/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/api_key/10_basic.yml @@ -7,13 +7,34 @@ setup: cluster.health: wait_for_status: yellow + - do: + security.put_role: + name: "admin_role" + body: > + { + "cluster": ["all"], + "indices": [ + { + "names": "*", + "privileges": ["all"] + } + ], + "applications": [ + { + "application": "myapp", + "privileges": ["*"], + "resources": ["*"] + } + ] + } + - do: security.put_user: username: "api_key_user" body: > { "password" : "x-pack-test-password", - "roles" : [ "superuser" ], + "roles" : [ "admin_role" ], "full_name" : "API key user" } @@ -38,6 +59,11 @@ setup: --- teardown: + - do: + security.delete_role: + name: "admin_role" + ignore: 404 + - do: security.delete_user: username: "api_key_user" @@ -54,7 +80,7 @@ teardown: - do: headers: - Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user security.create_api_key: body: > { @@ -105,7 +131,7 @@ teardown: - do: headers: - Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user security.create_api_key: body: > { @@ -140,8 +166,6 @@ teardown: - set: { name: api_key_name } - do: - headers: - Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" security.get_api_key: id: "$api_key_id" - match: { "api_keys.0.id": "$api_key_id" } @@ -157,7 +181,7 @@ teardown: - do: headers: - Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user security.create_api_key: body: > { @@ -174,8 +198,6 @@ teardown: - transform_and_set: { login_creds: "#base64EncodeCredentials(id,api_key)" } - do: - headers: - Authorization: Apikey ${login_creds} security.invalidate_api_key: body: > { @@ -193,7 +215,7 @@ teardown: - do: headers: - Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" + Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user security.create_api_key: body: > { diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/test/deprecation/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/deprecation/10_basic.yml index 1cbb310bb4a08..99e6cdc72faf8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/deprecation/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/deprecation/10_basic.yml @@ -7,7 +7,7 @@ setup: --- "Test Deprecations": - do: - xpack.migration.deprecations: + migration.deprecations: index: "*" - length: { cluster_settings: 0 } - length: { node_settings: 0 } @@ -54,7 +54,7 @@ setup: - do: warnings: - Deprecated field [use_dis_max] used, replaced by [Set [tie_breaker] to 1 instead] - xpack.migration.deprecations: + migration.deprecations: index: "*" - length: { ml_settings: 1 } - match: { ml_settings.0.level : warning } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml index ccd861e6358e0..c7aa714032f92 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml @@ -39,7 +39,7 @@ setup: wait_for_status: green - do: - xpack.graph.explore: + graph.explore: index: test_1 body: {"query": {"match": {"keys": 1}},"controls":{"use_significance":false},"vertices":[{"field": "keys","min_doc_count": 1}]} - length: {failures: 0} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml index 6f5b1bd740a92..0a3b2bc135b57 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml @@ -1,7 +1,7 @@ --- teardown: - do: - xpack.license.post: + license.post: acknowledge: true body: | {"licenses":[{"uid":"3aa62ffe-36e1-4fad-bfdc-9dff8301eb22","type":"trial","issue_date_in_millis":1523456691721,"expiry_date_in_millis":1838816691721,"max_nodes":5,"issued_to":"customer","issuer":"elasticsearch","signature":"AAAABAAAAA2kWNcuc+DT0lrlmYZKAAAAIAo5/x6hrsGh1GqqrJmy4qgmEC7gK0U4zQ6q5ZEMhm4jAAABAEn6fG9y2VxKBu2T3D5hffh56kzOQODCOdhr0y2d17ZSIJMZRqO7ZywPCWNS1aR33GhfIHkTER0ysML0xMH/gXavhyRvMBndJj0UBKzuwpTawSlnxYtcqN8mSBIvJC7Ki+uJ1SpAILC2ZP9fnkRlqwXqBlTwfYn7xnZgu9DKrOWru/ipTPObo7jcePl8VTK6nWFen7/hCFDQTUFZ0jQvd+nq7A1PAcHGNxGfdbMVmAXCXgGWkRfT3clo9/vadgo+isNyh1sPq9mN7gwsvBAKtA1FrpH2EXYYbfOsSpBvUmhYMgErLg1k3/CbS0pCWLKOaX1xTMayosdZOjagU3auZXY=","start_date_in_millis":-1}]} @@ -10,7 +10,7 @@ teardown: ## current license version - do: - xpack.license.post: + license.post: acknowledge: true body: | 
{"licenses":[{"uid":"894371dc-9t49-4997-93cb-8o2e3r7fa6a8","type":"trial","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1916956799999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAgAAAA0FWh0T9njItjQ2qammAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQBZhvozA0trrxhUZ1QbaTsKTna9C5KVQ6pv8yg1pnsBpZXCl8kX1SrgoFn1bXq61IvJwfw5qnmYNiH3hRhTO9EyaCBqaLk8NXZQ6TrRkQSpEnnBwAYUkZeKXsIuBoOk4B4mzwC/r8aMAkzrTiEBtBbog+57cSaU9y37Gkdd+1jXCQrxP+jOEUf7gnXWZvE6oeRroLvCt1fYn09k0CF8kKTbrPTSjC6igZR3uvTHyee74XQ9PRavvHax73T4UOEdQZX/P1ibSQIWKbBRD5YQ1POYVjTayoltTnWLMxfEcAkkATJZLhpBEHST7kZWjrTS6J1dCReJc7a8Vsj/78HXvOIy"}]} @@ -18,14 +18,14 @@ teardown: - match: { license_status: "valid" } - do: - xpack.license.get: {} + license.get: {} ## a license object has 11 attributes - length: { license: 11 } ## bwc for licenses format - do: - xpack.license.post: + license.post: acknowledge: true body: | {"licenses":[{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"gold","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issued_to","issuer":"issuer","signature":"AAAAAwAAAA2T3vqdBBetKQaBgxipAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQB7pGFYgawfLm9zzT80LvcLHjy1t/v2uSzCQWKdXXhrwSy4WrAH2uK/+PEiQ7aEpW5erLsyJ5KLA6OEZJDaP7r+mjOPuLt0++l5j4DMn7ybMzOPHXWBc6LETE3+pp0GZPyOmwsDkZSRUegTtciR2R6z+mdnGrhOYM80y08KVWwhdU/DHw41MK7ePo6tq73Nz49y9lDgt9fxA0t4ggEBPbnTDDBVQ25AjauY8sa0M5eg9rDDRayw1KamYWrara8PIGX+2YjhtUeQhmlCPdlxc9wECJ7/knPss5bI3ZoXQR3fyXhjcXNnHEIsblqLrMCal3pLxs7lI+KPYMa2ZYL/am4P"}]} @@ -33,13 +33,13 @@ teardown: - match: { license_status: "valid" } - do: - xpack.license.get: {} + license.get: {} - length: { license: 11 } ## license version: 1.x - do: - xpack.license.post: + license.post: acknowledge: true body: | 
{"licenses":[{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"subscription","subscription_type":"gold","issue_date_in_millis":1411948800000,"feature":"shield","expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAQAAAA0LVAywwpSH94cyXr4zAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQA4qscc/URRZVdFoLwgy9dqybYEQLW8YLkiAyPV5XHHHdtk+dtZIepiNEDkUXhSX2waVJlsNRF8/4kqplDfwNoD2TUM8fTgiIfiSiZYGDTGST+yW/5eAveEU5J5v1liBN27bwkqL+V4YAa0Tcm7NKKwjScWKAHiTU3vF8chPkGfCHE0kQgVwPC9RE82pTw0s6/uR4PfLGNFfqPM0uiE5nucfVrtj89JQiO/KA/7ZyFbo7VTNXxZQt7T7rZWBCP9KIjptXzcWuk08Q5S+rSoJNYbFo3HGKtrCVsRz/55rceNtdwKKXu1IwnSeir4I1/KLduQTtFLy0+1th87VS8T88UT"}]} @@ -47,13 +47,13 @@ teardown: - match: { license_status: "valid" } - do: - xpack.license.get: {} + license.get: {} - length: { license: 11 } ## multiple licenses version: 1.x - do: - xpack.license.post: + license.post: acknowledge: true body: | {"licenses":[{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"internal","subscription_type":"none","issue_date_in_millis":1411948800000,"feature":"shield","expiry_date_in_millis":1440892799999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAQAAAA04Q4ky3rFyyWLFkytEAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQBxMvUMn4h2E4R4TQMijahTxQj4LPQO4f1M79UxX/XkDlGcH+J5pRHx08OtTRPsFL1lED+h+PIXx307Vo+PNDsOxrWvoYZeYBkOLAO3ny9vhQga+52jYhMxIuFrT9xbcSCSNpMhGojgOIPU2WgiopVdVcimo1+Gk8VtklPB1wPwFzfOjOnPgp/Icx3WYpfkeAUUOyWUYiFIBAe4bnz84iF+xwLKbgYk6aHF25ECBtdb/Uruhcm9+jEFpoIEUtCouvvk9C+NJZ4OickV4xpRgaRG2x9PONH8ZN0QGhGYhJGbisoCxuDmlLsyVxqxfMu3n/r7/jdsEJScjAlSrsLDOu6H"},{"uid":"893361dc-9749-4997-93cb-802e3dofh7aa","type":"internal","subscription_type":"none","issue_date_in_millis":1443484800000,"feature":"watcher","expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAQAAAA0Sc90guRIaQEmgLvMnAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQCQ94dju0pnDZR3Uuypi0ic3aQJ+nvVqe+U8u79Dga5n1qIjcHDh7HvIBJEkF+tnVPlo/PXV/x7BZSwVY1P
VErit+6rYix1yuHEgqwxmx/VdRICjCaZM6tk0Ob4dZCPv6Ebn2Mmk89KHC/PwiLPqF6QfwV/Pkpa8k2A3ORJmvYSDvXhe6tCs8dqc4ebrsFxqrZjwWh5CZSpzqqZBFXlngDv2N0hHhpGlueRszD0JJ5dfEL5ZA1DDOrgO9OJVejSHyRqe1L5QRUNdXPVfS+EAG0Dd1cNdJ/sMpYCPnVjbw6iq2/YgM3cuztsXVBY7ij4WnoP3ce7Zjs9TwHn+IqzftC6"}]} @@ -61,19 +61,19 @@ teardown: - match: { license_status: "valid" } - do: - xpack.license.get: {} + license.get: {} - length: { license: 11 } - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } --- "Should throw 404 after license deletion": - do: - xpack.license.delete: {} + license.delete: {} - match: { acknowledged: true } - do: - xpack.license.get: {} + license.get: {} catch: missing --- @@ -81,7 +81,7 @@ teardown: # VERSION_NO_FEATURE_TYPE license version - do: - xpack.license.post: + license.post: acknowledge: true body: | {"license": {"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"gold","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issued_to","issuer":"issuer","signature":"AAAAAgAAAA3U8+YmnvwC+CWsV/mRAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQBe8GfzDm6T537Iuuvjetb3xK5dvg0K5NQapv+rczWcQFxgCuzbF8plkgetP1aAGZP4uRESDQPMlOCsx4d0UqqAm9f7GbBQ3l93P+PogInPFeEH9NvOmaAQovmxVM9SE6DsDqlX4cXSO+bgWpXPTd2LmpoQc1fXd6BZ8GeuyYpVHVKp9hVU0tAYjw6HzYOE7+zuO1oJYOxElqy66AnIfkvHrvni+flym3tE7tDTgsDRaz7W3iBhaqiSntEqabEkvHdPHQdSR99XGaEvnHO1paK01/35iZF6OXHsF7CCj+558GRXiVxzueOe7TsGSSt8g7YjZwV9bRCyU7oB4B/nidgI"}} @@ -89,7 +89,7 @@ teardown: - match: { license_status: "valid" } - do: - xpack.license.get: {} + license.get: {} - length: { license: 11 } --- @@ -97,7 +97,7 @@ teardown: - do: catch: bad_request - xpack.license.post: + license.post: acknowledge: true body: | {"license":{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"basic","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAgAAAA0lKPZ0a7aZquUltho/AAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQAALuQ44S3IG6SzolcXVJ6Z4CIXORDrYQ+wdLCeey0XdujTslAOj+k+vNgo6wauc7Uswi01esHu4lb5IgpvKy7RRCbh5bj/z2ubu2qMJqopp9BQyD7VQjVfqmG6seUMJwJ1a5Avvm9r41YPSPcrii3bKK2e1l6jK6N8ibCvnTyY/XkYGCJrBWTSJePDbg6ErbyodrZ37x1StLbPWcNAkmweyHjDJnvYnbeZZO7A3NmubXZjW7Ttf8/YwQyE00PqMcl7fVPY3hkKpAeHf8aaJbqkKYbqZuER3EWJX7ZvLVb1dNdNg8aXRn7YrkQcYwWgptYQpfV+D7yEJ4j5muAEoler"}} @@ -108,7 +108,7 @@ teardown: - do: catch: bad_request - xpack.license.post: + license.post: acknowledge: true - match: { error.root_cause.0.reason: 'The license must be provided in the request 
body' } @@ -116,24 +116,24 @@ "Current license is trial means not eligible to start trial": - do: - xpack.license.get_trial_status: {} + license.get_trial_status: {} - match: { eligible_to_start_trial: false } - do: - xpack.license.post_start_basic: + license.post_start_basic: acknowledge: true - match: { basic_was_started: true } - do: - xpack.license.get_trial_status: {} + license.get_trial_status: {} - match: { eligible_to_start_trial: false } - do: catch: forbidden - xpack.license.post_start_trial: + license.post_start_trial: acknowledge: true - match: { trial_was_started: false } @@ -142,31 +142,31 @@ "Trial license cannot be basic": - do: catch: bad_request - xpack.license.post_start_trial: + license.post_start_trial: type: "basic" acknowledge: true --- "Can start basic license if do not already have basic": - do: - xpack.license.get_basic_status: {} + license.get_basic_status: {} - match: { eligible_to_start_basic: true } - do: - xpack.license.post_start_basic: + license.post_start_basic: acknowledge: true - match: { basic_was_started: true } - match: { acknowledged: true } - do: - xpack.license.get_basic_status: {} + license.get_basic_status: {} - match: { eligible_to_start_basic: false } - do: catch: forbidden - xpack.license.post_start_basic: {} + license.post_start_basic: {} - match: { basic_was_started: false } - match: { acknowledged: true } @@ -174,7 +174,7 @@ --- "Must acknowledge to start basic": - do: - xpack.license.post_start_basic: {} + license.post_start_basic: {} - match: { basic_was_started: false } - match: { acknowledged: false } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/calendar_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/calendar_crud.yml index d6a7792db7d0c..b4b5c9ad9da7a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/calendar_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/calendar_crud.yml @@ -127,6 +127,20 @@ - length: { calendars: 1} - match: { calendars.0.calendar_id: calendar2 } + - do: + ml.get_calendars: + body: > + { + "page": { + "from": 1, + "size": 2 + } + } + - match: { count: 3 } + - length: { calendars: 2} + - match: { calendars.0.calendar_id: calendar2 } + - match: { calendars.1.calendar_id: calendar3 } + --- "Test PageParams with ID is invalid": - do: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml index 04949aaa2b78d..bb8245a8e7eb4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml @@ -35,7 +35,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-custom-all-test-1 - type: doc id: custom_all_1464739200000_1_1 body: { @@ -62,7 +61,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-custom-all-test-2 - type: doc id: custom_all_1464739200000_1_2 body: { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml index 742fc00beda74..5dda4f3def672 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml @@ -356,9 +356,9 @@ setup: datafeed_id: test-datafeed-aggs-1 - match: { datafeeds.0.datafeed_id: "test-datafeed-aggs-1" } - match: { 
datafeeds.0.aggregations.histogram_buckets.date_histogram.field: "@timestamp" } - - match: { datafeeds.0.aggregations.histogram_buckets.aggregations.@timestamp.max.field: "@timestamp" } - - match: { datafeeds.0.aggregations.histogram_buckets.aggregations.bytes_in_avg.avg.field: "system.network.in.bytes" } - - match: { datafeeds.0.aggregations.histogram_buckets.aggregations.non_negative_bytes.bucket_script.buckets_path.bytes: "bytes_in_derivative" } + - match: { datafeeds.0.aggregations.histogram_buckets.aggs.@timestamp.max.field: "@timestamp" } + - match: { datafeeds.0.aggregations.histogram_buckets.aggs.bytes_in_avg.avg.field: "system.network.in.bytes" } + - match: { datafeeds.0.aggregations.histogram_buckets.aggs.non_negative_bytes.bucket_script.buckets_path.bytes: "bytes_in_derivative" } --- "Test delete datafeed": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_forecast.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_forecast.yml index d780677ea2dcb..758ad967bfb46 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_forecast.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_forecast.yml @@ -34,7 +34,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "delete-forecast-job_model_forecast_someforecastid_1486591200000_1800_0_961_0" body: { @@ -56,7 +55,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "delete-forecast-job_model_forecast_someforecastid_1486591300000_1800_0_961_0" body: { @@ -78,7 +76,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "delete-forecast-job_model_forecast_request_stats_someforecastid" body: { @@ -112,19 +109,16 @@ setup: get: id: delete-forecast-job_model_forecast_request_stats_someforecastid index: .ml-anomalies-shared - type: doc - do: catch: missing get: id: delete-forecast-job_model_forecast_someforecastid_1486591300000_1800_0_961_0 index: .ml-anomalies-shared - type: doc - do: catch: missing get: id: delete-forecast-job_model_forecast_someforecastid_1486591200000_1800_0_961_0 index: .ml-anomalies-shared - type: doc --- "Test delete on _all forecasts not allow no forecasts": @@ -143,3 +137,11 @@ setup: forecast_id: _all allow_no_forecasts: true - match: { acknowledged: true } + +--- +"Test delete all where no forecast_id is set": + - do: + ml.delete_forecast: + job_id: delete-forecast-job + allow_no_forecasts: true + - match: { acknowledged: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml index a070625a7138b..943f21da31c7f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml @@ -39,7 +39,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-delete-model-snapshot - type: doc id: "delete-model-snapshot_model_snapshot_inactive-snapshot" body: > { @@ -57,7 +56,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-state - type: doc id: "delete-model-snapshot_model_state_inactive-snapshot#1" body: > { @@ -69,7 +67,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser index: index: .ml-state - type: doc id: "delete-model-snapshot_model_state_inactive-snapshot#2" body: > { @@ -82,7 +79,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-delete-model-snapshot - type: doc id: "delete-model-snapshot_model_snapshot_active-snapshot" body: > { @@ -158,7 +154,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser count: index: .ml-state - type: doc - match: { count: 3 } @@ -191,7 +186,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser count: index: .ml-state - type: doc - match: { count: 1 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml index bc6aaef2207aa..6dd5103d865f7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml @@ -8,7 +8,6 @@ setup: Content-Type: application/json index: index: .ml-meta - type: doc id: filter_imposter-filter body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml index df8fd41ca04d6..3d2ab8241a9e7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml @@ -23,7 +23,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-get-model-snapshots - type: doc id: "get-model-snapshots-1" body: > { @@ -39,7 +38,6 @@ setup: Content-Type: application/json index: index: .ml-state - type: doc id: "get-model-snapshots_model_state_1#1" body: > { @@ -51,7 +49,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-get-model-snapshots - type: doc id: "get-model-snapshots-2" body: > { @@ -66,7 +63,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-state - type: doc id: "get-model-snapshots_model_state_2#1" body: > { @@ -77,7 +73,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-state - type: doc id: "get-model-snapshots_model_state_2#2" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml index c0be64dd30e1f..eb3a73424a601 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml @@ -186,7 +186,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-state - type: doc id: index-layout-job2_categorizer_state#1 body: key: value @@ -196,7 +195,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser index: index: .ml-state - type: doc id: index-layout-job2_categorizer_state#2 body: key: value @@ -299,7 +297,6 @@ setup: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser count: - type: doc index: .ml-state - match: {count: 0} @@ -307,7 +304,6 @@ setup: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser count: - type: doc index: .ml-state - match: {count: 0} @@ -315,7 +311,6 @@ setup: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser count: - type: doc index: .ml-state - match: {count: 0} @@ -387,7 +382,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: foo - type: doc body: key: value @@ -396,7 +390,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-anomalies-foo - type: doc body: key: value @@ -405,7 +398,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-anomalies-foo - type: doc body: key: value job_id: foo @@ -512,7 +504,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-state - type: doc id: index-layout-quantiles-job_quantiles body: state: quantile-state @@ -563,7 +554,6 @@ setup: index: index: .ml-anomalies-shared - type: doc id: "index-layout-state-job_model_snapshot_123" body: > { @@ -579,7 +569,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-state - type: doc id: index-layout-state-job_model_state_123#1 body: state: new-model-state @@ -589,7 +578,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-state - type: doc id: index-layout-state-job_model_state_123#2 body: state: more-new-model-state @@ -599,7 +587,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-state - type: doc id: index-layout-state-job_categorizer_state#1 body: state: new-categorizer-state @@ -609,7 +596,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser index: index: .ml-state - type: doc id: index-layout-state-job_categorizer_state#2 body: state: more-new-categorizer-state diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml index 82831b882226a..800a536d57e3d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml @@ -236,24 +236,6 @@ "time_format":"yyyy-MM-dd HH:mm:ssX" } } - - do: - catch: param - ml.put_job: - job_id: jobs-crud-id-already-taken - body: > - { - "job_id":"jobs-crud-id-already-taken", - "description":"Analysis of response time by airline", - "analysis_config" : { - "bucket_span": "1h", - "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] - }, - "data_description" : { - "field_delimiter":",", - "time_field":"time", - "time_format":"yyyy-MM-dd HH:mm:ssX" - } - } --- "Test update job": diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml index d70d964a83acb..3a951e07a9f2e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml @@ -23,7 +23,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-buckets - type: doc id: "jobs-get-result-buckets_1464739200000_1" body: { @@ -40,7 +39,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-buckets - type: doc id: "jobs-get-result-buckets_1470009600000_2" body: { @@ -57,7 +55,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-buckets - type: doc id: "jobs-get-result-buckets_1470096000000_3" body: { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml index 3db46d6904f6a..76ae079d786bb 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml @@ -23,7 +23,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-categories - type: doc id: jobs-get-result-categories-1 body: { "job_id": "jobs-get-result-categories", "category_id": 1 } - do: @@ -32,7 +31,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-categories - type: doc id: jobs-get-result-categories-2 body: { "job_id": "jobs-get-result-categories", "category_id": 2 } - do: @@ -41,7 +39,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-unrelated - type: doc id: jobs-get-result-categories-3 body: { "job_id": "unrelated", "category_id": 1 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml index b1569ef8370b1..04d438101a7a1 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml @@ -23,7 +23,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-get-influencers-test - type: doc id: 
get-influencers-test_1464739200000_1_1 body: { @@ -42,7 +41,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-get-influencers-test - type: doc id: get-influencers-test_1464825600000_1_2 body: { @@ -62,7 +60,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-get-influencers-test - type: doc id: get-influencers-test_1464912000000_1_3 body: { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml index 66821f1aac491..a18fe92d7336a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml @@ -64,7 +64,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "jobs-get-result-overall-buckets-60_1" body: { @@ -81,7 +80,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "jobs-get-result-overall-buckets-60_2" body: { @@ -98,7 +96,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "jobs-get-result-overall-buckets-60_3" body: { @@ -114,7 +111,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-anomalies-shared - type: doc id: "jobs-get-result-overall-buckets-30_1" body: { @@ -131,7 +127,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "jobs-get-result-overall-buckets-30_2" body: { @@ -148,7 +143,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "jobs-get-result-overall-buckets-30_3" body: { @@ -165,7 +159,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "jobs-get-result-overall-buckets-17_1" body: { @@ -182,7 +175,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "jobs-get-result-overall-buckets-17_2" body: { @@ -199,7 +191,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "jobs-get-result-overall-buckets-17_3" body: { @@ -216,7 +207,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "jobs-get-result-overall-buckets-17_4" body: { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml index 5e91b80754039..10a61e32f8816 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml @@ -23,7 +23,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-records - type: doc id: jobs-get-result-records_1464739200000_1_1 body: { @@ -40,7 +39,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-jobs-get-result-records - type: doc id: jobs-get-result-records_1464825600000_1_2 body: { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml index e29700e423bdd..b8f1c3df0ca9d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml @@ -234,7 +234,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: job-stats-v54-bwc-test-data-counts body: { @@ -259,7 +258,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: job-stats-v54-bwc-test-model_size_stats body: { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml index c07bdf2add3c0..d157cc0531b65 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml @@ -25,7 +25,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "new_doc" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml index def909c942134..1684bdc4de0ab 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml @@ -89,7 +89,6 @@ setup: - do: get: index: .ml-anomalies-post-data-job - type: doc id: post-data-job_data_counts - match: { _source.processed_record_count: 2 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml index f5b37afeba889..85e2ffd8a1856 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml @@ -39,7 +39,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot - type: doc id: "revert-model-snapshot_model_snapshot_first" body: > { @@ -67,7 +66,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot - type: doc id: "revert-model-snapshot_model_snapshot_second" body: > { @@ -95,7 +93,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot - type: doc id: "revert-model-snapshot_1464825600000_1" body: > { @@ -111,7 +108,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot - type: doc id: "revert-model-snapshot_1464782400000_1" body: > { @@ -127,7 +123,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot - type: doc id: "revert-model-snapshot_1462060800000_1" body: > { @@ -143,7 +138,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot - type: doc id: "revert-model-snapshot_1464825600000_1_1" body: > { @@ -159,7 +153,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot - type: doc id: "revert-model-snapshot_1462060800000_1_2" body: > { @@ -175,7 +168,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot - type: doc id: "revert-model-snapshot_1464825600000_1_3" body: { "job_id": "revert-model-snapshot", @@ -193,7 +185,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-revert-model-snapshot - type: doc id: "revert-model-snapshot_1462060800000_1_4" body: { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml index be1e0203a92c7..4a93e46c6b491 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml @@ -6,6 +6,10 @@ setup: indices.create: index: airline-data body: + settings: + index: + number_of_replicas: 0 + number_of_shards: 1 mappings: properties: time: @@ -53,10 +57,9 @@ setup: job_id: set-upgrade-mode-job - do: - headers: - Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - ml.start_datafeed: - datafeed_id: set-upgrade-mode-job-datafeed + cluster.health: + index: airline-data + wait_for_status: green --- teardown: @@ -70,6 +73,10 @@ teardown: --- "Test setting upgrade_mode to false when it is already false": + - do: + ml.start_datafeed: + datafeed_id: set-upgrade-mode-job-datafeed + - do: ml.set_upgrade_mode: enabled: false @@ -92,6 +99,22 @@ teardown: --- "Setting upgrade_mode to enabled": + - do: + ml.start_datafeed: + datafeed_id: set-upgrade-mode-job-datafeed + + - do: + cat.tasks: {} + - match: + $body: | + /.+job.+/ + + - do: + cat.tasks: {} + - match: + $body: | + /.+datafeed.+/ + - do: ml.info: {} - match: { upgrade_mode: false } @@ -125,6 +148,22 @@ teardown: --- "Setting upgrade mode to disabled from enabled": + - do: + ml.start_datafeed: + datafeed_id: set-upgrade-mode-job-datafeed + + - do: + cat.tasks: {} + - match: + $body: | + /.+job.+/ + + - do: + cat.tasks: {} + - match: + $body: | + /.+datafeed.+/ + - do: ml.set_upgrade_mode: enabled: true @@ -167,7 +206,11 @@ teardown: ml.get_datafeed_stats: datafeed_id: set-upgrade-mode-job-datafeed - match: { datafeeds.0.state: "started" } - - match: { datafeeds.0.assignment_explanation: "" } + # The datafeed will not be assigned until the job has updated its status on the node it's assigned + # to, and that probably won't happen in time for this assertion. That is indicated by an assignment + # reason ending "state is stale". However, the datafeed should NOT be unassigned with a reason of + # "upgrade mode is enabled" - that reason should have gone away before this test. + - match: { datafeeds.0.assignment_explanation: /(^$|.+job.+state.is.stale)/ } - do: cat.tasks: {} @@ -175,12 +218,6 @@ teardown: $body: | /.+job.+/ - - do: - cat.tasks: {} - - match: - $body: | - /.+datafeed.+/ - --- "Attempt to open job when upgrade_mode is enabled": - do: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml index 5d364d72f612d..505173db281a7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml @@ -23,7 +23,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-update-model-snapshot - type: doc id: "update-model-snapshot_model_snapshot_snapshot-1" body: > { @@ -39,7 +38,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-state - type: doc id: "update-model-snapshot_model_state_1#1" body: > { @@ -50,7 +48,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser index: index: .ml-state - type: doc id: "update-model-snapshot_model_state_1#2" body: > { @@ -61,7 +58,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-state - type: doc id: "update-model-snapshot_model_state_1#3" body: > { @@ -73,7 +69,6 @@ setup: Content-Type: application/json index: index: .ml-anomalies-update-model-snapshot - type: doc id: "update-model-snapshot_model_snapshot_snapshot-2" body: > { @@ -90,7 +85,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-state - type: doc id: "update-model-snapshot_model_state_2#1" body: > { @@ -101,7 +95,6 @@ setup: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser index: index: .ml-state - type: doc id: "update-model-snapshot_model_state_2#2" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/10_basic.yml index 0c201aa6ef5d9..ce4751d690d80 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/10_basic.yml @@ -1,6 +1,10 @@ --- "Bulk indexing of monitoring data": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/30101" + - do: monitoring.bulk: system_id: "kibana" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml index 40fa404f36147..1710e51c32bdc 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml @@ -15,7 +15,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -41,7 +41,7 @@ setup: "Test basic delete_job": - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: @@ -80,12 +80,12 @@ setup: upgraded_doc_id: true - do: - xpack.rollup.delete_job: + rollup.delete_job: id: foo - is_true: acknowledged - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: jobs: [] @@ -94,7 +94,7 @@ setup: "Test delete job twice": - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: @@ -133,12 +133,12 @@ setup: upgraded_doc_id: true - do: - xpack.rollup.delete_job: + rollup.delete_job: id: foo - is_true: acknowledged - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: jobs: [] @@ -147,7 +147,7 @@ setup: "Test delete running job": - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: @@ -186,13 +186,13 @@ setup: upgraded_doc_id: true - do: - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started - do: catch: request - xpack.rollup.delete_job: + rollup.delete_job: id: foo - is_false: acknowledged - match: { task_failures.0.reason.type: "illegal_state_exception" } @@ -205,5 +205,5 @@ setup: catch: /the task with id \[does_not_exist\] doesn't exist/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.delete_job: + rollup.delete_job: id: does_not_exist diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml index 6332302e67418..cd00a6f717b02 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml @@ -18,7 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -42,7 +42,7 @@ setup: - is_true: acknowledged - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: @@ -83,7 +83,7 @@ setup: "Test get with no jobs": - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: "_all" - length: { jobs: 0 } @@ -92,7 +92,7 @@ setup: "Test get missing job": - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: @@ -108,7 +108,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -134,7 +134,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: bar body: > { @@ -158,7 +158,7 @@ setup: - is_true: acknowledged - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: "_all" - length: { jobs: 2 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml index 0b5a8a2e11180..3d38f4a371234 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml @@ -36,7 +36,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -62,7 +62,7 @@ setup: "Verify one job caps": - do: - xpack.rollup.get_rollup_caps: + rollup.get_rollup_caps: id: "foo" - match: @@ -87,7 +87,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -109,7 +109,7 @@ setup: ] } - do: - xpack.rollup.get_rollup_caps: + rollup.get_rollup_caps: id: "foo" - match: @@ -146,7 +146,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -171,7 +171,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo3 body: > { @@ -194,7 +194,7 @@ setup: } - do: - xpack.rollup.get_rollup_caps: + rollup.get_rollup_caps: id: "_all" - match: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml index b300af5e0a014..e4b98b9492087 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml @@ -36,7 +36,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -62,7 +62,7 @@ setup: "Verify one job caps by rollup index": - do: - xpack.rollup.get_rollup_index_caps: + rollup.get_rollup_index_caps: index: "foo_rollup" - match: @@ -87,7 +87,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -109,7 +109,7 @@ setup: ] } - do: - xpack.rollup.get_rollup_index_caps: + rollup.get_rollup_index_caps: index: "foo_rollup" - match: @@ -147,7 +147,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -169,7 +169,7 @@ setup: ] } - do: - xpack.rollup.get_rollup_index_caps: + rollup.get_rollup_index_caps: index: "foo_rollup" - match: @@ -194,7 +194,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -218,7 +218,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo3 body: > { @@ -241,7 +241,7 @@ setup: } - do: - xpack.rollup.get_rollup_index_caps: + rollup.get_rollup_index_caps: index: "_all" - match: @@ -297,7 +297,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -321,7 +321,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo3 body: > { @@ -344,7 +344,7 @@ setup: } - do: - xpack.rollup.get_rollup_index_caps: + rollup.get_rollup_index_caps: index: "foo_rollup2,foo_rollup" - match: @@ -396,7 +396,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -420,7 +420,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo3 body: > { @@ -443,7 +443,7 @@ setup: } - do: - xpack.rollup.get_rollup_index_caps: + rollup.get_rollup_index_caps: index: "*_rollup2" - match: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml index f8cfe85cc2e6a..7983778108bd0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml @@ -18,7 +18,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -42,7 +42,7 @@ setup: - is_true: acknowledged - do: - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: @@ -86,7 +86,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -113,7 +113,7 @@ setup: catch: /Cannot create rollup job \[foo\] because job was previously created \(existing metadata\)/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -144,7 +144,7 @@ setup: catch: /foo/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -174,7 +174,7 @@ setup: catch: /unknown field \[headers\], parser not found/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -206,7 +206,7 @@ setup: catch: /Could not find a \[numeric\] or \[date\] field with name \[field_doesnt_exist\] in any of the indices matching the index pattern/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -235,7 +235,7 @@ setup: catch: /Unsupported metric \[does_not_exist\]/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml index 9af896f4c9fab..a7765dfc15fe3 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml @@ -17,7 +17,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -127,7 +127,7 @@ setup: "Basic Search": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -152,7 +152,7 @@ setup: "Formatted Date Histo": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -178,7 +178,7 @@ setup: "Empty aggregation": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: rest_total_hits_as_int: true index: "foo_rollup" body: @@ -193,7 +193,7 @@ setup: "Empty aggregation with new response format": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -210,7 +210,7 @@ setup: "Search with Metric": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -243,7 +243,7 @@ setup: "Search with Query": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -280,7 +280,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -388,7 +388,7 @@ setup: - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -423,7 +423,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -531,7 +531,7 @@ setup: - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -568,7 +568,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo2 body: > { @@ -675,7 +675,7 @@ setup: "_rollup.version": 1 - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" body: size: 0 @@ -710,7 +710,7 @@ setup: "Wildcards matching single rollup index": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup*" body: size: 0 @@ -750,7 +750,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: bar body: > { @@ -777,7 +777,7 @@ setup: - do: catch: /RollupSearch currently only supports searching one rollup index at a time\./ - xpack.rollup.rollup_search: + rollup.rollup_search: index: "*_rollup" body: size: 0 @@ -798,7 +798,7 @@ setup: name: rollup_alias - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "rollup_alias" body: size: 0 @@ -838,7 +838,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: bar body: > { @@ -870,7 +870,7 @@ setup: - do: catch: /RollupSearch currently only supports searching one rollup index at a time\./ - xpack.rollup.rollup_search: + rollup.rollup_search: index: "rollup_alias" body: size: 0 @@ -886,7 +886,7 @@ setup: "Search with typed_keys": - do: - xpack.rollup.rollup_search: + rollup.rollup_search: index: "foo_rollup" typed_keys: true body: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml index 5a53847187484..07f4e2b62a6f9 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/security_tests.yml @@ -89,7 +89,7 @@ teardown: # This index pattern will match both indices, but we only have permission to read one - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -115,7 +115,7 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started @@ -135,7 +135,7 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: jobs.0.stats.documents_processed: 1 @@ -250,7 +250,7 @@ teardown: # Index contains two docs, but we should only be able to see one of them - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -275,7 +275,7 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started @@ -295,7 +295,7 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user - xpack.rollup.get_jobs: + rollup.get_jobs: id: foo - match: jobs.0.stats.documents_processed: 1 diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml index 88619e0dfc8ee..fbf9e8519059a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml @@ -15,7 +15,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -44,7 +44,7 @@ setup: catch: /Task for Rollup Job \[does_not_exist\] not found/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.start_job: + rollup.start_job: id: does_not_exist @@ -54,7 +54,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started @@ -62,6 +62,6 @@ setup: catch: /Cannot start task for Rollup Job \[foo\] because state was/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.start_job: + rollup.start_job: id: foo diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml index bcb592be7a074..7e8b6b3f61af0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml @@ -15,7 +15,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.put_job: + rollup.put_job: id: foo body: > { @@ -44,7 +44,7 @@ setup: catch: /Task for Rollup Job \[does_not_exist\] not found/ headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.stop_job: + rollup.stop_job: id: does_not_exist @@ -54,21 +54,21 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.stop_job: + rollup.stop_job: id: foo - is_true: stopped - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.stop_job: + rollup.stop_job: id: foo - is_true: stopped @@ -78,7 +78,7 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.stop_job: + rollup.stop_job: id: foo - is_true: stopped @@ -92,14 +92,14 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.stop_job: + rollup.stop_job: id: foo wait_for_completion: true - is_true: stopped @@ -113,14 +113,14 @@ setup: - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - xpack.rollup.start_job: + rollup.start_job: id: foo - is_true: started - do: headers: Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - xpack.rollup.stop_job: + rollup.stop_job: id: foo wait_for_completion: true timeout: "5s" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml new file mode 100644 index 0000000000000..52b6259f7ccf0 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml @@ -0,0 +1,139 @@ +--- + +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + security.put_role: + name: "alias_write_manage_role" + body: > + { + "indices": [ + { "names": ["write_manage_alias"], "privileges": ["write", "manage"] } + ] + } + + - do: + security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "alias_write_manage_role" ], + "full_name" : "user with privileges to write, manage via alias" + } + + - do: + indices.create: + index: logs-000001 + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + indices.put_alias: + index: logs-000001 + name: write_manage_alias + +--- +teardown: + - do: + security.delete_user: + username: "test_user" + ignore: 404 + + - do: + security.delete_role: + name: "alias_write_role" + ignore: 404 + + - do: + indices.delete_alias: + index: "logs-000001" + name: [ "write_manage_alias" ] + ignore: 404 + + - do: + indices.delete: + index: [ "logs-000001" ] + ignore: 404 + +--- +"Test rollover, index via write alias of index": + + # index using alias + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + create: + id: 1 + index: write_manage_alias + body: > + { + "name" : "doc1" + } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + create: + id: 2 + index: write_manage_alias + body: > + { + "name" : "doc2" + } + + - do: + indices.refresh: {} + + # rollover using alias + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.rollover: + alias: "write_manage_alias" + wait_for_active_shards: 1 + body: + conditions: + max_docs: 1 + + - match: { old_index: logs-000001 } + - match: { new_index: logs-000002 } + - match: { rolled_over: true } + - match: { dry_run: false } + - match: { conditions: { "[max_docs: 1]": true } } + + # ensure new index is created + - do: + indices.exists: + index: logs-000002 + + - is_true: '' + + # index using alias + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + create: + id: 3 + index: write_manage_alias + body: > + { + "name" : "doc3" + } + + - do: + indices.refresh: {} + + # check alias points to the new index and the doc was indexed + - do: + search: + rest_total_hits_as_int: true + index: write_manage_alias + + - match: { hits.total: 1 } + - match: { hits.hits.0._index: "logs-000002"} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/hidden-index/12_security-7_read.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/hidden-index/12_security-7_read.yml new file mode 100644 index 0000000000000..cb910d4a88156 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/hidden-index/12_security-7_read.yml @@ -0,0 +1,85 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + security.put_role: + name: 
"all_access" + body: > + { + "cluster": [ "all" ], + "indices": [ + { "names": ["*"], "privileges": ["all"] } + ] + } + + - do: + security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "all_access" ], + "full_name" : "user with all possible privileges (but not superuser)" + } + +--- +teardown: + - do: + security.delete_user: + username: "test_user" + ignore: 404 + + - do: + security.delete_role: + name: "all_access" + ignore: 404 + +--- +"Test get security index metadata": + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.get: + index: ".security-7" + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.get: + index: ".security*7" + - length: { $body: 0 } + +--- +"Test get security document": + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + get: + index: ".security-7" + type: "doc" + id: "user-test_user" + +--- +"Test search security index": + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + rest_total_hits_as_int: true + index: ".security-7" + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + rest_total_hits_as_int: true + index: ".security*7" + - match: { hits.total: 0 } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml index 551866b3b1ebd..9ac15b309b161 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml @@ -26,7 +26,7 @@ setup: --- "Execute some SQL": - do: - xpack.sql.query: + sql.query: format: json body: query: "SELECT * FROM test ORDER BY int asc" @@ -42,7 +42,7 @@ setup: --- "Paging through results": - do: - xpack.sql.query: + sql.query: format: json body: query: "SELECT * FROM test ORDER BY int asc" @@ -57,7 +57,7 @@ setup: - set: { cursor: cursor } - do: - xpack.sql.query: + sql.query: format: json body: cursor: "$cursor" @@ -68,7 +68,7 @@ setup: - set: { cursor: cursor } - do: - xpack.sql.query: + sql.query: format: json body: cursor: "$cursor" @@ -79,7 +79,7 @@ setup: --- "Getting textual representation": - do: - xpack.sql.query: + sql.query: format: txt body: query: "SELECT * FROM test ORDER BY int asc" @@ -95,7 +95,7 @@ setup: --- "Clean cursor": - do: - xpack.sql.query: + sql.query: format: json body: query: "SELECT * FROM test ORDER BY int asc" @@ -108,7 +108,7 @@ setup: - set: { cursor: cursor} - do: - xpack.sql.clear_cursor: + sql.clear_cursor: body: cursor: "$cursor" - match: { "succeeded": true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml index 9fa8e6259f5ff..57f275af5e26b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml @@ -16,7 +16,7 @@ int: 1 - do: - xpack.sql.translate: + sql.translate: body: query: "SELECT * FROM test ORDER BY int asc" - match: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/upgrade/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/upgrade/10_basic.yml deleted file mode 100644 index a2895bb1ed7d6..0000000000000 
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/upgrade/10_basic.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -setup: - - do: - xpack.license.post: - body: > - { - "license": { - "uid": "b8520184-985d-4b04-8a89-b52da6e0aad1", - "type": "platinum", - "issue_date_in_millis": 1494510840000, - "expiry_date_in_millis": 2756814840000, - "max_nodes": 1, - "issued_to": "upgrade_api_test", - "issuer": "elasticsearch", - "signature": "AAAAAwAAAA0hsB+mfk9EqWiY6e1KAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQA6NkNF3Z219ptzRwZwGzgIwaXn5rXvOWSB9KK86xBqeYQMlO1ahCd4eW3FHWTuginPuqMX8okzN+UEMANPE3l0QxvrgCcTzNYPGqCJDwBb0ghuQ4Y5Cezn806sBnXLVF35B1HU2C1PYc1mZvisD63NqasrAVYb3GS6vwq8a7PYfKpfZfFCqG2SZIkSHACPGBTUiPbVEVv1iiOC04x/pjF4Kn26MPbFD5jbQBSY2V8TxoapMHf11EDpOTlMYkXgerbMg7VWtVCypTMJJrhoVguCrZvM8U/+sSnbodtnZUeAImnFbYeV10Rcw62dtrpka0yuo7h6Qtrvy9YqVHZDtyrM", - "start_date_in_millis": -1 - } - } - - - do: - indices.create: - index: test1 - - - do: - indices.refresh: {} - ---- -"Upgrade info - all": - - do: - xpack.migration.get_assistance: { index: _all } - - - length: { indices: 0 } - ---- -"Upgrade test - should fail as index is already up to date": - - do: - catch: /illegal_state_exception/ - xpack.migration.upgrade: { index: test1 } - ---- -"Upgrade test - wait_for_completion:false": - - - do: - xpack.migration.upgrade: - index: test1 - wait_for_completion: false - - - match: {task: '/.+:\d+/'} - - set: {task: task} - - - do: - tasks.get: - wait_for_completion: true - task_id: $task - - is_false: node_failures - - is_true: task - - match: {completed: true} - - is_true: error - - match: {error.type: "illegal_state_exception"} - - match: {error.reason: "Index [test1] cannot be upgraded"} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/users/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/10_basic.yml index e101ca8729aab..9f992adde9670 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/users/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/10_basic.yml @@ -124,7 +124,6 @@ teardown: - do: get: index: .security - type: doc id: user-bob - set: { _source.password: "hash" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml index 9c861e3dcd831..ed35d17984679 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -38,7 +38,7 @@ wait_for_status: yellow - do: - xpack.watcher.ack_watch: + watcher.ack_watch: watch_id: "my_watch" - match: { "status.actions.test_index.ack.state" : "awaits_successful_execution" } @@ -52,7 +52,7 @@ - match: { hits.hits.0._source.status.actions.test_index.ack.state: "awaits_successful_execution" } - do: - xpack.watcher.delete_watch: 
+ watcher.delete_watch: id: "my_watch" - match: { found: true } @@ -63,6 +63,6 @@ cluster.health: wait_for_status: yellow - do: - xpack.watcher.ack_watch: + watcher.ack_watch: watch_id: "non-existent-watch" catch: missing diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml index 813e1f0c88899..34cea0135c9ec 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -38,14 +38,14 @@ wait_for_status: yellow - do: - xpack.watcher.ack_watch: + watcher.ack_watch: watch_id: "my_watch" action_id: "test_index" - match: { "status.actions.test_index.ack.state" : "awaits_successful_execution" } - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" - match: { found: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml index 2a9a4959de4c2..30787ed3c3023 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml @@ -7,7 +7,7 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 @@ -15,7 +15,7 @@ teardown: "Ensure that ack status is reset after unsuccessful execution": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -45,7 +45,7 @@ teardown: } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -54,18 +54,18 @@ teardown: - match: { watch_record.status.actions.indexme.ack.state: "ackable" } - do: - xpack.watcher.ack_watch: + watcher.ack_watch: watch_id: "my_watch" - match: { "status.actions.indexme.ack.state" : "acked" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { "status.actions.indexme.ack.state" : "acked" } # having a false result will reset the ack state - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -80,12 +80,12 @@ teardown: - match: { watch_record.status.actions.indexme.ack.state: "awaits_successful_execution" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { "status.actions.indexme.ack.state" : "awaits_successful_execution" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -97,7 +97,7 @@ teardown: - match: { watch_record.status.actions.indexme.ack.state: "ackable" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { "status.actions.indexme.ack.state" : "ackable" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml index 946f23a2f5a4e..6e7e2030287f6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml @@ -7,7 +7,7 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 @@ -15,7 +15,7 @@ teardown: "Ensure that ack status is reset after unmet action condition": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -45,7 +45,7 @@ teardown: } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -54,18 +54,18 @@ teardown: - match: { watch_record.status.actions.indexme.ack.state: "ackable" } - do: - xpack.watcher.ack_watch: + watcher.ack_watch: watch_id: "my_watch" - match: { "status.actions.indexme.ack.state" : "acked" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { "status.actions.indexme.ack.state" : "acked" } # having a false result will reset the ack state - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -80,12 +80,12 @@ teardown: - match: { watch_record.status.actions.indexme.ack.state: "awaits_successful_execution" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { "status.actions.indexme.ack.state" : "awaits_successful_execution" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -97,7 +97,7 @@ teardown: - match: { watch_record.status.actions.indexme.ack.state: "ackable" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { "status.actions.indexme.ack.state" : "ackable" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml index 99459119e3cdf..5f09e7ef1847a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -34,7 +34,7 @@ - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} @@ -42,7 +42,7 @@ - match: { status.state.active: true } - do: - xpack.watcher.deactivate_watch: + watcher.deactivate_watch: watch_id: "my_watch" - match: { status.state.active : false } @@ -56,14 +56,14 @@ - match: { hits.hits.0._source.status.state.active: false } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } - match: { status.state.active: false } - do: - xpack.watcher.activate_watch: + watcher.activate_watch: watch_id: "my_watch" - match: { status.state.active : true } @@ -77,7 +77,7 @@ - match: { hits.hits.0._source.status.state.active: true } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} @@ -85,7 +85,7 @@ - match: { status.state.active: true } - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" - match: { found: true } @@ -97,11 +97,11 @@ wait_for_status: yellow - do: - xpack.watcher.activate_watch: + watcher.activate_watch: watch_id: "non-existent-watch" catch: missing - do: - xpack.watcher.deactivate_watch: + watcher.deactivate_watch: watch_id: "non-existent-watch" catch: missing diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml index d22b66f85d188..1e9526ab209fa 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 --- "Test delete watch api": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -47,7 +47,7 @@ teardown: - match: { created: true } - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" - match: { found: true } @@ -61,6 +61,6 @@ teardown: --- "Non existent watch returns 404": - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "non-existent-watch" catch: missing diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml index 1fd3c06b2eee7..4f4ab18796d7b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "test_watch" ignore: 404 --- "Test execute watch api with configured trigger data timestamps": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: > { @@ -42,7 +42,7 @@ teardown: - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { @@ -67,7 +67,7 @@ teardown: "Test execute watch API with user supplied watch": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { @@ -104,7 +104,7 @@ teardown: "Execute unknown watch results in 404": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "non-existent-watch" catch: missing @@ -112,7 +112,7 @@ teardown: "Test execute watch with alternative input": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: > { @@ -138,7 +138,7 @@ teardown: - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml index 3766cb6c4a788..fa0793378756e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml @@ -17,7 +17,7 @@ setup: } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { @@ -87,7 +87,7 @@ setup: } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { @@ -146,7 +146,7 @@ setup: index: my_test_index - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/30_throttled.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/30_throttled.yml index 0d2497fed79f1..0511e4447db52 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/30_throttled.yml +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/30_throttled.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "test_watch" ignore: 404 --- "Test execute watch api works with throttling": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: > { @@ -41,7 +41,7 @@ teardown: - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { @@ -60,7 +60,7 @@ teardown: - match: { watch_record.status.state.active: true } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: > { @@ -87,7 +87,7 @@ teardown: - match: { _id: "test_watch" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { @@ -103,7 +103,7 @@ teardown: - match: { watch_record.status.execution_state: "executed" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/40_ignore_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/40_ignore_condition.yml index 5c835f7d6927a..bb6efc6e1041f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/40_ignore_condition.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/40_ignore_condition.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "test_watch" ignore: 404 --- "Test execute watch api can ignore conditions": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: > { @@ -41,7 +41,7 @@ teardown: - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/50_action_mode.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/50_action_mode.yml index 3f6303b4d4718..38dcabf5601a8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/50_action_mode.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/50_action_mode.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "test_watch" ignore: 404 --- "Test execute watch api supports action modes": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: > { @@ -38,7 +38,7 @@ teardown: - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { @@ -55,7 +55,7 @@ teardown: - match: { watch_record.result.actions.0.status: "simulated" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/60_http_input.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/60_http_input.yml index 8a9ba14cb849a..f21981de7cfcf 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/60_http_input.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/60_http_input.yml @@ -17,7 +17,7 @@ setup: - set: { nodes.$master.http.publish_address: http_host } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml index f13c5faf59959..63ad1bd7fe233 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml @@ -2,7 +2,7 @@ "Test execute watch api returns proper error message with watch directly in the body": - do: catch: /please wrap watch including field \[trigger\] inside a \"watch\" field/ - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger": { @@ -11,7 +11,7 @@ } - do: catch: /please wrap watch including field \[input\] inside a \"watch\" field/ - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "input": { @@ -22,7 +22,7 @@ } - do: catch: /please wrap watch including field \[condition\] inside a \"watch\" field/ - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "condition": { @@ -31,7 +31,7 @@ } - do: catch: /please wrap watch including field \[actions\] inside a \"watch\" field/ - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "actions": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml index 3ae5492328702..09b2230f04c60 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 --- "Test get watch api": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -54,7 +54,7 @@ teardown: - match: { hits.total: 1 } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml index fc795005ac8a8..eeed53a78c856 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml @@ -6,7 +6,7 @@ # ensure index exists by creating a different watch - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "other" body: > { @@ -38,7 +38,7 @@ - do: catch: missing - xpack.watcher.get_watch: + watcher.get_watch: id: "missing_watch" - match: { found : false} - match: { _id: "missing_watch" } @@ -56,7 +56,7 @@ - do: catch: missing - xpack.watcher.get_watch: + watcher.get_watch: id: "missing_watch" - match: { found : false} - match: { _id: "missing_watch" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/30_with_chain_input.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/30_with_chain_input.yml index 81a12fe6f7ddb..5e51b9c8d0414 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/30_with_chain_input.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/30_with_chain_input.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -45,7 +45,7 @@ } - do: - xpack.watcher.get_watch: + 
watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml index 78d1b6e65e666..a517293a233af 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -44,7 +44,7 @@ - do: catch: bad_request - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" - match: { error.root_cause.0.type: "action_request_validation_exception" } - match: { error.root_cause.0.reason: "Validation Failed: 1: request body is missing;" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml index ab8d852dab3d4..baf27eb90efd6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch1" ignore: 404 --- "Test put watch api with watch level throttle": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch1" body: > { @@ -47,7 +47,7 @@ teardown: - match: { _id: "my_watch1" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch1" - match: { found : true} - match: { _id: "my_watch1" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml index a48d667066ef3..bf44433ebf31a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch1" ignore: 404 --- "Test put watch api with action level throttle period": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch1" body: > { @@ -47,7 +47,7 @@ teardown: - match: { _id: "my_watch1" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch1" - match: { found : true} - match: { _id: "my_watch1" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml index 47b27d6b9be3e..18e2829993955 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 --- "Test put inactive watch": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" active: false body: > @@ -47,7 +47,7 @@ teardown: - match: { 
_id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/50_email_attachment_validation.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/50_email_attachment_validation.yml index e76ab7fd71508..d86b496fde116 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/50_email_attachment_validation.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/50_email_attachment_validation.yml @@ -6,7 +6,7 @@ - do: catch: /Configured URL is empty/ - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -45,7 +45,7 @@ - do: catch: /Malformed URL/ - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml index bc26a60e4702f..517bb68dbec96 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch1" ignore: 404 --- "Test put watch api with action level condition": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch1" body: > { @@ -51,7 +51,7 @@ teardown: - match: { _id: "my_watch1" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch1" - match: { found : true } - match: { _id: "my_watch1" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml index 7bad6c8f1eebf..23d1b1057cfa0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml @@ -7,14 +7,14 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 --- "Test put watch api with index action using doc_id": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -45,7 +45,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true } - match: { _id: "my_watch" } @@ -53,7 +53,7 @@ teardown: - match: { watch.actions.test_index.index.doc_id: "test_id1" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.state: "executed" } - match: { watch_record.status.execution_state: "executed" } @@ -62,7 +62,7 @@ teardown: --- "Test put watch api with index action using _id field": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -93,7 +93,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true } - match: { _id: "my_watch" } @@ -101,7 +101,7 @@ teardown: - match: { watch.input.simple.value: 20 } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { 
watch_record.state: "executed" } - match: { watch_record.status.execution_state: "executed" } @@ -110,7 +110,7 @@ teardown: --- "Test put watch api with bulk index action using _id field": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -149,7 +149,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true } - match: { _id: "my_watch" } @@ -159,7 +159,7 @@ teardown: - match: { watch.input.simple._doc.1.value: 40 } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.state: "executed" } - match: { watch_record.status.execution_state: "executed" } @@ -169,7 +169,7 @@ teardown: --- "Test put watch api with bulk index action using _id field in one document": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -207,7 +207,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true } - match: { _id: "my_watch" } @@ -216,7 +216,7 @@ teardown: - match: { watch.input.simple._doc.1.value: 60 } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.state: "executed" } - match: { watch_record.status.execution_state: "executed" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/80_put_get_watch_with_passwords.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/80_put_get_watch_with_passwords.yml index ebef6c87d7022..077ddd2d30825 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/80_put_get_watch_with_passwords.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/80_put_get_watch_with_passwords.yml @@ -8,7 +8,7 @@ setup: "Test getting a watch does not contain the original password": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_with_password" body: > { @@ -40,7 +40,7 @@ setup: } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "watch_with_password" - match: { _id: "watch_with_password" } - match: { watch.input.http.request.auth.basic.password: "::es_redacted::" } @@ -50,7 +50,7 @@ setup: # version 1 - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_without_version_test" body: > { @@ -83,7 +83,7 @@ setup: - do: catch: bad_request - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_without_version_test" body: > { @@ -123,7 +123,7 @@ setup: # version 1 - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_with_seq_no" body: > { @@ -161,7 +161,7 @@ setup: # as if two users in the watch UI tried to update the same watch - do: catch: conflict - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_with_seq_no" if_seq_no: 123034 if_primary_term: $primaryTerm @@ -196,7 +196,7 @@ setup: - do: catch: conflict - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_with_seq_no" if_seq_no: $seqNo if_primary_term: 234242423 @@ -230,7 +230,7 @@ setup: } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "watch_with_seq_no" if_seq_no: $seqNo if_primary_term: $primaryTerm diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/90_ensure_watch_gets_overwritten_without_version.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/90_ensure_watch_gets_overwritten_without_version.yml index 4bea2f655e624..8a9ceb04dc90e 100644 --- 
a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/90_ensure_watch_gets_overwritten_without_version.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/90_ensure_watch_gets_overwritten_without_version.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -32,14 +32,14 @@ - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { watch.input.simple.foo: "bar" } # change the simple input fields, then ensure the old # field does not exist on get - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -66,7 +66,7 @@ - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { watch.input.simple.spam: "eggs" } - is_false: watch.input.simple.foo diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/91_search_total_hits_as_int.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/91_search_total_hits_as_int.yml index eba7f75a75968..c427f634a604e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/91_search_total_hits_as_int.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/91_search_total_hits_as_int.yml @@ -18,7 +18,7 @@ setup: "Test search input includes hits by default": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { @@ -61,7 +61,7 @@ setup: "Test search transform includes hits by default": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/start_watcher/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/start_watcher/10_basic.yml index 575d01fcee767..d3d25ae1ea7a6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/start_watcher/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/start_watcher/10_basic.yml @@ -4,5 +4,5 @@ cluster.health: wait_for_status: yellow - - do: {xpack.watcher.start: {}} + - do: {watcher.start: {}} - match: { acknowledged: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml index 5a90af3725294..f5a8b149fe952 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml @@ -1,13 +1,13 @@ --- "Test watcher stats output": - - do: {xpack.watcher.stats: {}} + - do: {watcher.stats: {}} - match: { "manually_stopped": false } - match: { "stats.0.watcher_state": "started" } --- "Test watcher stats supports emit_stacktraces parameter": - do: - xpack.watcher.stats: + watcher.stats: metric: "all" emit_stacktraces: "true" - match: { "manually_stopped": false } @@ -20,7 +20,7 @@ reason: metrics were fixed in 7.0.0 - do: - xpack.watcher.stats: + watcher.stats: metric: "current_watches" - is_false: stats.0.queued_watches @@ -33,7 +33,7 @@ reason: metrics were fixed in 7.0.0 - do: - xpack.watcher.stats: + watcher.stats: metric: "queued_watches" - is_false: stats.0.current_watches @@ -50,7 +50,7 @@ warnings: - 'The pending_watches parameter is deprecated, use queued_watches instead' - xpack.watcher.stats: + watcher.stats: metric: "pending_watches" - is_false: stats.0.current_watches @@ 
-59,7 +59,7 @@ --- "Test watcher stats all watches": - do: - xpack.watcher.stats: + watcher.stats: metric: "_all" - is_true: stats.0.current_watches diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stop_watcher/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stop_watcher/10_basic.yml index 518714c57ab3f..a4bac7b78a10f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stop_watcher/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stop_watcher/10_basic.yml @@ -4,8 +4,8 @@ cluster.health: wait_for_status: yellow - - do: {xpack.watcher.stop: {}} + - do: {watcher.stop: {}} - match: { acknowledged: true } - - do: {xpack.watcher.start: {}} + - do: {watcher.start: {}} - match: { acknowledged: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml index 432308581f6f0..754f5281d8535 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml @@ -2,7 +2,7 @@ "Test watcher usage stats output": - do: catch: missing - xpack.watcher.delete_watch: + watcher.delete_watch: id: "usage_stats_watch" - do: {xpack.usage: {}} @@ -10,7 +10,7 @@ - set: { "watcher.count.total": watch_count_total } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "usage_stats_watch" body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml index 81699d70c4ea1..514ba61824067 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml @@ -24,5 +24,4 @@ - contains: { nodes.$master.modules: { name: x-pack-rollup } } - contains: { nodes.$master.modules: { name: x-pack-security } } - contains: { nodes.$master.modules: { name: x-pack-sql } } - - contains: { nodes.$master.modules: { name: x-pack-upgrade } } - contains: { nodes.$master.modules: { name: x-pack-watcher } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml index 5e61f98bbc297..213d935a5c52f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml @@ -7,7 +7,7 @@ wait_for_status: yellow - do: - xpack.license.delete: {} + license.delete: {} - match: { acknowledged: true } # we don't have a license now @@ -30,7 +30,7 @@ # - is_false: features.monitoring.available TODO fix once licensing is fixed - do: - xpack.license.post: + license.post: body: > { "license": { @@ -49,7 +49,7 @@ - match: { license_status: "valid" } - do: - xpack.license.get: {} + license.get: {} - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } - match: { license.type: "internal" } - match: { license.status: "active" } diff --git a/x-pack/plugin/upgrade/build.gradle b/x-pack/plugin/upgrade/build.gradle deleted file mode 100644 index 25a39168dbc09..0000000000000 --- a/x-pack/plugin/upgrade/build.gradle +++ /dev/null @@ -1,45 +0,0 @@ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask - -evaluationDependsOn(xpackModule('core')) - -apply plugin: 'elasticsearch.esplugin' -esplugin { - name 'x-pack-upgrade' - description 'Elasticsearch 
Expanded Pack Plugin - Upgrade' - classname 'org.elasticsearch.xpack.upgrade.Upgrade' - extendedPlugins = ['x-pack-core'] -} - -archivesBaseName = 'x-pack-upgrade' - -dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here - compileOnly project(path: xpackModule('core'), configuration: 'default') - testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') -} - -compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" -compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" - -run { - plugin xpackModule('core') -} - -integTest.enabled = false - -// Instead we create a separate task to run the -// tests based on ESIntegTestCase -task internalClusterTest(type: RandomizedTestingTask, - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Multi-node tests', - dependsOn: unitTest.dependsOn) { - include '**/*IT.class' - systemProperty 'es.set.netty.runtime.available.processors', 'false' -} -check.dependsOn internalClusterTest -internalClusterTest.mustRunAfter test - -// also add an "alias" task to make typing on the command line easier -task icTest { - dependsOn internalClusterTest -} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheck.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheck.java deleted file mode 100644 index 62a2829b9258c..0000000000000 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheck.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.upgrade; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.protocol.xpack.migration.UpgradeActionRequired; -import org.elasticsearch.script.Script; -import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.xpack.core.upgrade.IndexUpgradeCheckVersion; - -import java.util.function.BiConsumer; -import java.util.function.Consumer; -import java.util.function.Function; - -/** - * Generic upgrade check applicable to all indices to be upgraded from the current version - * to the next major version - *

- * The upgrade is performed in the following way: - *

- * - preUpgrade method is called - * - reindex is performed - * - postUpgrade is called if reindex was successful - */ -public class IndexUpgradeCheck { - - private final String name; - private final Function actionRequired; - private final InternalIndexReindexer reindexer; - - /** - * Creates a new upgrade check - * - * @param name - the name of the check - * @param actionRequired - return true if they can work with the index with specified name - * @param client - client - * @param clusterService - cluster service - * @param types - a list of types that the reindexing should be limited to - * @param updateScript - the upgrade script that should be used during reindexing - */ - public IndexUpgradeCheck(String name, - Function actionRequired, - Client client, ClusterService clusterService, String[] types, Script updateScript) { - this(name, actionRequired, client, clusterService, types, updateScript, - listener -> listener.onResponse(null), (t, listener) -> listener.onResponse(TransportResponse.Empty.INSTANCE)); - } - - /** - * Creates a new upgrade check - * - * @param name - the name of the check - * @param actionRequired - return true if they can work with the index with specified name - * @param client - client - * @param clusterService - cluster service - * @param types - a list of types that the reindexing should be limited to - * @param updateScript - the upgrade script that should be used during reindexing - * @param preUpgrade - action that should be performed before upgrade - * @param postUpgrade - action that should be performed after upgrade - */ - public IndexUpgradeCheck(String name, - Function actionRequired, - Client client, ClusterService clusterService, String[] types, Script updateScript, - Consumer> preUpgrade, - BiConsumer> postUpgrade) { - this.name = name; - this.actionRequired = actionRequired; - this.reindexer = new InternalIndexReindexer<>(client, clusterService, IndexUpgradeCheckVersion.UPRADE_VERSION, updateScript, - types, preUpgrade, postUpgrade); - } - - /** - * Returns the name of the check - */ - public String getName() { - return name; - } - - /** - * This method is called by Upgrade API to verify if upgrade or reindex for this index is required - * - * @param indexMetaData index metadata - * @return required action or UpgradeActionRequired.NOT_APPLICABLE if this check cannot be performed on the index - */ - public UpgradeActionRequired actionRequired(IndexMetaData indexMetaData) { - return actionRequired.apply(indexMetaData); - } - - /** - * Perform the index upgrade - * - * @param task the task that executes the upgrade operation - * @param indexMetaData index metadata - * @param state current cluster state - * @param listener the listener that should be called upon completion of the upgrade - */ - public void upgrade(TaskId task, IndexMetaData indexMetaData, ClusterState state, - ActionListener listener) { - reindexer.upgrade(task, indexMetaData.getIndex().getName(), state, listener); - } -} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheckFactory.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheckFactory.java deleted file mode 100644 index 804e159025136..0000000000000 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheckFactory.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.upgrade; - -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.service.ClusterService; - -import java.util.Collection; -import java.util.Collections; - -/** - * Factory for index checks - */ -public interface IndexUpgradeCheckFactory { - - /** - * Using this method the check can expose additional user parameter that can be specified by the user on upgrade api - * - * @return the list of supported parameters - */ - default Collection supportedParams() { - return Collections.emptyList(); - } - - /** - * Creates an upgrade check - *

- * This method is called from {@link org.elasticsearch.plugins.Plugin#createComponents} method. - */ - IndexUpgradeCheck createCheck(Client client, ClusterService clusterService); - -} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java deleted file mode 100644 index fb06c81f25f51..0000000000000 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.upgrade; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.protocol.xpack.migration.UpgradeActionRequired; -import org.elasticsearch.tasks.TaskId; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class IndexUpgradeService { - - private static final Logger logger = LogManager.getLogger(IndexUpgradeService.class); - - private final List upgradeChecks; - - private final IndexNameExpressionResolver indexNameExpressionResolver; - - public IndexUpgradeService(List upgradeChecks) { - this.upgradeChecks = upgradeChecks; - this.indexNameExpressionResolver = new IndexNameExpressionResolver(); - } - - /** - * Returns the information about required upgrade action for the given indices - * - * @param indices list of indices to check, specify _all for all indices - * @param options wild card resolution option - * @param state the current cluster state - * @return a list of indices that should be upgraded/reindexed - */ - public Map upgradeInfo(String[] indices, IndicesOptions options, ClusterState state) { - Map results = new HashMap<>(); - String[] concreteIndexNames = indexNameExpressionResolver.concreteIndexNames(state, options, indices); - MetaData metaData = state.getMetaData(); - for (String index : concreteIndexNames) { - IndexMetaData indexMetaData = metaData.index(index); - UpgradeActionRequired upgradeActionRequired = upgradeInfo(indexMetaData, index); - if (upgradeActionRequired != null) { - results.put(index, upgradeActionRequired); - } - } - return results; - } - - private UpgradeActionRequired upgradeInfo(IndexMetaData indexMetaData, String index) { - for (IndexUpgradeCheck check : upgradeChecks) { - UpgradeActionRequired upgradeActionRequired = check.actionRequired(indexMetaData); - logger.trace("[{}] check [{}] returned [{}]", index, check.getName(), upgradeActionRequired); - switch (upgradeActionRequired) { - case UPGRADE: - case REINDEX: - // this index needs to be upgraded or reindexed - skipping all other checks - return upgradeActionRequired; - case UP_TO_DATE: - // this index is good - skipping all other checks - return null; - case NOT_APPLICABLE: - // this 
action is not applicable to this index - skipping to the next one - break; - default: - throw new IllegalStateException("unknown upgrade action " + upgradeActionRequired + " for the index " - + index); - - } - } - // Catch all check for all indices that didn't match the specific checks - if (indexMetaData.getCreationVersion().before(Version.V_6_0_0)) { - return UpgradeActionRequired.REINDEX; - } else { - return null; - } - } - - public void upgrade(TaskId task, String index, ClusterState state, ActionListener listener) { - IndexMetaData indexMetaData = state.metaData().index(index); - if (indexMetaData == null) { - throw new IndexNotFoundException(index); - } - for (IndexUpgradeCheck check : upgradeChecks) { - UpgradeActionRequired upgradeActionRequired = check.actionRequired(indexMetaData); - switch (upgradeActionRequired) { - case UPGRADE: - // this index needs to be upgraded - start the upgrade procedure - check.upgrade(task, indexMetaData, state, listener); - return; - case REINDEX: - // this index needs to be re-indexed - throw new IllegalStateException("Index [" + index + "] cannot be upgraded, it should be reindex instead"); - case UP_TO_DATE: - throw new IllegalStateException("Index [" + index + "] cannot be upgraded, it is up to date"); - case NOT_APPLICABLE: - // this action is not applicable to this index - skipping to the next one - break; - default: - throw new IllegalStateException("unknown upgrade action [" + upgradeActionRequired + "] for the index [" + index + "]"); - - } - } - throw new IllegalStateException("Index [" + index + "] cannot be upgraded"); - } - -} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java deleted file mode 100644 index 6ab920555bb0b..0000000000000 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.upgrade; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.ParentTaskAssigningClient; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.index.reindex.ReindexAction; -import org.elasticsearch.index.reindex.ReindexRequest; -import org.elasticsearch.script.Script; -import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.transport.TransportResponse; - -import java.util.function.BiConsumer; -import java.util.function.Consumer; - -import static org.elasticsearch.index.IndexSettings.same; - -/** - * A component that performs the following upgrade procedure: - *

- * - Check that all data and master nodes are running running the same version - * - Create a new index .{name}-6 - * - Make index .{name} read only - * - Reindex from .{name} to .{name}-6 with transform - * - Delete index .{name} and add alias .{name} to .{name}-6 - */ -public class InternalIndexReindexer { - - private final Client client; - private final ClusterService clusterService; - private final Script transformScript; - private final String[] types; - private final int version; - private final Consumer> preUpgrade; - private final BiConsumer> postUpgrade; - - public InternalIndexReindexer(Client client, ClusterService clusterService, int version, Script transformScript, String[] types, - Consumer> preUpgrade, - BiConsumer> postUpgrade) { - this.client = client; - this.clusterService = clusterService; - this.transformScript = transformScript; - this.types = types; - this.version = version; - this.preUpgrade = preUpgrade; - this.postUpgrade = postUpgrade; - } - - public void upgrade(TaskId task, String index, ClusterState clusterState, ActionListener listener) { - ParentTaskAssigningClient parentAwareClient = new ParentTaskAssigningClient(client, task); - preUpgrade.accept(ActionListener.wrap( - t -> innerUpgrade(parentAwareClient, index, clusterState, ActionListener.wrap( - response -> postUpgrade.accept(t, ActionListener.wrap( - empty -> listener.onResponse(response), - listener::onFailure - )), - listener::onFailure - )), - listener::onFailure)); - } - - private void innerUpgrade(ParentTaskAssigningClient parentAwareClient, String index, ClusterState clusterState, - ActionListener listener) { - String newIndex = index + "-" + version; - try { - checkMasterAndDataNodeVersion(clusterState); - parentAwareClient.admin().indices().prepareCreate(newIndex).execute(ActionListener.wrap(createIndexResponse -> - setReadOnlyBlock(index, ActionListener.wrap(setReadOnlyResponse -> - reindex(parentAwareClient, index, newIndex, ActionListener.wrap( - bulkByScrollResponse -> // Successful completion of reindexing - delete old index - removeReadOnlyBlock(parentAwareClient, index, ActionListener.wrap(unsetReadOnlyResponse -> - parentAwareClient.admin().indices().prepareAliases().removeIndex(index) - .addAlias(newIndex, index).execute(ActionListener.wrap(deleteIndexResponse -> - listener.onResponse(bulkByScrollResponse), listener::onFailure - )), listener::onFailure - )), - e -> // Something went wrong during reindexing - remove readonly flag and report the error - removeReadOnlyBlock(parentAwareClient, index, ActionListener.wrap(unsetReadOnlyResponse -> { - listener.onFailure(e); - }, e1 -> { - listener.onFailure(e); - })) - )), listener::onFailure - )), listener::onFailure - )); - } catch (Exception ex) { - listener.onFailure(ex); - } - } - - private void checkMasterAndDataNodeVersion(ClusterState clusterState) { - if (clusterState.nodes().getMinNodeVersion().before(Upgrade.UPGRADE_INTRODUCED)) { - throw new IllegalStateException("All nodes should have at least version [" + Upgrade.UPGRADE_INTRODUCED + "] to upgrade"); - } - } - - private void removeReadOnlyBlock(ParentTaskAssigningClient parentAwareClient, String index, - ActionListener listener) { - Settings settings = Settings.builder().put(IndexMetaData.INDEX_READ_ONLY_SETTING.getKey(), false).build(); - parentAwareClient.admin().indices().prepareUpdateSettings(index).setSettings(settings).execute(listener); - } - - private void reindex(ParentTaskAssigningClient parentAwareClient, String index, String newIndex, - ActionListener listener) { - 
ReindexRequest reindexRequest = new ReindexRequest(); - reindexRequest.setSourceIndices(index); - reindexRequest.setSourceDocTypes(types); - reindexRequest.setDestIndex(newIndex); - reindexRequest.setRefresh(true); - reindexRequest.setScript(transformScript); - parentAwareClient.execute(ReindexAction.INSTANCE, reindexRequest, listener); - } - - /** - * Makes the index readonly if it's not set as a readonly yet - */ - private void setReadOnlyBlock(String index, ActionListener listener) { - clusterService.submitStateUpdateTask("lock-index-for-upgrade", new ClusterStateUpdateTask() { - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - final IndexMetaData indexMetaData = currentState.metaData().index(index); - if (indexMetaData == null) { - throw new IndexNotFoundException(index); - } - - if (indexMetaData.getState() != IndexMetaData.State.OPEN) { - throw new IllegalStateException("unable to upgrade a closed index[" + index + "]"); - } - if (currentState.blocks().hasIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK)) { - throw new IllegalStateException("unable to upgrade a read-only index[" + index + "]"); - } - - final Settings indexSettingsBuilder = - Settings.builder() - .put(indexMetaData.getSettings()) - .put(IndexMetaData.INDEX_READ_ONLY_SETTING.getKey(), true) - .build(); - final IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData).settings(indexSettingsBuilder); - assert same(indexMetaData.getSettings(), indexSettingsBuilder) == false; - builder.settingsVersion(1 + builder.settingsVersion()); - - MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData()).put(builder); - - ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()) - .addIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK); - - return ClusterState.builder(currentState).metaData(metaDataBuilder).blocks(blocks).build(); - } - - @Override - public void onFailure(String source, Exception e) { - listener.onFailure(e); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - listener.onResponse(TransportResponse.Empty.INSTANCE); - } - }); - } - -} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java deleted file mode 100644 index 985baeaf9ab3f..0000000000000 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.upgrade; - -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.IndexScopedSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.plugins.ActionPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestHandler; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.watcher.ResourceWatcherService; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; -import org.elasticsearch.xpack.upgrade.actions.TransportIndexUpgradeAction; -import org.elasticsearch.xpack.upgrade.actions.TransportIndexUpgradeInfoAction; -import org.elasticsearch.xpack.upgrade.rest.RestIndexUpgradeAction; -import org.elasticsearch.xpack.upgrade.rest.RestIndexUpgradeInfoAction; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.function.BiFunction; -import java.util.function.Supplier; - -public class Upgrade extends Plugin implements ActionPlugin { - - public static final Version UPGRADE_INTRODUCED = Version.CURRENT.minimumCompatibilityVersion(); - - private final List> upgradeCheckFactories; - - public Upgrade() { - this.upgradeCheckFactories = new ArrayList<>(); - } - - @Override - public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, - ResourceWatcherService resourceWatcherService, ScriptService scriptService, - NamedXContentRegistry xContentRegistry, Environment environment, - NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - List upgradeChecks = new ArrayList<>(upgradeCheckFactories.size()); - for (BiFunction checkFactory : upgradeCheckFactories) { - upgradeChecks.add(checkFactory.apply(client, clusterService)); - } - return Collections.singletonList(new IndexUpgradeService(Collections.unmodifiableList(upgradeChecks))); - } - - @Override - public List> getActions() { - return Arrays.asList( - new ActionHandler<>(IndexUpgradeInfoAction.INSTANCE, TransportIndexUpgradeInfoAction.class), - new ActionHandler<>(IndexUpgradeAction.INSTANCE, TransportIndexUpgradeAction.class) - ); - } - - @Override - public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, - IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, - IndexNameExpressionResolver indexNameExpressionResolver, - Supplier nodesInCluster) { - return Arrays.asList( - new RestIndexUpgradeInfoAction(settings, restController), - new RestIndexUpgradeAction(settings, restController) - ); - } - -} diff --git 
a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeAction.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeAction.java deleted file mode 100644 index ff1a80d4edd01..0000000000000 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeAction.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.upgrade.actions; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; -import org.elasticsearch.xpack.upgrade.IndexUpgradeService; - -public class TransportIndexUpgradeAction extends TransportMasterNodeAction { - - private final IndexUpgradeService indexUpgradeService; - - @Inject - public TransportIndexUpgradeAction(TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, - IndexUpgradeService indexUpgradeService, - IndexNameExpressionResolver indexNameExpressionResolver) { - super(IndexUpgradeAction.NAME, transportService, clusterService, threadPool, actionFilters, - IndexUpgradeAction.Request::new, indexNameExpressionResolver); - this.indexUpgradeService = indexUpgradeService; - } - - @Override - protected String executor() { - return ThreadPool.Names.GENERIC; - } - - @Override - protected BulkByScrollResponse newResponse() { - return new BulkByScrollResponse(); - } - - @Override - protected ClusterBlockException checkBlock(IndexUpgradeAction.Request request, ClusterState state) { - // Cluster is not affected but we look up repositories in metadata - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); - } - - @Override - protected final void masterOperation(Task task, IndexUpgradeAction.Request request, ClusterState state, - ActionListener listener) { - TaskId taskId = new TaskId(clusterService.localNode().getId(), task.getId()); - indexUpgradeService.upgrade(taskId, request.index(), state, listener); - } - - @Override - protected final void masterOperation(IndexUpgradeAction.Request request, ClusterState state, - ActionListener listener) { - throw new UnsupportedOperationException("the task parameter is required"); - } - -} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeInfoAction.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeInfoAction.java deleted file mode 100644 index 70bbcb8eb05a4..0000000000000 --- 
a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeInfoAction.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.upgrade.actions; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoResponse; -import org.elasticsearch.protocol.xpack.migration.UpgradeActionRequired; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; -import org.elasticsearch.xpack.upgrade.IndexUpgradeService; - -import java.util.Map; - -public class TransportIndexUpgradeInfoAction - extends TransportMasterNodeReadAction { - - private final IndexUpgradeService indexUpgradeService; - private final XPackLicenseState licenseState; - - - @Inject - public TransportIndexUpgradeInfoAction(TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, - IndexUpgradeService indexUpgradeService, - IndexNameExpressionResolver indexNameExpressionResolver, - XPackLicenseState licenseState) { - super(IndexUpgradeInfoAction.NAME, transportService, clusterService, threadPool, actionFilters, - IndexUpgradeInfoRequest::new, indexNameExpressionResolver); - this.indexUpgradeService = indexUpgradeService; - this.licenseState = licenseState; - } - - @Override - protected String executor() { - return ThreadPool.Names.GENERIC; - } - - @Override - protected IndexUpgradeInfoResponse newResponse() { - return new IndexUpgradeInfoResponse(); - } - - @Override - protected ClusterBlockException checkBlock(IndexUpgradeInfoRequest request, ClusterState state) { - // Cluster is not affected but we look up repositories in metadata - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); - } - - @Override - protected final void masterOperation(final IndexUpgradeInfoRequest request, ClusterState state, - final ActionListener listener) { - if (licenseState.isUpgradeAllowed()) { - Map results = - indexUpgradeService.upgradeInfo(request.indices(), request.indicesOptions(), state); - listener.onResponse(new IndexUpgradeInfoResponse(results)); - } else { - listener.onFailure(LicenseUtils.newComplianceException(XPackField.UPGRADE)); - } - } -} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/rest/RestIndexUpgradeAction.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/rest/RestIndexUpgradeAction.java deleted file mode 100644 index 
9245e1953a107..0000000000000 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/rest/RestIndexUpgradeAction.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.upgrade.rest; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.index.reindex.BulkByScrollTask; -import org.elasticsearch.index.reindex.ScrollableHitSource; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.tasks.LoggingTaskListener; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction.Request; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static org.elasticsearch.rest.RestRequest.Method.POST; - -public class RestIndexUpgradeAction extends BaseRestHandler { - private static final Logger logger = LogManager.getLogger(RestIndexUpgradeAction.class); - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); - - public RestIndexUpgradeAction(Settings settings, RestController controller) { - super(settings); - controller.registerWithDeprecatedHandler( - POST, "_migration/upgrade/{index}", this, - POST, "_xpack/migration/upgrade/{index}", deprecationLogger); - } - - @Override - public String getName() { - return "migration_upgrade"; - } - - @Override - public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - if (request.method().equals(POST)) { - return handlePost(request, client); - } else { - throw new IllegalArgumentException("illegal method [" + request.method() + "] for request [" + request.path() + "]"); - } - } - - private RestChannelConsumer handlePost(final RestRequest request, NodeClient client) { - Request upgradeRequest = new Request(request.param("index")); - Map params = new HashMap<>(); - params.put(BulkByScrollTask.Status.INCLUDE_CREATED, Boolean.toString(true)); - params.put(BulkByScrollTask.Status.INCLUDE_UPDATED, Boolean.toString(true)); - - if (request.paramAsBoolean("wait_for_completion", true)) { - return channel -> client.execute(IndexUpgradeAction.INSTANCE, upgradeRequest, - new RestBuilderListener(channel) { - - @Override - public RestResponse buildResponse(BulkByScrollResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, new 
ToXContent.DelegatingMapParams(params, channel.request())); - builder.endObject(); - return new BytesRestResponse(getStatus(response), builder); - } - - private RestStatus getStatus(BulkByScrollResponse response) { - /* - * Return the highest numbered rest status under the assumption that higher numbered statuses are "more error" - * and thus more interesting to the user. - */ - RestStatus status = RestStatus.OK; - if (response.isTimedOut()) { - status = RestStatus.REQUEST_TIMEOUT; - } - for (BulkItemResponse.Failure failure : response.getBulkFailures()) { - if (failure.getStatus().getStatus() > status.getStatus()) { - status = failure.getStatus(); - } - } - for (ScrollableHitSource.SearchFailure failure : response.getSearchFailures()) { - RestStatus failureStatus = ExceptionsHelper.status(failure.getReason()); - if (failureStatus.getStatus() > status.getStatus()) { - status = failureStatus; - } - } - return status; - } - - }); - } else { - upgradeRequest.setShouldStoreResult(true); - - /* - * Validating before forking to make sure we can catch the issues earlier - */ - ActionRequestValidationException validationException = upgradeRequest.validate(); - if (validationException != null) { - throw validationException; - } - Task task = client.executeLocally(IndexUpgradeAction.INSTANCE, upgradeRequest, LoggingTaskListener.instance()); - // Send task description id instead of waiting for the message - return channel -> { - try (XContentBuilder builder = channel.newBuilder()) { - builder.startObject(); - builder.field("task", client.getLocalNodeId() + ":" + task.getId()); - builder.endObject(); - channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder)); - } - }; - } - } -} - diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/rest/RestIndexUpgradeInfoAction.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/rest/RestIndexUpgradeInfoAction.java deleted file mode 100644 index 70636b126218c..0000000000000 --- a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/rest/RestIndexUpgradeInfoAction.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.upgrade.rest; - - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoRequest; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; - -import java.io.IOException; - -import static org.elasticsearch.rest.RestRequest.Method.GET; - -public class RestIndexUpgradeInfoAction extends BaseRestHandler { - private static final Logger logger = LogManager.getLogger(RestIndexUpgradeInfoAction.class); - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); - - public RestIndexUpgradeInfoAction(Settings settings, RestController controller) { - super(settings); - controller.registerWithDeprecatedHandler( - GET, "_migration/assistance", this, - GET, "/_xpack/migration/assistance", deprecationLogger); - - controller.registerWithDeprecatedHandler( - GET, "_migration/assistance/{index}", this, - GET, "/_xpack/migration/assistance/{index}", deprecationLogger); - } - - @Override - public String getName() { - return "migration_assistance"; - } - - @Override - public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - if (request.method().equals(GET)) { - return handleGet(request, client); - } else { - throw new IllegalArgumentException("illegal method [" + request.method() + "] for request [" + request.path() + "]"); - } - } - - private RestChannelConsumer handleGet(final RestRequest request, NodeClient client) { - IndexUpgradeInfoRequest infoRequest = new IndexUpgradeInfoRequest(Strings.splitStringByCommaToArray(request.param("index"))); - infoRequest.indicesOptions(IndicesOptions.fromRequest(request, infoRequest.indicesOptions())); - return channel -> client.execute(IndexUpgradeInfoAction.INSTANCE, infoRequest, new RestToXContentListener<>(channel)); - } - -} - diff --git a/x-pack/plugin/upgrade/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/upgrade/src/main/plugin-metadata/plugin-security.policy deleted file mode 100644 index f603bf9ad63ba..0000000000000 --- a/x-pack/plugin/upgrade/src/main/plugin-metadata/plugin-security.policy +++ /dev/null @@ -1,25 +0,0 @@ -grant { - // needed for multiple server implementations used in tests - permission java.net.SocketPermission "*", "accept,connect"; -}; - -grant codeBase "${codebase.netty-common}" { - // for reading the system-wide configuration for the backlog of established sockets - permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; -}; - -grant codeBase "${codebase.netty-transport}" { - // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 - // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! 
- permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; -}; - -grant codeBase "${codebase.elasticsearch-rest-client}" { - // rest client uses system properties which gets the default proxy - permission java.net.NetPermission "getProxySelector"; -}; - -grant codeBase "${codebase.httpasyncclient}" { - // rest client uses system properties which gets the default proxy - permission java.net.NetPermission "getProxySelector"; -}; \ No newline at end of file diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java deleted file mode 100644 index 3663d586159d9..0000000000000 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.upgrade; - -import org.elasticsearch.Build; -import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoResponse; -import org.elasticsearch.protocol.xpack.migration.UpgradeActionRequired; -import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; -import org.junit.Before; - -import java.util.Collections; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.core.IsEqual.equalTo; - -public class IndexUpgradeIT extends IndexUpgradeIntegTestCase { - - @Before - public void resetLicensing() throws Exception { - enableLicensing(); - } - - public void testIndexUpgradeInfo() { - // Testing only negative case here, the positive test is done in bwcTests - assertAcked(client().admin().indices().prepareCreate("test").get()); - ensureYellow("test"); - IndexUpgradeInfoResponse response = new IndexUpgradeInfoAction.RequestBuilder(client()).setIndices("test").get(); - assertThat(response.getActions().entrySet(), empty()); - } - - public void testIndexUpgradeInfoLicense() throws Exception { - // This test disables all licenses and generates a new one using dev private key - // in non-snapshot builds we are using production public key for license verification - // which makes this test to fail - assumeTrue("License is only valid when tested against snapshot/test keys", Build.CURRENT.isSnapshot()); - assertAcked(client().admin().indices().prepareCreate("test").get()); - ensureYellow("test"); - disableLicensing(); - ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, - () -> new IndexUpgradeInfoAction.RequestBuilder(client()).setIndices("test").get()); - 
assertThat(e.getMessage(), equalTo("current license is non-compliant for [upgrade]")); - enableLicensing(); - IndexUpgradeInfoResponse response = new IndexUpgradeInfoAction.RequestBuilder(client()).setIndices("test").get(); - assertThat(response.getActions().entrySet(), empty()); - } - - public void testUpToDateIndexUpgrade() throws Exception { - // Testing only negative case here, the positive test is done in bwcTests - String testIndex = "test"; - String testType = "doc"; - assertAcked(client().admin().indices().prepareCreate(testIndex).get()); - indexRandom(true, - client().prepareIndex(testIndex, testType, "1").setSource("{\"foo\":\"bar\"}", XContentType.JSON), - client().prepareIndex(testIndex, testType, "2").setSource("{\"foo\":\"baz\"}", XContentType.JSON) - ); - ensureYellow(testIndex); - - IllegalStateException ex = expectThrows(IllegalStateException.class, - () -> new IndexUpgradeAction.RequestBuilder(client()).setIndex(testIndex).get()); - assertThat(ex.getMessage(), equalTo("Index [" + testIndex + "] cannot be upgraded")); - - SearchResponse searchResponse = client().prepareSearch(testIndex).get(); - assertEquals(2L, searchResponse.getHits().getTotalHits().value); - } - - public void testInternalUpgradePrePostChecks() throws Exception { - String testIndex = "internal_index"; - String testType = "test"; - Long val = randomLong(); - AtomicBoolean preUpgradeIsCalled = new AtomicBoolean(); - AtomicBoolean postUpgradeIsCalled = new AtomicBoolean(); - - IndexUpgradeCheck check = new IndexUpgradeCheck( - "test", - indexMetaData -> { - if (indexMetaData.getIndex().getName().equals(testIndex)) { - return UpgradeActionRequired.UPGRADE; - } else { - return UpgradeActionRequired.NOT_APPLICABLE; - } - }, - client(), internalCluster().clusterService(internalCluster().getMasterName()), Strings.EMPTY_ARRAY, null, - listener -> { - assertFalse(preUpgradeIsCalled.getAndSet(true)); - assertFalse(postUpgradeIsCalled.get()); - listener.onResponse(val); - }, - (aLong, listener) -> { - assertTrue(preUpgradeIsCalled.get()); - assertFalse(postUpgradeIsCalled.getAndSet(true)); - assertEquals(aLong, val); - listener.onResponse(TransportResponse.Empty.INSTANCE); - }); - - assertAcked(client().admin().indices().prepareCreate(testIndex).get()); - indexRandom(true, - client().prepareIndex(testIndex, testType, "1").setSource("{\"foo\":\"bar\"}", XContentType.JSON), - client().prepareIndex(testIndex, testType, "2").setSource("{\"foo\":\"baz\"}", XContentType.JSON) - ); - ensureYellow(testIndex); - - IndexUpgradeService service = new IndexUpgradeService(Collections.singletonList(check)); - - PlainActionFuture future = PlainActionFuture.newFuture(); - service.upgrade(new TaskId("abc", 123), testIndex, clusterService().state(), future); - BulkByScrollResponse response = future.actionGet(); - assertThat(response.getCreated(), equalTo(2L)); - - SearchResponse searchResponse = client().prepareSearch(testIndex).get(); - assertEquals(2L, searchResponse.getHits().getTotalHits().value); - - assertTrue(preUpgradeIsCalled.get()); - assertTrue(postUpgradeIsCalled.get()); - } - - public void testIndexUpgradeInfoOnEmptyCluster() { - // On empty cluster asking for all indices shouldn't fail since no indices means nothing needs to be upgraded - IndexUpgradeInfoResponse response = new IndexUpgradeInfoAction.RequestBuilder(client()).setIndices("_all").get(); - assertThat(response.getActions().entrySet(), empty()); - - // but calling on a particular index should fail - assertThrows(new 
IndexUpgradeInfoAction.RequestBuilder(client()).setIndices("test"), IndexNotFoundException.class); - } -} diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIntegTestCase.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIntegTestCase.java deleted file mode 100644 index d7b709c4c3f6b..0000000000000 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIntegTestCase.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.upgrade; - -import org.elasticsearch.analysis.common.CommonAnalysisPlugin; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.reindex.ReindexPlugin; -import org.elasticsearch.license.AbstractLicensesIntegrationTestCase; -import org.elasticsearch.license.License; -import org.elasticsearch.license.TestUtils; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; -import org.elasticsearch.xpack.core.XPackClientPlugin; -import org.elasticsearch.xpack.core.monitoring.test.MockPainlessScriptEngine; - -import java.util.Arrays; -import java.util.Collection; - -public abstract class IndexUpgradeIntegTestCase extends AbstractLicensesIntegrationTestCase { - @Override - protected boolean ignoreExternalCluster() { - return true; - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, Upgrade.class, ReindexPlugin.class, - MockPainlessScriptEngine.TestPlugin.class, CommonAnalysisPlugin.class); - } - - @Override - protected Collection> transportClientPlugins() { - return Arrays.asList(XPackClientPlugin.class, ReindexPlugin.class); - } - private static String randomValidLicenseType() { - return randomFrom("trial", "platinum", "gold", "standard", "basic"); - } - - private static String randomInvalidLicenseType() { - return "missing"; - } - - public void disableLicensing() throws Exception { - updateLicensing(randomInvalidLicenseType()); - } - - public void enableLicensing() throws Exception { - updateLicensing(randomValidLicenseType()); - } - - public void updateLicensing(String licenseType) throws Exception { - wipeAllLicenses(); - if (licenseType.equals("missing")) { - putLicenseTombstone(); - } else { - License license = TestUtils.generateSignedLicense(licenseType, TimeValue.timeValueMinutes(1)); - putLicense(license); - } - } -} diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java deleted file mode 100644 index 961b86a53cdbf..0000000000000 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.upgrade; - - -import org.elasticsearch.Version; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.protocol.xpack.migration.UpgradeActionRequired; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Map; -import java.util.function.Function; - -import static org.hamcrest.core.IsEqual.equalTo; - -public class IndexUpgradeServiceTests extends ESTestCase { - - private IndexUpgradeCheck upgradeBarCheck = new IndexUpgradeCheck("upgrade_bar", - (Function) indexMetaData -> { - if ("bar".equals(indexMetaData.getSettings().get("test.setting"))) { - return UpgradeActionRequired.UPGRADE; - } else { - return UpgradeActionRequired.NOT_APPLICABLE; - } - }, null, null, null, null); - - private IndexUpgradeCheck reindexFooCheck = new IndexUpgradeCheck("reindex_foo", - (Function) indexMetaData -> { - if ("foo".equals(indexMetaData.getSettings().get("test.setting"))) { - return UpgradeActionRequired.REINDEX; - } else { - return UpgradeActionRequired.NOT_APPLICABLE; - } - }, null, null, null, null); - - private IndexUpgradeCheck everythingIsFineCheck = new IndexUpgradeCheck("everything_is_fine", - indexMetaData -> UpgradeActionRequired.UP_TO_DATE, null, null, null, null); - - private IndexUpgradeCheck unreachableCheck = new IndexUpgradeCheck("unreachable", - (Function) indexMetaData -> { - fail("Unreachable check is called"); - return null; - }, null, null, null, null); - - public void testIndexUpgradeServiceMultipleCheck() throws Exception { - IndexUpgradeService service; - if (randomBoolean()) { - service = new IndexUpgradeService(Arrays.asList( - upgradeBarCheck, - reindexFooCheck, - everythingIsFineCheck, - unreachableCheck // This one should never be called - )); - } else { - service = new IndexUpgradeService(Arrays.asList( - reindexFooCheck, - upgradeBarCheck, - everythingIsFineCheck, - unreachableCheck // This one should never be called - )); - } - - IndexMetaData fooIndex = newTestIndexMeta("bar", Settings.builder().put("test.setting", "bar").build()); - IndexMetaData barIndex = newTestIndexMeta("foo", Settings.builder().put("test.setting", "foo").build()); - IndexMetaData bazIndex = newTestIndexMeta("baz", Settings.EMPTY); - - ClusterState clusterState = mockClusterState(fooIndex, barIndex, bazIndex); - - Map result = service.upgradeInfo(new String[]{"bar", "foo", "baz"}, - IndicesOptions.lenientExpandOpen(), clusterState); - - assertThat(result.size(), equalTo(2)); - assertThat(result.get("bar"), equalTo(UpgradeActionRequired.UPGRADE)); - assertThat(result.get("foo"), equalTo(UpgradeActionRequired.REINDEX)); - - result = service.upgradeInfo(new String[]{"b*"}, IndicesOptions.lenientExpandOpen(), clusterState); - - assertThat(result.size(), equalTo(1)); - assertThat(result.get("bar"), equalTo(UpgradeActionRequired.UPGRADE)); - } - - - public void testNoMatchingChecks() throws Exception { - IndexUpgradeService service = new IndexUpgradeService(Arrays.asList( - upgradeBarCheck, - reindexFooCheck - )); - - IndexMetaData fooIndex = newTestIndexMeta("bar", Settings.builder().put("test.setting", "bar").build()); - 
IndexMetaData barIndex = newTestIndexMeta("foo", Settings.builder().put("test.setting", "foo").build()); - IndexMetaData bazIndex = newTestIndexMeta("baz", Settings.EMPTY); - - ClusterState clusterState = mockClusterState(fooIndex, barIndex, bazIndex); - - Map result = service.upgradeInfo(new String[]{"bar", "foo", "baz"}, - IndicesOptions.lenientExpandOpen(), clusterState); - - assertThat(result.size(), equalTo(2)); - assertThat(result.get("bar"), equalTo(UpgradeActionRequired.UPGRADE)); - assertThat(result.get("foo"), equalTo(UpgradeActionRequired.REINDEX)); - } - - public void testEarlierChecksWin() throws Exception { - IndexUpgradeService service = new IndexUpgradeService(Arrays.asList( - everythingIsFineCheck, - upgradeBarCheck, - reindexFooCheck - )); - - IndexMetaData fooIndex = newTestIndexMeta("bar", Settings.builder().put("test.setting", "bar").build()); - IndexMetaData barIndex = newTestIndexMeta("foo", Settings.builder().put("test.setting", "foo").build()); - IndexMetaData bazIndex = newTestIndexMeta("baz", Settings.EMPTY); - - ClusterState clusterState = mockClusterState(fooIndex, barIndex, bazIndex); - - Map result = service.upgradeInfo(new String[]{"bar", "foo", "baz"}, - IndicesOptions.lenientExpandOpen(), clusterState); - - assertThat(result.size(), equalTo(0)); // everything as the first checker should indicate that everything is fine - } - - public void testGenericTest() throws Exception { - IndexUpgradeService service = new IndexUpgradeService(Arrays.asList( - upgradeBarCheck, - reindexFooCheck - )); - - IndexMetaData goodIndex = newTestIndexMeta("good", Settings.EMPTY); - IndexMetaData badIndex = newTestIndexMeta("bad", - Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("2.0.0")).build()); - - ClusterState clusterState = mockClusterState(goodIndex, badIndex); - - Map result = service.upgradeInfo(new String[]{"good", "bad"}, - IndicesOptions.lenientExpandOpen(), clusterState); - - assertThat(result.size(), equalTo(1)); - assertThat(result.get("bad"), equalTo(UpgradeActionRequired.REINDEX)); - - } - - - private ClusterState mockClusterState(IndexMetaData... 
indices) { - MetaData.Builder metaDataBuilder = MetaData.builder(); - for (IndexMetaData indexMetaData : indices) { - metaDataBuilder.put(indexMetaData, false); - } - return ClusterState.builder(ClusterName.DEFAULT).metaData(metaDataBuilder).build(); - } - - public static IndexMetaData newTestIndexMeta(String name, String alias, Settings indexSettings) throws IOException { - Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_CREATION_DATE, 1) - .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_6_0_0) - .put(indexSettings) - .build(); - IndexMetaData.Builder builder = IndexMetaData.builder(name).settings(build); - if (alias != null) { - // Create alias - builder.putAlias(AliasMetaData.newAliasMetaDataBuilder(alias).build()); - } - return builder.build(); - } - - public static IndexMetaData newTestIndexMeta(String name, Settings indexSettings) throws IOException { - return newTestIndexMeta(name, null, indexSettings); - } -} diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeTasksIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeTasksIT.java deleted file mode 100644 index 1b9ad0a79fc38..0000000000000 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeTasksIT.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.upgrade; - -import org.apache.logging.log4j.LogManager; -import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.index.reindex.ReindexAction; -import org.elasticsearch.index.reindex.ReindexPlugin; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.plugins.ActionPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.protocol.xpack.migration.IndexUpgradeInfoResponse; -import org.elasticsearch.protocol.xpack.migration.UpgradeActionRequired; -import org.elasticsearch.script.MockScriptEngine; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.watcher.ResourceWatcherService; -import org.elasticsearch.xpack.core.upgrade.UpgradeField; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; - -import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; - -@ESIntegTestCase.ClusterScope(scope = TEST, supportsDedicatedMasters = false, numClientNodes = 0, maxNumDataNodes = 1) -public class IndexUpgradeTasksIT extends ESIntegTestCase { - - @Override - protected boolean ignoreExternalCluster() { - return true; - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(MockUpgradePlugin.class, ReindexPlugin.class); - } - - public static class MockUpgradePlugin extends Plugin implements ScriptPlugin, ActionPlugin { - - public static final String NAME = MockScriptEngine.NAME; - - private Settings settings; - private Upgrade upgrade; - - private CountDownLatch upgradeLatch = new CountDownLatch(1); - private 
CountDownLatch upgradeCalledLatch = new CountDownLatch(1); - - @Override - public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new MockScriptEngine(pluginScriptLang(), pluginScripts(), Collections.emptyMap()); - } - - public String pluginScriptLang() { - return NAME; - } - - public MockUpgradePlugin(Settings settings) { - this.settings = settings; - this.upgrade = new Upgrade(); - LogManager.getLogger(IndexUpgradeTasksIT.class).info("MockUpgradePlugin is created"); - } - - - protected Map, Object>> pluginScripts() { - Map, Object>> scripts = new HashMap<>(); - scripts.put("block", map -> { - upgradeCalledLatch.countDown(); - try { - assertThat(upgradeLatch.await(10, TimeUnit.SECONDS), equalTo(true)); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - return null; - }); - return scripts; - } - - @Override - public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, - ResourceWatcherService resourceWatcherService, ScriptService scriptService, - NamedXContentRegistry xContentRegistry, Environment environment, - NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - return Arrays.asList(new IndexUpgradeService(Collections.singletonList( - new IndexUpgradeCheck("test", - new Function() { - @Override - public UpgradeActionRequired apply(IndexMetaData indexMetaData) { - if ("test".equals(indexMetaData.getIndex().getName())) { - if (UpgradeField.checkInternalIndexFormat(indexMetaData)) { - return UpgradeActionRequired.UP_TO_DATE; - } else { - return UpgradeActionRequired.UPGRADE; - } - } else { - return UpgradeActionRequired.NOT_APPLICABLE; - } - } - }, - client, clusterService, Strings.EMPTY_ARRAY, - new Script(ScriptType.INLINE, NAME, "block", Collections.emptyMap())) - )), new XPackLicenseState(settings)); - } - - @Override - public List> getActions() { - return upgrade.getActions(); - } - - @Override - public Collection getRestHeaders() { - return upgrade.getRestHeaders(); - } - } - - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - - public void testParentTasksDuringUpgrade() throws Exception { - logger.info("before getInstance"); - PluginsService pluginsService = internalCluster().getDataNodeInstance(PluginsService.class); - MockUpgradePlugin mockUpgradePlugin = pluginsService.filterPlugins(MockUpgradePlugin.class).get(0); - assertThat(mockUpgradePlugin, notNullValue()); - logger.info("after getInstance"); - - assertAcked(client().admin().indices().prepareCreate("test").get()); - client().prepareIndex("test", "doc", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); - - ensureYellow("test"); - - - IndexUpgradeInfoResponse infoResponse = new IndexUpgradeInfoAction.RequestBuilder(client()).setIndices("test").get(); - assertThat(infoResponse.getActions().keySet(), contains("test")); - assertThat(infoResponse.getActions().get("test"), equalTo(UpgradeActionRequired.UPGRADE)); - - - ActionFuture upgradeResponse = new IndexUpgradeAction.RequestBuilder(client()).setIndex("test").execute(); - - - assertThat(mockUpgradePlugin.upgradeCalledLatch.await(10, TimeUnit.SECONDS), equalTo(true)); - ListTasksResponse response = client().admin().cluster().prepareListTasks().get(); - mockUpgradePlugin.upgradeLatch.countDown(); - - // Find the upgrade task group - TaskGroup upgradeGroup = null; - for (TaskGroup group : response.getTaskGroups()) { - if 
(IndexUpgradeAction.NAME.equals(group.getTaskInfo().getAction())) { - assertThat(upgradeGroup, nullValue()); - upgradeGroup = group; - } - } - assertThat(upgradeGroup, notNullValue()); - assertThat(upgradeGroup.getTaskInfo().isCancellable(), equalTo(true)); // The task should be cancellable - assertThat(upgradeGroup.getChildTasks(), hasSize(1)); // The reindex task should be a child - assertThat(upgradeGroup.getChildTasks().get(0).getTaskInfo().getAction(), equalTo(ReindexAction.NAME)); - - assertThat(upgradeResponse.get().getCreated(), equalTo(1L)); - } -} diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java deleted file mode 100644 index 013680ee2d17b..0000000000000 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.upgrade; - -import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.analysis.common.CommonAnalysisPlugin; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.index.reindex.ReindexPlugin; -import org.elasticsearch.indices.InvalidIndexNameException; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.MockScriptPlugin; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.function.Function; - -import static org.elasticsearch.test.VersionUtils.randomVersionBetween; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.startsWith; -import static org.hamcrest.core.IsEqual.equalTo; - -public class InternalIndexReindexerIT extends IndexUpgradeIntegTestCase { - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, 
- ReindexPlugin.class, CustomScriptPlugin.class, CommonAnalysisPlugin.class); - } - - public static class CustomScriptPlugin extends MockScriptPlugin { - @Override - protected Map, Object>> pluginScripts() { - Map, Object>> scripts = new HashMap<>(); - scripts.put("add_bar", map -> { - @SuppressWarnings("unchecked") Map ctx = (Map) map.get("ctx"); - ctx.put("_id", "bar" + "-" + ctx.get("_id")); - @SuppressWarnings("unchecked") Map source = (Map) ctx.get("_source"); - source.put("bar", true); - return null; - }); - scripts.put("fail", map -> { - throw new RuntimeException("Stop reindexing"); - }); - return scripts; - } - } - - public void testUpgradeIndex() throws Exception { - createTestIndex("test"); - InternalIndexReindexer reindexer = createIndexReindexer(123, script("add_bar"), Strings.EMPTY_ARRAY); - PlainActionFuture future = PlainActionFuture.newFuture(); - reindexer.upgrade(new TaskId("abc", 123), "test", clusterState(), future); - BulkByScrollResponse response = future.actionGet(); - assertThat(response.getCreated(), equalTo(2L)); - - SearchResponse searchResponse = client().prepareSearch("test-123").get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - assertThat(searchResponse.getHits().getHits().length, equalTo(2)); - for (SearchHit hit : searchResponse.getHits().getHits()) { - assertThat(hit.getId(), startsWith("bar-")); - assertThat(hit.getSourceAsMap(), notNullValue()); - assertThat(hit.getSourceAsMap().get("bar"), equalTo(true)); - } - - GetAliasesResponse aliasesResponse = client().admin().indices().prepareGetAliases("test").get(); - assertThat(aliasesResponse.getAliases().size(), equalTo(1)); - List testAlias = aliasesResponse.getAliases().get("test-123"); - assertNotNull(testAlias); - assertThat(testAlias.size(), equalTo(1)); - assertThat(testAlias.get(0).alias(), equalTo("test")); - } - - public void testTargetIndexExists() throws Exception { - createTestIndex("test"); - createTestIndex("test-123"); - InternalIndexReindexer reindexer = createIndexReindexer(123, script("add_bar"), Strings.EMPTY_ARRAY); - PlainActionFuture future = PlainActionFuture.newFuture(); - reindexer.upgrade(new TaskId("abc", 123), "test", clusterState(), future); - assertThrows(future, ResourceAlreadyExistsException.class); - - // Make sure that the index is not marked as read-only - client().prepareIndex("test", "doc").setSource("foo", "bar").get(); - } - - public void testTargetIndexExistsAsAlias() throws Exception { - createTestIndex("test"); - createTestIndex("test-foo"); - client().admin().indices().prepareAliases().addAlias("test-foo", "test-123").get(); - InternalIndexReindexer reindexer = createIndexReindexer(123, script("add_bar"), Strings.EMPTY_ARRAY); - PlainActionFuture future = PlainActionFuture.newFuture(); - reindexer.upgrade(new TaskId("abc", 123), "test", clusterState(), future); - assertThrows(future, InvalidIndexNameException.class); - - // Make sure that the index is not marked as read-only - client().prepareIndex("test-123", "doc").setSource("foo", "bar").get(); - } - - public void testSourceIndexIsReadonly() throws Exception { - createTestIndex("test"); - try { - Settings settings = Settings.builder().put(IndexMetaData.INDEX_READ_ONLY_SETTING.getKey(), true).build(); - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get()); - InternalIndexReindexer reindexer = createIndexReindexer(123, script("add_bar"), Strings.EMPTY_ARRAY); - PlainActionFuture future = PlainActionFuture.newFuture(); - 
reindexer.upgrade(new TaskId("abc", 123), "test", clusterState(), future); - assertThrows(future, IllegalStateException.class); - - // Make sure that the index is still marked as read-only - assertThrows(client().prepareIndex("test", "doc").setSource("foo", "bar"), ClusterBlockException.class); - } finally { - // Clean up the readonly index - Settings settings = Settings.builder().put(IndexMetaData.INDEX_READ_ONLY_SETTING.getKey(), false).build(); - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get()); - } - } - - - public void testReindexingFailure() throws Exception { - createTestIndex("test"); - // Make sure that the index is not marked as read-only - client().prepareIndex("test", "doc").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); - InternalIndexReindexer reindexer = createIndexReindexer(123, script("fail"), Strings.EMPTY_ARRAY); - PlainActionFuture future = PlainActionFuture.newFuture(); - reindexer.upgrade(new TaskId("abc", 123), "test", clusterState(), future); - assertThrows(future, RuntimeException.class); - - // Make sure that the index is not marked as read-only - client().prepareIndex("test", "doc").setSource("foo", "bar").get(); - } - - public void testMixedNodeVersion() throws Exception { - createTestIndex("test"); - - InternalIndexReindexer reindexer = createIndexReindexer(123, script("add_bar"), Strings.EMPTY_ARRAY); - PlainActionFuture future = PlainActionFuture.newFuture(); - reindexer.upgrade(new TaskId("abc", 123), "test", withRandomOldNode(), future); - assertThrows(future, IllegalStateException.class); - - // Make sure that the index is not marked as read-only - client().prepareIndex("test_v123", "doc").setSource("foo", "bar").get(); - } - - private void createTestIndex(String indexName) throws Exception { - assertAcked(client().admin().indices().prepareCreate(indexName).get()); - indexRandom(true, - client().prepareIndex(indexName, "doc", "1").setSource("{\"foo\":\"bar1-1\"}", XContentType.JSON), - client().prepareIndex(indexName, "doc", "2").setSource("{\"foo\":\"baz1-1\"}", XContentType.JSON) - ); - ensureYellow(indexName); - } - - private Script script(String name) { - return new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, name, new HashMap<>()); - } - - private InternalIndexReindexer createIndexReindexer(int version, Script transformScript, String[] types) { - return new InternalIndexReindexer(client(), internalCluster().clusterService(internalCluster().getMasterName()), - version, transformScript, types, voidActionListener -> voidActionListener.onResponse(null), - (aVoid, listener) -> listener.onResponse(TransportResponse.Empty.INSTANCE)); - - } - - private ClusterState clusterState() { - return clusterService().state(); - } - - private ClusterState withRandomOldNode() { - ClusterState clusterState = clusterState(); - DiscoveryNodes discoveryNodes = clusterState.nodes(); - List nodes = new ArrayList<>(); - for (ObjectCursor key : discoveryNodes.getMasterAndDataNodes().keys()) { - nodes.add(key.value); - } - // Fake one of the node versions - String nodeId = randomFrom(nodes); - DiscoveryNode node = discoveryNodes.get(nodeId); - DiscoveryNode newNode = new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), - node.getHostAddress(), node.getAddress(), node.getAttributes(), node.getRoles(), - randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0)); - - return 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(discoveryNodes).remove(node).add(newNode)).build(); - - } -} diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/actions/IndexUpgradeActionRequestTests.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/actions/IndexUpgradeActionRequestTests.java deleted file mode 100644 index acc2d07df7284..0000000000000 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/actions/IndexUpgradeActionRequestTests.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.upgrade.actions; - -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction.Request; - -public class IndexUpgradeActionRequestTests extends AbstractWireSerializingTestCase { - @Override - protected Request createTestInstance() { - return new Request(randomAlphaOfLength(10)); - } - - @Override - protected Writeable.Reader instanceReader() { - return Request::new; - } -} diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle index dd26db984cdf5..3313f1e21e1c8 100644 --- a/x-pack/plugin/watcher/build.gradle +++ b/x-pack/plugin/watcher/build.gradle @@ -117,7 +117,7 @@ run { plugin xpackModule('core') } -unitTest { +test { /* * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each * other if we allow them to set the number of available processors as it's set-once in Netty. 
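(Aside: a minimal sketch of what the renamed test block in x-pack/plugin/watcher/build.gradle plausibly contains after this change. The body of the block is not shown in this hunk; the es.set.netty.runtime.available.processors system property is assumed here as the switch Elasticsearch uses to keep tests from setting Netty's processor count, per the comment in the hunk above.)

test {
    // Assumed sketch: stop tests from setting Netty's available-processor count,
    // which is set-once per JVM and would collide across randomized test runs.
    systemProperty 'es.set.netty.runtime.available.processors', 'false'
}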
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 6648241d82a3b..f5f12d4fd244a 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -82,8 +82,6 @@ import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.actions.email.EmailAction; import org.elasticsearch.xpack.watcher.actions.email.EmailActionFactory; -import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatAction; -import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatActionFactory; import org.elasticsearch.xpack.watcher.actions.index.IndexAction; import org.elasticsearch.xpack.watcher.actions.index.IndexActionFactory; import org.elasticsearch.xpack.watcher.actions.jira.JiraAction; @@ -135,7 +133,6 @@ import org.elasticsearch.xpack.watcher.notification.email.attachment.HttpEmailAttachementParser; import org.elasticsearch.xpack.watcher.notification.email.attachment.ReportingAttachmentParser; import org.elasticsearch.xpack.watcher.notification.email.support.BodyPartSource; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService; import org.elasticsearch.xpack.watcher.notification.jira.JiraService; import org.elasticsearch.xpack.watcher.notification.pagerduty.PagerDutyService; import org.elasticsearch.xpack.watcher.notification.slack.SlackService; @@ -270,20 +267,18 @@ public Collection createComponents(Client client, ClusterService cluster throw new UncheckedIOException(e); } - new WatcherIndexTemplateRegistry(clusterService, threadPool, client, xContentRegistry); + new WatcherIndexTemplateRegistry(environment.settings(), clusterService, threadPool, client, xContentRegistry); // http client httpClient = new HttpClient(settings, getSslService(), cryptoService, clusterService); // notification EmailService emailService = new EmailService(settings, cryptoService, clusterService.getClusterSettings()); - HipChatService hipChatService = new HipChatService(settings, httpClient, clusterService.getClusterSettings()); JiraService jiraService = new JiraService(settings, httpClient, clusterService.getClusterSettings()); SlackService slackService = new SlackService(settings, httpClient, clusterService.getClusterSettings()); PagerDutyService pagerDutyService = new PagerDutyService(settings, httpClient, clusterService.getClusterSettings()); reloadableServices.add(emailService); - reloadableServices.add(hipChatService); reloadableServices.add(jiraService); reloadableServices.add(slackService); reloadableServices.add(pagerDutyService); @@ -315,7 +310,6 @@ public Collection createComponents(Client client, ClusterService cluster actionFactoryMap.put(WebhookAction.TYPE, new WebhookActionFactory(httpClient, templateEngine)); actionFactoryMap.put(IndexAction.TYPE, new IndexActionFactory(settings, client)); actionFactoryMap.put(LoggingAction.TYPE, new LoggingActionFactory(templateEngine)); - actionFactoryMap.put(HipChatAction.TYPE, new HipChatActionFactory(templateEngine, hipChatService)); actionFactoryMap.put(JiraAction.TYPE, new JiraActionFactory(templateEngine, jiraService)); actionFactoryMap.put(SlackAction.TYPE, new SlackActionFactory(templateEngine, slackService)); actionFactoryMap.put(PagerDutyAction.TYPE, new PagerDutyActionFactory(templateEngine, pagerDutyService)); @@ -420,7 +414,7 @@ public void afterBulk(long 
executionId, BulkRequest request, Throwable failure) return Arrays.asList(registry, inputRegistry, historyStore, triggerService, triggeredWatchParser, watcherLifeCycleService, executionService, triggerEngineListener, watcherService, watchParser, - configuredTriggerEngine, triggeredWatchStore, watcherSearchTemplateService, slackService, pagerDutyService, hipChatService); + configuredTriggerEngine, triggeredWatchStore, watcherSearchTemplateService, slackService, pagerDutyService); } protected TriggerEngine getTriggerEngine(Clock clock, ScheduleRegistry scheduleRegistry) { @@ -481,7 +475,6 @@ public List> getSettings() { settings.addAll(SlackService.getSettings()); settings.addAll(EmailService.getSettings()); settings.addAll(HtmlSanitizer.getSettings()); - settings.addAll(HipChatService.getSettings()); settings.addAll(JiraService.getSettings()); settings.addAll(PagerDutyService.getSettings()); settings.add(ReportingAttachmentParser.RETRIES_SETTING); @@ -581,11 +574,9 @@ public void onIndexModule(IndexModule module) { } assert listener != null; - // for now, we only add this index operation listener to indices starting with .watches - // this also means, that aliases pointing to this index have to follow this notation - if (module.getIndex().getName().startsWith(Watch.INDEX)) { - module.addIndexOperationListener(listener); - } + // Attach a listener to every index so that we can react to alias changes. + // This listener will be a no-op except on the index pointed to by .watches + module.addIndexOperationListener(listener); } static void validAutoCreateIndex(Settings settings, Logger logger) { @@ -669,10 +660,12 @@ public List> getContexts() { @Override public void close() throws IOException { - bulkProcessor.flush(); + if (enabled) { + bulkProcessor.flush(); + } IOUtils.closeWhileHandlingException(httpClient); try { - if (bulkProcessor.awaitClose(10, TimeUnit.SECONDS) == false) { + if (enabled && bulkProcessor.awaitClose(10, TimeUnit.SECONDS) == false) { logger.warn("failed to properly close watcher bulk processor"); } } catch (InterruptedException e) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherFeatureSet.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherFeatureSet.java index 5d5a30344ab12..3f02cfde7c7e7 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherFeatureSet.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherFeatureSet.java @@ -28,7 +28,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; public class WatcherFeatureSet implements XPackFeatureSet { @@ -72,7 +71,7 @@ public Map nativeCodeInfo() { public void usage(ActionListener listener) { if (enabled) { try (ThreadContext.StoredContext ignore = - stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + client.threadPool().getThreadContext().stashWithOrigin(WATCHER_ORIGIN)) { WatcherClient watcherClient = new WatcherClient(client); WatcherStatsRequest request = new WatcherStatsRequest(); request.includeStats(true); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java index 48e8dd7813e19..54baf94e8d5bb 100644 --- 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java @@ -101,7 +101,7 @@ void setConfiguration(Configuration configuration) { */ @Override public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { - if (isWatchDocument(shardId.getIndexName(), operation.type())) { + if (isWatchDocument(shardId.getIndexName())) { ZonedDateTime now = Instant.ofEpochMilli(clock.millis()).atZone(ZoneOffset.UTC); try { Watch watch = parser.parseWithSecrets(operation.id(), true, operation.source(), now, XContentType.JSON, @@ -150,7 +150,7 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { */ @Override public void postIndex(ShardId shardId, Engine.Index index, Exception ex) { - if (isWatchDocument(shardId.getIndexName(), index.type())) { + if (isWatchDocument(shardId.getIndexName())) { logger.debug(() -> new ParameterizedMessage("removing watch [{}] from trigger", index.id()), ex); triggerService.remove(index.id()); } @@ -166,7 +166,7 @@ public void postIndex(ShardId shardId, Engine.Index index, Exception ex) { */ @Override public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { - if (isWatchDocument(shardId.getIndexName(), delete.type())) { + if (isWatchDocument(shardId.getIndexName())) { triggerService.remove(delete.id()); } @@ -177,11 +177,10 @@ public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { * Check if a supplied index and document matches the current configuration for watcher * * @param index The index to check for - * @param docType The document type * @return true if this is a watch in the active watcher index, false otherwise */ - private boolean isWatchDocument(String index, String docType) { - return configuration.isIndexAndActive(index) && docType.equals(Watch.DOC_TYPE); + private boolean isWatchDocument(String index) { + return configuration.isIndexAndActive(index); } /** diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/ActionBuilders.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/ActionBuilders.java index ab068c2e3457d..4d54b32e53d45 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/ActionBuilders.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/ActionBuilders.java @@ -6,19 +6,18 @@ package org.elasticsearch.xpack.watcher.actions; import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; -import org.elasticsearch.xpack.watcher.common.text.TextTemplate; -import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate; -import org.elasticsearch.xpack.watcher.notification.pagerduty.IncidentEvent; -import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessage; import org.elasticsearch.xpack.watcher.actions.email.EmailAction; -import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatAction; import org.elasticsearch.xpack.watcher.actions.index.IndexAction; import org.elasticsearch.xpack.watcher.actions.jira.JiraAction; import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; import org.elasticsearch.xpack.watcher.actions.pagerduty.PagerDutyAction; import org.elasticsearch.xpack.watcher.actions.slack.SlackAction; import org.elasticsearch.xpack.watcher.actions.webhook.WebhookAction; +import 
org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate; +import org.elasticsearch.xpack.watcher.notification.pagerduty.IncidentEvent; +import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessage; import java.util.Map; @@ -35,10 +34,18 @@ public static EmailAction.Builder emailAction(EmailTemplate email) { return EmailAction.builder(email); } + /** + * Types are deprecated and should not be used. use {@link #indexAction(String)} + */ + @Deprecated public static IndexAction.Builder indexAction(String index, String type) { return IndexAction.builder(index, type); } + public static IndexAction.Builder indexAction(String index) { + return IndexAction.builder(index); + } + public static JiraAction.Builder jiraAction(String account, MapBuilder fields) { return jiraAction(account, fields.immutableMap()); } @@ -63,22 +70,6 @@ public static LoggingAction.Builder loggingAction(TextTemplate text) { return LoggingAction.builder(text); } - public static HipChatAction.Builder hipchatAction(String message) { - return hipchatAction(new TextTemplate(message)); - } - - public static HipChatAction.Builder hipchatAction(String account, String body) { - return hipchatAction(account, new TextTemplate(body)); - } - - public static HipChatAction.Builder hipchatAction(TextTemplate body) { - return hipchatAction(null, body); - } - - public static HipChatAction.Builder hipchatAction(String account, TextTemplate body) { - return HipChatAction.builder(account, body); - } - public static SlackAction.Builder slackAction(String account, SlackMessage.Template.Builder message) { return slackAction(account, message.build()); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/ExecutableHipChatAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/ExecutableHipChatAction.java deleted file mode 100644 index 176de8b945df2..0000000000000 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/ExecutableHipChatAction.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.watcher.actions.hipchat; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.xpack.core.watcher.actions.Action; -import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; -import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; -import org.elasticsearch.xpack.core.watcher.watch.Payload; -import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatAccount; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService; -import org.elasticsearch.xpack.watcher.notification.hipchat.SentMessages; -import org.elasticsearch.xpack.watcher.support.Variables; - -import java.util.Map; - -public class ExecutableHipChatAction extends ExecutableAction { - - private final TextTemplateEngine templateEngine; - private final HipChatService hipchatService; - - public ExecutableHipChatAction(HipChatAction action, Logger logger, HipChatService hipchatService, - TextTemplateEngine templateEngine) { - super(action, logger); - this.hipchatService = hipchatService; - this.templateEngine = templateEngine; - } - - @Override - public Action.Result execute(final String actionId, WatchExecutionContext ctx, Payload payload) throws Exception { - - HipChatAccount account = hipchatService.getAccount(action.account); - // lets validate the message again, in case the hipchat service were updated since the - // watch/action were created. - account.validateParsedTemplate(ctx.id().watchId(), actionId, action.message); - - Map model = Variables.createCtxParamsMap(ctx, payload); - HipChatMessage message = account.render(ctx.id().watchId(), actionId, templateEngine, action.message, model); - - if (ctx.simulateAction(actionId)) { - return new HipChatAction.Result.Simulated(message); - } - - SentMessages sentMessages = account.send(message, action.proxy); - return new HipChatAction.Result.Executed(sentMessages); - } - -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatAction.java deleted file mode 100644 index 57b5bfa591e4d..0000000000000 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatAction.java +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.watcher.actions.hipchat; - - -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.watcher.actions.Action; -import org.elasticsearch.xpack.watcher.common.http.HttpProxy; -import org.elasticsearch.xpack.watcher.common.text.TextTemplate; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage; -import org.elasticsearch.xpack.watcher.notification.hipchat.SentMessages; - -import java.io.IOException; -import java.util.Objects; - -public class HipChatAction implements Action { - - public static final String TYPE = "hipchat"; - - @Nullable final String account; - @Nullable final HttpProxy proxy; - final HipChatMessage.Template message; - - public HipChatAction(@Nullable String account, HipChatMessage.Template message, @Nullable HttpProxy proxy) { - this.account = account; - this.message = message; - this.proxy = proxy; - } - - @Override - public String type() { - return TYPE; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - HipChatAction that = (HipChatAction) o; - - return Objects.equals(account, that.account) && - Objects.equals(message, that.message) && - Objects.equals(proxy, that.proxy); - } - - @Override - public int hashCode() { - return Objects.hash(account, message, proxy); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (account != null) { - builder.field(Field.ACCOUNT.getPreferredName(), account); - } - if (proxy != null) { - proxy.toXContent(builder, params); - } - builder.field(Field.MESSAGE.getPreferredName(), message); - return builder.endObject(); - } - - public static HipChatAction parse(String watchId, String actionId, XContentParser parser) throws IOException { - String account = null; - HipChatMessage.Template message = null; - HttpProxy proxy = null; - - String currentFieldName = null; - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (Field.ACCOUNT.match(currentFieldName, parser.getDeprecationHandler())) { - if (token == XContentParser.Token.VALUE_STRING) { - account = parser.text(); - } else { - throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. expected [{}] to be of type string, but " + - "found [{}] instead", TYPE, watchId, actionId, Field.ACCOUNT.getPreferredName(), token); - } - } else if (Field.PROXY.match(currentFieldName, parser.getDeprecationHandler())) { - proxy = HttpProxy.parse(parser); - } else if (Field.MESSAGE.match(currentFieldName, parser.getDeprecationHandler())) { - try { - message = HipChatMessage.Template.parse(parser); - } catch (Exception e) { - throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. failed to parse [{}] field", e, TYPE, - watchId, actionId, Field.MESSAGE.getPreferredName()); - } - } else { - throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. unexpected token [{}]", TYPE, watchId, - actionId, token); - } - } - - if (message == null) { - throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. 
missing required [{}] field", TYPE, watchId, - actionId, Field.MESSAGE.getPreferredName()); - } - - return new HipChatAction(account, message, proxy); - } - - public static Builder builder(String account, TextTemplate body) { - return new Builder(account, body); - } - - public interface Result { - - class Executed extends Action.Result implements Result { - - private final SentMessages sentMessages; - - public Executed(SentMessages sentMessages) { - super(TYPE, status(sentMessages)); - this.sentMessages = sentMessages; - } - - public SentMessages sentMessages() { - return sentMessages; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.field(type, sentMessages, params); - } - - static Status status(SentMessages sentMessages) { - boolean hasSuccesses = false; - boolean hasFailures = false; - for (SentMessages.SentMessage message : sentMessages) { - if (message.isSuccess()) { - hasSuccesses = true; - } else { - hasFailures = true; - } - if (hasFailures && hasSuccesses) { - return Status.PARTIAL_FAILURE; - } - } - return hasFailures ? Status.FAILURE : Status.SUCCESS; - } - } - - class Simulated extends Action.Result implements Result { - - private final HipChatMessage message; - - protected Simulated(HipChatMessage message) { - super(TYPE, Status.SIMULATED); - this.message = message; - } - - public HipChatMessage getMessage() { - return message; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.startObject(type) - .field(Field.MESSAGE.getPreferredName(), message, params) - .endObject(); - } - } - } - - public static class Builder implements Action.Builder { - - final String account; - final HipChatMessage.Template.Builder messageBuilder; - private HttpProxy proxy; - - public Builder(String account, TextTemplate body) { - this.account = account; - this.messageBuilder = new HipChatMessage.Template.Builder(body); - } - - public Builder addRooms(TextTemplate... rooms) { - messageBuilder.addRooms(rooms); - return this; - } - - public Builder addRooms(String... rooms) { - TextTemplate[] templates = new TextTemplate[rooms.length]; - for (int i = 0; i < rooms.length; i++) { - templates[i] = new TextTemplate(rooms[i]); - } - return addRooms(templates); - } - - - public Builder addUsers(TextTemplate... users) { - messageBuilder.addUsers(users); - return this; - } - - public Builder addUsers(String... 
users) { - TextTemplate[] templates = new TextTemplate[users.length]; - for (int i = 0; i < users.length; i++) { - templates[i] = new TextTemplate(users[i]); - } - return addUsers(templates); - } - - public Builder setFrom(String from) { - messageBuilder.setFrom(from); - return this; - } - - public Builder setFormat(HipChatMessage.Format format) { - messageBuilder.setFormat(format); - return this; - } - - public Builder setColor(TextTemplate color) { - messageBuilder.setColor(color); - return this; - } - - public Builder setColor(HipChatMessage.Color color) { - return setColor(color.asTemplate()); - } - - public Builder setNotify(boolean notify) { - messageBuilder.setNotify(notify); - return this; - } - - public Builder setProxy(HttpProxy proxy) { - this.proxy = proxy; - return this; - } - - @Override - public HipChatAction build() { - return new HipChatAction(account, messageBuilder.build(), proxy); - } - } - - public interface Field { - ParseField ACCOUNT = new ParseField("account"); - ParseField MESSAGE = new ParseField("message"); - ParseField PROXY = new ParseField("proxy"); - } -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionFactory.java deleted file mode 100644 index 6b9f053d1db49..0000000000000 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionFactory.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.watcher.actions.hipchat; - -import org.apache.logging.log4j.LogManager; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; -import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatAccount; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService; - -import java.io.IOException; - -public class HipChatActionFactory extends ActionFactory { - - private final TextTemplateEngine templateEngine; - private final HipChatService hipchatService; - - public HipChatActionFactory(TextTemplateEngine templateEngine, HipChatService hipchatService) { - super(LogManager.getLogger(ExecutableHipChatAction.class)); - this.templateEngine = templateEngine; - this.hipchatService = hipchatService; - } - - @Override - public ExecutableHipChatAction parseExecutable(String watchId, String actionId, XContentParser parser) throws IOException { - HipChatAction action = HipChatAction.parse(watchId, actionId, parser); - HipChatAccount account = hipchatService.getAccount(action.account); - account.validateParsedTemplate(watchId, actionId, action.message); - return new ExecutableHipChatAction(action, actionLogger, hipchatService, templateEngine); - } -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexAction.java index c6a8375937374..ba93d4268baed 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexAction.java @@ -29,7 +29,7 @@ public class IndexAction implements Action { public static final String TYPE = "index"; - @Nullable final String docType; + @Nullable @Deprecated final String docType; @Nullable final String index; @Nullable final String docId; @Nullable final String executionTimeField; @@ -40,6 +40,15 @@ public class IndexAction implements Action { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(IndexAction.class)); public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in a watcher index action is deprecated."; + public IndexAction(@Nullable String index, @Nullable String docId, + @Nullable String executionTimeField, + @Nullable TimeValue timeout, @Nullable ZoneId dynamicNameTimeZone, @Nullable RefreshPolicy refreshPolicy) { + this(index, null, docId, executionTimeField, timeout, dynamicNameTimeZone, refreshPolicy); + } + /** + * Document types are deprecated, use constructor without docType + */ + @Deprecated public IndexAction(@Nullable String index, @Nullable String docType, @Nullable String docId, @Nullable String executionTimeField, @Nullable TimeValue timeout, @Nullable ZoneId dynamicNameTimeZone, @Nullable RefreshPolicy refreshPolicy) { @@ -188,10 +197,18 @@ public static IndexAction parse(String watchId, String actionId, XContentParser return new IndexAction(index, docType, docId, executionTimeField, timeout, dynamicNameTimeZone, refreshPolicy); } + /** + * Document types are deprecated, use {@link #builder(java.lang.String)} + */ + @Deprecated public static Builder builder(String index, String docType) { return new Builder(index, docType); } + public static Builder builder(String index) { + return new 
Builder(index); + } + public static class Result extends Action.Result { private final XContentSource response; @@ -278,11 +295,20 @@ public static class Builder implements Action.Builder { ZoneId dynamicNameTimeZone; RefreshPolicy refreshPolicy; + /** + * Document types are deprecated and should not be used. Use: {@link Builder#Builder(java.lang.String)} + */ + @Deprecated private Builder(String index, String docType) { this.index = index; this.docType = docType; } + private Builder(String index) { + this.index = index; + this.docType = null; + } + public Builder setDocId(String docId) { this.docId = docId; return this; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java index 654bc6b757dde..e0d3129e75f34 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java @@ -300,7 +300,7 @@ private HttpProxy getProxyFromSettings(Settings settings) { Scheme.parse(HttpSettings.PROXY_SCHEME.get(settings)) : Scheme.HTTP; int proxyPort = HttpSettings.PROXY_PORT.get(settings); if (proxyPort != 0 && Strings.hasText(proxyHost)) { - logger.info("Using default proxy for http input and slack/hipchat/pagerduty/webhook actions [{}:{}]", proxyHost, proxyPort); + logger.info("Using default proxy for http input and slack/pagerduty/webhook actions [{}:{}]", proxyHost, proxyPort); } else if (proxyPort != 0 ^ Strings.hasText(proxyHost)) { throw new IllegalArgumentException("HTTP proxy requires both settings: [" + HttpSettings.PROXY_HOST.getKey() + "] and [" + HttpSettings.PROXY_PORT.getKey() + "]"); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java index 237da4ae6ae1e..69b1079449604 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.watcher.execution; import com.google.common.collect.Iterables; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -15,8 +16,10 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.Client; @@ -32,19 +35,25 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.engine.DocumentMissingException; +import 
org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; import org.elasticsearch.xpack.core.watcher.actions.ActionWrapperResult; import org.elasticsearch.xpack.core.watcher.common.stats.Counters; import org.elasticsearch.xpack.core.watcher.condition.Condition; import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; import org.elasticsearch.xpack.core.watcher.execution.QueuedWatch; +import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionSnapshot; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; import org.elasticsearch.xpack.core.watcher.history.WatchRecord; import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; import org.elasticsearch.xpack.core.watcher.transform.Transform; import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; import org.elasticsearch.xpack.core.watcher.watch.Watch; @@ -66,13 +75,14 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; public class ExecutionService { @@ -352,11 +362,11 @@ public void updateWatchStatus(Watch watch) throws IOException { .field(WatchField.STATUS.getPreferredName(), watch.status(), params) .endObject(); - UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, Watch.DOC_TYPE, watch.id()); + UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, watch.id()); updateRequest.doc(source); updateRequest.setIfSeqNo(watch.getSourceSeqNo()); updateRequest.setIfPrimaryTerm(watch.getSourcePrimaryTerm()); - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(WATCHER_ORIGIN)) { client.update(updateRequest).actionGet(indexDefaultTimeout); } catch (DocumentMissingException e) { // do not rethrow this exception, otherwise the watch history will contain an exception @@ -399,22 +409,68 @@ private void executeAsync(WatchExecutionContext ctx, final TriggeredWatch trigge try { executor.execute(new WatchExecutionTask(ctx, () -> execute(ctx))); } catch (EsRejectedExecutionException e) { - String message = "failed to run triggered watch [" + triggeredWatch.id() + "] due to thread pool capacity"; - WatchRecord record = ctx.abortBeforeExecution(ExecutionState.THREADPOOL_REJECTION, message); - try { - if (ctx.overrideRecordOnConflict()) { - historyStore.forcePut(record); - } else { - historyStore.put(record); + //Using the generic pool here since this can happen from a write thread and we don't want to block a write + //thread to kick off these additional write/delete requests. 
+ //Intentionally not using the HistoryStore or TriggeredWatchStore to avoid re-using the same synchronous + //BulkProcessor which can cause a deadlock, see #41390 + genericExecutor.execute(new WatchExecutionTask(ctx, () -> { + String message = "failed to run triggered watch [" + triggeredWatch.id() + "] due to thread pool capacity"; + logger.warn(message); + WatchRecord record = ctx.abortBeforeExecution(ExecutionState.THREADPOOL_REJECTION, message); + try { + forcePutHistory(record); + } catch (Exception exc) { + logger.error((Supplier) () -> + new ParameterizedMessage( + "Error storing watch history record for watch [{}] after thread pool rejection", + triggeredWatch.id()), exc); } - } catch (Exception exc) { - logger.error((Supplier) () -> - new ParameterizedMessage("Error storing watch history record for watch [{}] after thread pool rejection", - triggeredWatch.id()), exc); + deleteTrigger(triggeredWatch.id()); + })); + } + } + + /** + * Stores the specified watchRecord. + * Any existing watchRecord will be overwritten. + */ + private void forcePutHistory(WatchRecord watchRecord) { + String index = HistoryStoreField.getHistoryIndexNameForTime(watchRecord.triggerEvent().triggeredTime()); + try { + try (XContentBuilder builder = XContentFactory.jsonBuilder(); + ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(WATCHER_ORIGIN)) { + watchRecord.toXContent(builder, WatcherParams.HIDE_SECRETS); + IndexRequest request = new IndexRequest(index) + .id(watchRecord.id().value()) + .source(builder) + .opType(IndexRequest.OpType.CREATE); + client.index(request).get(30, TimeUnit.SECONDS); + logger.debug("indexed watch history record [{}]", watchRecord.id().value()); + } catch (VersionConflictEngineException vcee) { + watchRecord = new WatchRecord.MessageWatchRecord(watchRecord, ExecutionState.EXECUTED_MULTIPLE_TIMES, + "watch record [{ " + watchRecord.id() + " }] has been stored before, previous state [" + watchRecord.state() + "]"); + try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder(); + ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(WATCHER_ORIGIN)) { + IndexRequest request = new IndexRequest(index) + .id(watchRecord.id().value()) + .source(xContentBuilder.value(watchRecord)); + client.index(request).get(30, TimeUnit.SECONDS); + } + logger.debug("overwrote watch history record [{}]", watchRecord.id().value()); } + } catch (InterruptedException | ExecutionException | TimeoutException | IOException ioe) { + final WatchRecord wr = watchRecord; + logger.error((Supplier) () -> new ParameterizedMessage("failed to persist watch record [{}]", wr), ioe); + } + } - triggeredWatchStore.delete(triggeredWatch.id()); + private void deleteTrigger(Wid watcherId) { + DeleteRequest request = new DeleteRequest(TriggeredWatchStoreField.INDEX_NAME); + request.id(watcherId.value()); + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(WATCHER_ORIGIN)) { + client.delete(request).actionGet(30, TimeUnit.SECONDS); } + logger.trace("successfully deleted triggered watch with id [{}]", watcherId); } WatchRecord executeInner(WatchExecutionContext ctx) { @@ -500,8 +556,8 @@ public void executeTriggeredWatches(Collection triggeredWatches) * @return The GetResponse of calling the get API of this watch */ private GetResponse getWatch(String id) { - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { - GetRequest 
getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, id).preference(Preference.LOCAL.type()).realtime(true); + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(WATCHER_ORIGIN)) { + GetRequest getRequest = new GetRequest(Watch.INDEX, id).preference(Preference.LOCAL.type()).realtime(true); PlainActionFuture future = PlainActionFuture.newFuture(); client.get(getRequest, future); return future.actionGet(); @@ -534,12 +590,12 @@ private void clearExecutions() { // the watch execution task takes another runnable as parameter // the best solution would be to move the whole execute() method, which is handed over as ctor parameter // over into this class, this is the quicker way though - static final class WatchExecutionTask implements Runnable { + public static final class WatchExecutionTask implements Runnable { private final WatchExecutionContext ctx; private final Runnable runnable; - WatchExecutionTask(WatchExecutionContext ctx, Runnable runnable) { + public WatchExecutionTask(WatchExecutionContext ctx, Runnable runnable) { this.ctx = ctx; this.runnable = runnable; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java index c5ca671e13e65..f3d87dc6ada63 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java @@ -96,8 +96,7 @@ public BulkResponse putAll(final List triggeredWatches) throws I private BulkRequest createBulkRequest(final List triggeredWatches) throws IOException { BulkRequest request = new BulkRequest(); for (TriggeredWatch triggeredWatch : triggeredWatches) { - IndexRequest indexRequest = new IndexRequest(TriggeredWatchStoreField.INDEX_NAME, TriggeredWatchStoreField.DOC_TYPE, - triggeredWatch.id().value()); + IndexRequest indexRequest = new IndexRequest(TriggeredWatchStoreField.INDEX_NAME).id(triggeredWatch.id().value()); try (XContentBuilder builder = XContentFactory.jsonBuilder()) { triggeredWatch.toXContent(builder, ToXContent.EMPTY_PARAMS); indexRequest.source(builder); @@ -115,7 +114,7 @@ private BulkRequest createBulkRequest(final List triggeredWatche * @param wid The ID os the triggered watch id */ public void delete(Wid wid) { - DeleteRequest request = new DeleteRequest(TriggeredWatchStoreField.INDEX_NAME, TriggeredWatchStoreField.DOC_TYPE, wid.value()); + DeleteRequest request = new DeleteRequest(TriggeredWatchStoreField.INDEX_NAME, wid.value()); bulkProcessor.add(request); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java index 42f5f79a37de2..735af58095d0d 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java @@ -28,8 +28,6 @@ public class HistoryStore { - public static final String DOC_TYPE = "doc"; - private static final Logger logger = LogManager.getLogger(HistoryStore.class); private final BulkProcessor bulkProcessor; @@ -47,7 +45,7 @@ public void put(WatchRecord watchRecord) throws Exception { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { 
watchRecord.toXContent(builder, WatcherParams.HIDE_SECRETS); - IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()).source(builder); + IndexRequest request = new IndexRequest(index).id(watchRecord.id().value()).source(builder); request.opType(IndexRequest.OpType.CREATE); bulkProcessor.add(request); } catch (IOException ioe) { @@ -64,7 +62,7 @@ public void forcePut(WatchRecord watchRecord) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { watchRecord.toXContent(builder, WatcherParams.HIDE_SECRETS); - IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()).source(builder); + IndexRequest request = new IndexRequest(index).id(watchRecord.id().value()).source(builder); bulkProcessor.add(request); } catch (IOException ioe) { final WatchRecord wr = watchRecord; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatAccount.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatAccount.java deleted file mode 100644 index 67aee91f13976..0000000000000 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatAccount.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.settings.SecureSetting; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.watcher.common.http.HttpClient; -import org.elasticsearch.xpack.watcher.common.http.HttpProxy; -import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; - -import java.io.IOException; -import java.util.Locale; -import java.util.Map; - -public abstract class HipChatAccount { - - public static final String ROOM_SETTING = HipChatMessage.Field.ROOM.getPreferredName(); - public static final String DEFAULT_ROOM_SETTING = "message_defaults." + HipChatMessage.Field.ROOM.getPreferredName(); - public static final String DEFAULT_USER_SETTING = "message_defaults." + HipChatMessage.Field.USER.getPreferredName(); - public static final String DEFAULT_FROM_SETTING = "message_defaults." + HipChatMessage.Field.FROM.getPreferredName(); - public static final String DEFAULT_FORMAT_SETTING = "message_defaults." + HipChatMessage.Field.FORMAT.getPreferredName(); - public static final String DEFAULT_COLOR_SETTING = "message_defaults." + HipChatMessage.Field.COLOR.getPreferredName(); - public static final String DEFAULT_NOTIFY_SETTING = "message_defaults." 
+ HipChatMessage.Field.NOTIFY.getPreferredName(); - - static final Setting SECURE_AUTH_TOKEN_SETTING = SecureSetting.secureString("secure_auth_token", null); - - protected final Logger logger; - protected final String name; - protected final Profile profile; - protected final HipChatServer server; - protected final HttpClient httpClient; - protected final String authToken; - - protected HipChatAccount(String name, Profile profile, Settings settings, HipChatServer defaultServer, HttpClient httpClient, - Logger logger) { - this.name = name; - this.profile = profile; - this.server = new HipChatServer(settings, defaultServer); - this.httpClient = httpClient; - this.authToken = getAuthToken(name, settings); - this.logger = logger; - } - - private static String getAuthToken(String name, Settings settings) { - SecureString secureString = SECURE_AUTH_TOKEN_SETTING.get(settings); - if (secureString == null || secureString.length() < 1) { - throw new SettingsException( - "hipchat account [" + name + "] missing required [" + SECURE_AUTH_TOKEN_SETTING.getKey() + "] secure setting"); - } - return secureString.toString(); - } - - public abstract String type(); - - public abstract void validateParsedTemplate(String watchId, String actionId, HipChatMessage.Template message) throws SettingsException; - - public abstract HipChatMessage render(String watchId, String actionId, TextTemplateEngine engine, HipChatMessage.Template template, - Map model); - - public abstract SentMessages send(HipChatMessage message, @Nullable HttpProxy proxy); - - public enum Profile { - - V1() { - @Override - HipChatAccount createAccount(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, - Logger logger) { - return new V1Account(name, settings, defaultServer, httpClient, logger); - } - }, - INTEGRATION() { - @Override - HipChatAccount createAccount(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, - Logger logger) { - return new IntegrationAccount(name, settings, defaultServer, httpClient, logger); - } - }, - USER() { - @Override - HipChatAccount createAccount(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, - Logger logger) { - return new UserAccount(name, settings, defaultServer, httpClient, logger); - } - }; - - abstract HipChatAccount createAccount(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, - Logger logger); - - public String value() { - return name().toLowerCase(Locale.ROOT); - } - - public static Profile parse(XContentParser parser) throws IOException { - return Profile.valueOf(parser.text().toUpperCase(Locale.ROOT)); - } - - public static Profile resolve(String value, Profile defaultValue) { - if (value == null) { - return defaultValue; - } - return Profile.valueOf(value.toUpperCase(Locale.ROOT)); - } - - public static Profile resolve(Settings settings, String setting, Profile defaultValue) { - return resolve(settings.get(setting), defaultValue); - } - - public static boolean validate(String value) { - try { - Profile.valueOf(value.toUpperCase(Locale.ROOT)); - return true; - } catch (IllegalArgumentException ilae) { - return false; - } - } - } -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatMessage.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatMessage.java deleted file mode 100644 index 10eaba535edf1..0000000000000 --- 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatMessage.java +++ /dev/null @@ -1,457 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.watcher.common.text.TextTemplate; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.Objects; - -public class HipChatMessage implements ToXContentObject { - - final String body; - @Nullable final String[] rooms; - @Nullable final String[] users; - @Nullable final String from; - @Nullable final Format format; - @Nullable final Color color; - @Nullable final Boolean notify; - - public HipChatMessage(String body, String[] rooms, String[] users, String from, Format format, Color color, Boolean notify) { - this.body = body; - this.rooms = rooms; - this.users = users; - this.from = from; - this.format = format; - this.color = color; - this.notify = notify; - } - - public String getBody() { - return body; - } - - public String[] getRooms() { - return rooms; - } - - @Nullable - public String[] getUsers() { - return users; - } - - @Nullable - public String getFrom() { - return from; - } - - @Nullable - public Format getFormat() { - return format; - } - - @Nullable - public Color getColor() { - return color; - } - - @Nullable - public Boolean getNotify() { - return notify; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - HipChatMessage that = (HipChatMessage) o; - return Objects.equals(body, that.body) && - Objects.deepEquals(rooms, that.rooms) && - Objects.deepEquals(users, that.users) && - Objects.equals(from, that.from) && - Objects.equals(format, that.format) && - Objects.equals(color, that.color) && - Objects.equals(notify, that.notify); - } - - @Override - public int hashCode() { - int result = body.hashCode(); - result = 31 * result + (rooms != null ? Arrays.hashCode(rooms) : 0); - result = 31 * result + (users != null ? Arrays.hashCode(users) : 0); - result = 31 * result + (from != null ? from.hashCode() : 0); - result = 31 * result + (format != null ? format.hashCode() : 0); - result = 31 * result + (color != null ? color.hashCode() : 0); - result = 31 * result + (notify != null ? 
notify.hashCode() : 0); - return result; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return toXContent(builder, params, true); - } - - public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean includeTargets) throws IOException { - builder.startObject(); - if (from != null) { - builder.field(Field.FROM.getPreferredName(), from); - } - if (includeTargets) { - if (rooms != null && rooms.length > 0) { - builder.array(Field.ROOM.getPreferredName(), rooms); - } - if (users != null && users.length > 0) { - builder.array(Field.USER.getPreferredName(), users); - } - } - builder.field(Field.BODY.getPreferredName(), body); - if (format != null) { - builder.field(Field.FORMAT.getPreferredName(), format.value()); - } - if (color != null) { - builder.field(Field.COLOR.getPreferredName(), color.value()); - } - if (notify != null) { - builder.field(Field.NOTIFY.getPreferredName(), notify); - } - return builder.endObject(); - } - - public static class Template implements ToXContentObject { - - final TextTemplate body; - @Nullable final TextTemplate[] rooms; - @Nullable final TextTemplate[] users; - @Nullable final String from; - @Nullable final Format format; - @Nullable final TextTemplate color; - @Nullable final Boolean notify; - - public Template(TextTemplate body, - TextTemplate[] rooms, - TextTemplate[] users, - String from, - Format format, - TextTemplate color, - Boolean notify) { - this.rooms = rooms; - this.users = users; - this.body = body; - this.from = from; - this.format = format; - this.color = color; - this.notify = notify; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - Template template = (Template) o; - - return Objects.equals(body, template.body) && - Objects.deepEquals(rooms, template.rooms) && - Objects.deepEquals(users, template.users) && - Objects.equals(from, template.from) && - Objects.equals(format, template.format) && - Objects.equals(color, template.color) && - Objects.equals(notify, template.notify); - } - - @Override - public int hashCode() { - return Objects.hash(body, rooms, users, from, format, color, notify); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (from != null) { - builder.field(Field.FROM.getPreferredName(), from); - } - if (rooms != null && rooms.length > 0) { - builder.startArray(Field.ROOM.getPreferredName()); - for (TextTemplate room : rooms) { - room.toXContent(builder, params); - } - builder.endArray(); - } - if (users != null && users.length > 0) { - builder.startArray(Field.USER.getPreferredName()); - for (TextTemplate user : users) { - user.toXContent(builder, params); - } - builder.endArray(); - } - builder.field(Field.BODY.getPreferredName(), body, params); - if (format != null) { - builder.field(Field.FORMAT.getPreferredName(), format.value()); - } - if (color != null) { - builder.field(Field.COLOR.getPreferredName(), color, params); - } - if (notify != null) { - builder.field(Field.NOTIFY.getPreferredName(), notify); - } - return builder.endObject(); - } - - public static Template parse(XContentParser parser) throws IOException { - TextTemplate body = null; - TextTemplate[] rooms = null; - TextTemplate[] users = null; - String from = null; - TextTemplate color = null; - Boolean notify = null; - HipChatMessage.Format messageFormat = null; - - String 
currentFieldName = null; - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (Field.FROM.match(currentFieldName, parser.getDeprecationHandler())) { - from = parser.text(); - } else if (Field.ROOM.match(currentFieldName, parser.getDeprecationHandler())) { - List templates = new ArrayList<>(); - if (token == XContentParser.Token.START_ARRAY) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - try { - templates.add(TextTemplate.parse(parser)); - } catch (ElasticsearchParseException epe) { - throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", epe, - Field.ROOM.getPreferredName()); - } - } - } else { - try { - templates.add(TextTemplate.parse(parser)); - } catch (ElasticsearchParseException epe) { - throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", epe, - Field.ROOM.getPreferredName()); - } - } - rooms = templates.toArray(new TextTemplate[templates.size()]); - } else if (Field.USER.match(currentFieldName, parser.getDeprecationHandler())) { - List templates = new ArrayList<>(); - if (token == XContentParser.Token.START_ARRAY) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - try { - templates.add(TextTemplate.parse(parser)); - } catch (ElasticsearchParseException epe) { - throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", epe, - Field.USER.getPreferredName()); - } - } - } else { - try { - templates.add(TextTemplate.parse(parser)); - } catch (ElasticsearchParseException epe) { - throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", epe, - Field.USER.getPreferredName()); - } - } - users = templates.toArray(new TextTemplate[templates.size()]); - } else if (Field.COLOR.match(currentFieldName, parser.getDeprecationHandler())) { - try { - color = TextTemplate.parse(parser); - } catch (ElasticsearchParseException | IllegalArgumentException e) { - throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", e, - Field.COLOR.getPreferredName()); - } - } else if (Field.NOTIFY.match(currentFieldName, parser.getDeprecationHandler())) { - if (token == XContentParser.Token.VALUE_BOOLEAN) { - notify = parser.booleanValue(); - } else { - throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field, expected a " + - "boolean value but found [{}]", Field.NOTIFY.getPreferredName(), token); - } - } else if (Field.BODY.match(currentFieldName, parser.getDeprecationHandler())) { - try { - body = TextTemplate.parse(parser); - } catch (ElasticsearchParseException pe) { - throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", pe, - Field.BODY.getPreferredName()); - } - } else if (Field.FORMAT.match(currentFieldName, parser.getDeprecationHandler())) { - try { - messageFormat = HipChatMessage.Format.parse(parser); - } catch (IllegalArgumentException ilae) { - throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", ilae, - Field.FORMAT.getPreferredName()); - } - } else { - throw new ElasticsearchParseException("failed to parse hipchat message. 
unexpected field [{}]", currentFieldName); - } - } - - if (body == null) { - throw new ElasticsearchParseException("failed to parse hipchat message. missing required [{}] field", - Field.BODY.getPreferredName()); - } - - return new HipChatMessage.Template(body, rooms, users, from, messageFormat, color, notify); - } - - public static class Builder { - - final TextTemplate body; - final List rooms = new ArrayList<>(); - final List users = new ArrayList<>(); - @Nullable String from; - @Nullable Format format; - @Nullable TextTemplate color; - @Nullable Boolean notify; - - public Builder(TextTemplate body) { - this.body = body; - } - - public Builder addRooms(TextTemplate... rooms) { - this.rooms.addAll(Arrays.asList(rooms)); - return this; - } - - public Builder addUsers(TextTemplate... users) { - this.users.addAll(Arrays.asList(users)); - return this; - } - - public Builder setFrom(String from) { - this.from = from; - return this; - } - - public Builder setFormat(Format format) { - this.format = format; - return this; - } - - public Builder setColor(TextTemplate color) { - this.color = color; - return this; - } - - public Builder setNotify(boolean notify) { - this.notify = notify; - return this; - } - - public Template build() { - return new Template( - body, - rooms.isEmpty() ? null : rooms.toArray(new TextTemplate[rooms.size()]), - users.isEmpty() ? null : users.toArray(new TextTemplate[users.size()]), - from, - format, - color, - notify); - } - } - } - - - public enum Color { - YELLOW, GREEN, RED, PURPLE, GRAY, RANDOM; - - private final TextTemplate template = new TextTemplate(name()); - - public TextTemplate asTemplate() { - return template; - } - - public String value() { - return name().toLowerCase(Locale.ROOT); - } - - public static Color parse(XContentParser parser) throws IOException { - return Color.valueOf(parser.text().toUpperCase(Locale.ROOT)); - } - - public static Color resolve(String value, Color defaultValue) { - if (value == null) { - return defaultValue; - } - return Color.valueOf(value.toUpperCase(Locale.ROOT)); - } - - public static Color resolve(Settings settings, String setting, Color defaultValue) { - return resolve(settings.get(setting), defaultValue); - } - - public static boolean validate(String value) { - try { - Color.valueOf(value.toUpperCase(Locale.ROOT)); - return true; - } catch (IllegalArgumentException ilae) { - return false; - } - } - } - - public enum Format { - - TEXT, - HTML; - - private final TextTemplate template = new TextTemplate(name()); - - public TextTemplate asTemplate() { - return template; - } - - public String value() { - return name().toLowerCase(Locale.ROOT); - } - - public static Format parse(XContentParser parser) throws IOException { - return Format.valueOf(parser.text().toUpperCase(Locale.ROOT)); - } - - public static Format resolve(String value, Format defaultValue) { - if (value == null) { - return defaultValue; - } - return Format.valueOf(value.toUpperCase(Locale.ROOT)); - } - - public static Format resolve(Settings settings, String setting, Format defaultValue) { - return resolve(settings.get(setting), defaultValue); - } - - public static boolean validate(String value) { - try { - Format.valueOf(value.toUpperCase(Locale.ROOT)); - return true; - } catch (IllegalArgumentException ilae) { - return false; - } - } - } - - public interface Field { - ParseField ROOM = new ParseField("room"); - ParseField USER = new ParseField("user"); - ParseField BODY = new ParseField("body"); - ParseField FROM = new ParseField("from"); - ParseField COLOR 
= new ParseField("color"); - ParseField NOTIFY = new ParseField("notify"); - ParseField FORMAT = new ParseField("format"); - } -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatServer.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatServer.java deleted file mode 100644 index 4a40a1d47fbc3..0000000000000 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatServer.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.watcher.common.http.HttpRequest; - -public class HipChatServer { - - public static final String HOST_SETTING = "host"; - public static final String PORT_SETTING = "port"; - - public static final HipChatServer DEFAULT = new HipChatServer("api.hipchat.com", 443, null); - - private final String host; - private final int port; - private final HipChatServer fallback; - - public HipChatServer(Settings settings) { - this(settings, DEFAULT); - } - - public HipChatServer(Settings settings, HipChatServer fallback) { - this(settings.get(HOST_SETTING, null), settings.getAsInt(PORT_SETTING, -1), fallback); - } - - public HipChatServer(String host, int port, HipChatServer fallback) { - this.host = host; - this.port = port; - this.fallback = fallback; - } - - public String host() { - return host != null ? host : fallback.host(); - } - - public int port() { - return port > 0 ? port : fallback.port(); - } - - public HipChatServer fallback() { - return fallback != null ? fallback : DEFAULT; - } - - public HipChatServer rebuild(Settings settings, HipChatServer fallback) { - return new HipChatServer(settings.get(HOST_SETTING, host), settings.getAsInt(PORT_SETTING, port), fallback); - } - - public synchronized HttpRequest.Builder httpRequest() { - return HttpRequest.builder(host(), port()); - } - -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java deleted file mode 100644 index efa403fd7697e..0000000000000 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.SecureSetting; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.xpack.watcher.common.http.HttpClient; -import org.elasticsearch.xpack.watcher.notification.NotificationService; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * A component to store hipchat credentials. - */ -public class HipChatService extends NotificationService { - - private static final Setting SETTING_DEFAULT_ACCOUNT = - Setting.simpleString("xpack.notification.hipchat.default_account", Setting.Property.Dynamic, Setting.Property.NodeScope); - - static final Setting SETTING_DEFAULT_HOST = - Setting.simpleString("xpack.notification.hipchat.host", Setting.Property.Dynamic, Setting.Property.NodeScope); - - static final Setting SETTING_DEFAULT_PORT = - Setting.intSetting("xpack.notification.hipchat.port", 443, Setting.Property.Dynamic, Setting.Property.NodeScope); - - private static final Setting.AffixSetting SETTING_AUTH_TOKEN_SECURE = - Setting.affixKeySetting("xpack.notification.hipchat.account.", "secure_auth_token", - (key) -> SecureSetting.secureString(key, null)); - - private static final Setting.AffixSetting SETTING_PROFILE = - Setting.affixKeySetting("xpack.notification.hipchat.account.", "profile", - (key) -> Setting.simpleString(key, Setting.Property.Dynamic, Setting.Property.NodeScope)); - - private static final Setting.AffixSetting SETTING_ROOM = - Setting.affixKeySetting("xpack.notification.hipchat.account.", "room", - (key) -> Setting.simpleString(key, Setting.Property.Dynamic, Setting.Property.NodeScope)); - - private static final Setting.AffixSetting SETTING_HOST = - Setting.affixKeySetting("xpack.notification.hipchat.account.", "host", - (key) -> Setting.simpleString(key, Setting.Property.Dynamic, Setting.Property.NodeScope)); - - private static final Setting.AffixSetting SETTING_PORT = - Setting.affixKeySetting("xpack.notification.hipchat.account.", "port", - (key) -> Setting.intSetting(key, 443, Setting.Property.Dynamic, Setting.Property.NodeScope)); - - private static final Setting.AffixSetting SETTING_MESSAGE_DEFAULTS = - Setting.affixKeySetting("xpack.notification.hipchat.account.", "message", - (key) -> Setting.groupSetting(key + ".", Setting.Property.Dynamic, Setting.Property.NodeScope)); - - private static final Logger logger = LogManager.getLogger(HipChatService.class); - - private final HttpClient httpClient; - private HipChatServer defaultServer; - - public HipChatService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { - super("hipchat", settings, clusterSettings, HipChatService.getDynamicSettings(), HipChatService.getSecureSettings()); - this.httpClient = httpClient; - // ensure logging of setting changes - clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); - clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_HOST, (s) -> {}); - clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_PORT, (s) -> {}); - clusterSettings.addAffixUpdateConsumer(SETTING_PROFILE, (s, o) -> {}, (s, o) -> {}); - clusterSettings.addAffixUpdateConsumer(SETTING_ROOM, (s, 
o) -> {}, (s, o) -> {}); - clusterSettings.addAffixUpdateConsumer(SETTING_HOST, (s, o) -> {}, (s, o) -> {}); - clusterSettings.addAffixUpdateConsumer(SETTING_PORT, (s, o) -> {}, (s, o) -> {}); - clusterSettings.addAffixUpdateConsumer(SETTING_MESSAGE_DEFAULTS, (s, o) -> {}, (s, o) -> {}); - // do an initial load - reload(settings); - } - - @Override - public synchronized void reload(Settings settings) { - defaultServer = new HipChatServer(settings.getByPrefix("xpack.notification.hipchat.")); - super.reload(settings); - } - - @Override - protected HipChatAccount createAccount(String name, Settings accountSettings) { - HipChatAccount.Profile profile = HipChatAccount.Profile.resolve(accountSettings, "profile", null); - if (profile == null) { - throw new SettingsException("missing [profile] setting for hipchat account [" + name + "]"); - } - return profile.createAccount(name, accountSettings, defaultServer, httpClient, logger); - } - - private static List> getDynamicSettings() { - return Arrays.asList(SETTING_DEFAULT_ACCOUNT, SETTING_PROFILE, SETTING_ROOM, SETTING_MESSAGE_DEFAULTS, - SETTING_DEFAULT_HOST, SETTING_DEFAULT_PORT, SETTING_HOST, SETTING_PORT); - } - - private static List> getSecureSettings() { - return Arrays.asList(SETTING_AUTH_TOKEN_SECURE); - } - - public static List> getSettings() { - List> allSettings = new ArrayList>(getDynamicSettings()); - allSettings.addAll(getSecureSettings()); - return allSettings; - } -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccount.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccount.java deleted file mode 100644 index c33e788b61451..0000000000000 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccount.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatAction; -import org.elasticsearch.xpack.watcher.common.http.HttpClient; -import org.elasticsearch.xpack.watcher.common.http.HttpMethod; -import org.elasticsearch.xpack.watcher.common.http.HttpProxy; -import org.elasticsearch.xpack.watcher.common.http.HttpRequest; -import org.elasticsearch.xpack.watcher.common.http.HttpResponse; -import org.elasticsearch.xpack.watcher.common.http.Scheme; -import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage.Color; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage.Format; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -public class IntegrationAccount extends HipChatAccount { - - public static final String TYPE = "integration"; - - final String room; - final Defaults defaults; - - public IntegrationAccount(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, Logger logger) { - super(name, Profile.INTEGRATION, settings, defaultServer, httpClient, logger); - List rooms = settings.getAsList(ROOM_SETTING, null); - if (rooms == null || rooms.isEmpty()) { - throw new SettingsException("invalid hipchat account [" + name + "]. missing required [" + ROOM_SETTING + "] setting for [" + - TYPE + "] account profile"); - } - if (rooms.size() > 1) { - throw new SettingsException("invalid hipchat account [" + name + "]. [" + ROOM_SETTING + "] setting for [" + TYPE + "] " + - "account must only be set with a single value"); - } - this.room = rooms.get(0); - defaults = new Defaults(settings); - } - - @Override - public String type() { - return TYPE; - } - - @Override - public void validateParsedTemplate(String watchId, String actionId, HipChatMessage.Template template) throws SettingsException { - if (template.rooms != null) { - throw new ElasticsearchParseException("invalid [" + HipChatAction.TYPE + "] action for [" + watchId + "/" + actionId + "] " + - "action. [" + name + "] hipchat account doesn't support custom rooms"); - } - if (template.users != null) { - throw new ElasticsearchParseException("invalid [" + HipChatAction.TYPE + "] action for [" + watchId + "/" + actionId + "] " + - "action. [" + name + "] hipchat account doesn't support user private messages"); - } - if (template.from != null) { - throw new ElasticsearchParseException("invalid [" + HipChatAction.TYPE + "] action for [" + watchId + "/" + actionId + "] " + - "action. [" + name + "] hipchat account doesn't support custom `from` fields"); - } - } - - @Override - public HipChatMessage render(String watchId, String actionId, TextTemplateEngine engine, HipChatMessage.Template template, - Map model) { - String message = engine.render(template.body, model); - Color color = template.color != null ? Color.resolve(engine.render(template.color, model), defaults.color) : defaults.color; - Boolean notify = template.notify != null ? template.notify : defaults.notify; - Format messageFormat = template.format != null ? 
template.format : defaults.format; - return new HipChatMessage(message, null, null, null, messageFormat, color, notify); - } - - @Override - public SentMessages send(HipChatMessage message, @Nullable HttpProxy proxy) { - List sentMessages = new ArrayList<>(); - HttpRequest request = buildRoomRequest(room, message, proxy); - try { - HttpResponse response = httpClient.execute(request); - sentMessages.add(SentMessages.SentMessage.responded(room, SentMessages.SentMessage.TargetType.ROOM, message, request, - response)); - } catch (Exception e) { - sentMessages.add(SentMessages.SentMessage.error(room, SentMessages.SentMessage.TargetType.ROOM, message, e)); - } - return new SentMessages(name, sentMessages); - } - - private HttpRequest buildRoomRequest(String room, final HipChatMessage message, HttpProxy proxy) { - String urlEncodedRoom = HttpRequest.encodeUrl(room); - HttpRequest.Builder builder = server.httpRequest() - .method(HttpMethod.POST) - .scheme(Scheme.HTTPS) - .path("/v2/room/" + urlEncodedRoom + "/notification") - .setHeader("Content-Type", "application/json") - .setHeader("Authorization", "Bearer " + authToken) - .body(Strings.toString((xbuilder, params) -> { - xbuilder.field("message", message.body); - if (message.format != null) { - xbuilder.field("message_format", message.format.value()); - } - if (message.notify != null) { - xbuilder.field("notify", message.notify); - } - if (message.color != null) { - xbuilder.field("color", String.valueOf(message.color.value())); - } - return xbuilder; - })); - if (proxy != null) { - builder.proxy(proxy); - } - return builder.build(); - } - - static class Defaults { - - @Nullable final Format format; - @Nullable final Color color; - @Nullable final Boolean notify; - - Defaults(Settings settings) { - this.format = Format.resolve(settings, DEFAULT_FORMAT_SETTING, null); - this.color = Color.resolve(settings, DEFAULT_COLOR_SETTING, null); - this.notify = settings.getAsBoolean(DEFAULT_NOTIFY_SETTING, null); - } - } -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/SentMessages.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/SentMessages.java deleted file mode 100644 index ed05c4fe5ad3c..0000000000000 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/SentMessages.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; -import org.elasticsearch.xpack.watcher.common.http.HttpRequest; -import org.elasticsearch.xpack.watcher.common.http.HttpResponse; - -import java.io.IOException; -import java.io.InputStream; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; - -public class SentMessages implements ToXContentObject, Iterable { - - private static final ParseField ACCOUNT = new ParseField("account"); - private static final ParseField SENT_MESSAGES = new ParseField("sent_messages"); - - private String accountName; - private List messages; - - public SentMessages(String accountName, List messages) { - this.accountName = accountName; - this.messages = messages; - } - - public String getAccountName() { - return accountName; - } - - @Override - public Iterator iterator() { - return messages.iterator(); - } - - public int count() { - return messages.size(); - } - - public List asList() { - return Collections.unmodifiableList(messages); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(ACCOUNT.getPreferredName(), accountName); - builder.startArray(SENT_MESSAGES.getPreferredName()); - for (SentMessage message : messages) { - message.toXContent(builder, params); - } - builder.endArray(); - return builder.endObject(); - } - - public static class SentMessage implements ToXContentObject { - - private static final ParseField STATUS = new ParseField("status"); - private static final ParseField REQUEST = new ParseField("request"); - private static final ParseField RESPONSE = new ParseField("response"); - private static final ParseField MESSAGE = new ParseField("message"); - - public enum TargetType { - ROOM, USER; - - final String fieldName = new String(name().toLowerCase(Locale.ROOT)); - } - - final String targetName; - final TargetType targetType; - final HipChatMessage message; - @Nullable final HttpRequest request; - @Nullable final HttpResponse response; - @Nullable final Exception exception; - - public static SentMessage responded(String targetName, TargetType targetType, HipChatMessage message, HttpRequest request, - HttpResponse response) { - return new SentMessage(targetName, targetType, message, request, response, null); - } - - public static SentMessage error(String targetName, TargetType targetType, HipChatMessage message, Exception e) { - return new SentMessage(targetName, targetType, message, null, null, e); - } - - private SentMessage(String targetName, TargetType targetType, HipChatMessage message, HttpRequest request, HttpResponse response, - Exception exception) { - this.targetName = targetName; - this.targetType = targetType; - this.message = message; - this.request = request; - this.response = response; - this.exception = exception; - } - - public HttpRequest getRequest() { - return request; - } - - public HttpResponse getResponse() { - return response; - } - - public Exception getException() { - return exception; - } - - public boolean isSuccess() { - return response != null && response.status() >= 200 && response.status() < 300; - } - - 
@Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - boolean success = isSuccess(); - builder.field(STATUS.getPreferredName(), success ? "success" : "failure"); - if (success == false) { - if (request != null) { - if (WatcherParams.hideSecrets(params)) { - // this writes out the request to the byte array output stream with the correct excludes for hipchat - try (InputStream is = HttpRequest.filterToXContent(request, builder.contentType().xContent(), - params, "params.auth_token")) { - builder.rawField(REQUEST.getPreferredName(), is, builder.contentType()); - } - } else { - builder.field(REQUEST.getPreferredName()); - request.toXContent(builder, params); - } - } - if (response != null) { - builder.field(RESPONSE.getPreferredName()); - response.toXContent(builder, params); - } - if (exception != null) { - ElasticsearchException.generateFailureXContent(builder, params, exception, true); - } - } - builder.field(targetType.fieldName, targetName); - builder.field(MESSAGE.getPreferredName()); - message.toXContent(builder, params, false); - return builder.endObject(); - } - } -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/UserAccount.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/UserAccount.java deleted file mode 100644 index c0b89cc66ec5f..0000000000000 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/UserAccount.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.xpack.watcher.common.http.HttpClient; -import org.elasticsearch.xpack.watcher.common.http.HttpMethod; -import org.elasticsearch.xpack.watcher.common.http.HttpProxy; -import org.elasticsearch.xpack.watcher.common.http.HttpRequest; -import org.elasticsearch.xpack.watcher.common.http.HttpResponse; -import org.elasticsearch.xpack.watcher.common.http.Scheme; -import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage.Color; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage.Format; -import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatAction; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -public class UserAccount extends HipChatAccount { - - public static final String TYPE = "user"; - - final Defaults defaults; - - public UserAccount(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, Logger logger) { - super(name, Profile.USER, settings, defaultServer, httpClient, logger); - defaults = new Defaults(settings); - } - - @Override - public String type() { - return TYPE; - } - - @Override - public void validateParsedTemplate(String watchId, String actionId, HipChatMessage.Template template) throws SettingsException { - if (template.from != null) { - throw new ElasticsearchParseException("invalid [" + HipChatAction.TYPE + "] action for [" + watchId + "/" + actionId + "]. [" - + name + "] hipchat account doesn't support custom `from` fields"); - } - } - - @Override - public HipChatMessage render(String watchId, String actionId, TextTemplateEngine engine, HipChatMessage.Template template, - Map model) { - String[] rooms = defaults.rooms; - if (template.rooms != null) { - rooms = new String[template.rooms.length]; - for (int i = 0; i < template.rooms.length; i++) { - rooms[i] = engine.render(template.rooms[i], model); - } - } - String[] users = defaults.users; - if (template.users != null) { - users = new String[template.users.length]; - for (int i = 0; i < template.users.length; i++) { - users[i] = engine.render(template.users[i], model); - } - } - String message = engine.render(template.body, model); - Color color = Color.resolve(engine.render(template.color, model), defaults.color); - Boolean notify = template.notify != null ? template.notify : defaults.notify; - Format messageFormat = template.format != null ? 
template.format : defaults.format; - return new HipChatMessage(message, rooms, users, null, messageFormat, color, notify); - } - - @Override - public SentMessages send(HipChatMessage message, HttpProxy proxy) { - List sentMessages = new ArrayList<>(); - if (message.rooms != null) { - for (String room : message.rooms) { - HttpRequest request = buildRoomRequest(room, message, proxy); - try { - HttpResponse response = httpClient.execute(request); - sentMessages.add(SentMessages.SentMessage.responded(room, SentMessages.SentMessage.TargetType.ROOM, message, request, - response)); - } catch (IOException e) { - logger.error("failed to execute hipchat api http request", e); - sentMessages.add(SentMessages.SentMessage.error(room, SentMessages.SentMessage.TargetType.ROOM, message, e)); - } - } - } - if (message.users != null) { - for (String user : message.users) { - HttpRequest request = buildUserRequest(user, message, proxy); - try { - HttpResponse response = httpClient.execute(request); - sentMessages.add(SentMessages.SentMessage.responded(user, SentMessages.SentMessage.TargetType.USER, message, request, - response)); - } catch (Exception e) { - logger.error("failed to execute hipchat api http request", e); - sentMessages.add(SentMessages.SentMessage.error(user, SentMessages.SentMessage.TargetType.USER, message, e)); - } - } - } - return new SentMessages(name, sentMessages); - } - - public HttpRequest buildRoomRequest(String room, final HipChatMessage message, HttpProxy proxy) { - String urlEncodedRoom = encodeRoom(room); - HttpRequest.Builder builder = server.httpRequest() - .method(HttpMethod.POST) - .scheme(Scheme.HTTPS) - .path("/v2/room/" + urlEncodedRoom + "/notification") - .setHeader("Content-Type", "application/json") - .setHeader("Authorization", "Bearer " + authToken) - .body(Strings.toString((xbuilder, params) -> { - xbuilder.field("message", message.body); - if (message.format != null) { - xbuilder.field("message_format", message.format.value()); - } - if (message.notify != null) { - xbuilder.field("notify", message.notify); - } - if (message.color != null) { - xbuilder.field("color", String.valueOf(message.color.value())); - } - return xbuilder; - })); - if (proxy != null) { - builder.proxy(proxy); - } - return builder.build(); - } - - // this specific hipchat API does not accept application-form encoding, but requires real URL encoding - // spaces must not be replaced with a plus, but rather with %20 - // this workaround ensures, that this happens - private String encodeRoom(String text) { - try { - return new URI("//", "", "", text, null).getRawQuery(); - } catch (URISyntaxException e) { - throw new IllegalArgumentException("failed to URL encode text [" + text + "]", e); - } - - } - - public HttpRequest buildUserRequest(String user, final HipChatMessage message, HttpProxy proxy) { - HttpRequest.Builder builder = server.httpRequest() - .method(HttpMethod.POST) - .scheme(Scheme.HTTPS) - .path("/v2/user/" + user + "/message") - .setHeader("Content-Type", "application/json") - .setHeader("Authorization", "Bearer " + authToken) - .body(Strings.toString((xbuilder, params) -> { - xbuilder.field("message", message.body); - if (message.format != null) { - xbuilder.field("message_format", message.format.value()); - } - if (message.notify != null) { - xbuilder.field("notify", message.notify); - } - return xbuilder; - })); - if (proxy != null) { - builder.proxy(proxy); - } - return builder.build(); - } - - static class Defaults { - - @Nullable final String[] rooms; - @Nullable final String[] 
users; - @Nullable final Format format; - @Nullable final Color color; - @Nullable final Boolean notify; - - Defaults(Settings settings) { - List rooms = settings.getAsList(DEFAULT_ROOM_SETTING, null); - this.rooms = rooms == null ? null : rooms.toArray(Strings.EMPTY_ARRAY); - List users = settings.getAsList(DEFAULT_USER_SETTING, null); - this.users = users == null ? null : users.toArray(Strings.EMPTY_ARRAY); - this.format = Format.resolve(settings, DEFAULT_FORMAT_SETTING, null); - this.color = Color.resolve(settings, DEFAULT_COLOR_SETTING, null); - this.notify = settings.getAsBoolean(DEFAULT_NOTIFY_SETTING, null); - } - } -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/V1Account.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/V1Account.java deleted file mode 100644 index 084cff2d0947c..0000000000000 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/V1Account.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.watcher.common.http.HttpClient; -import org.elasticsearch.xpack.watcher.common.http.HttpMethod; -import org.elasticsearch.xpack.watcher.common.http.HttpProxy; -import org.elasticsearch.xpack.watcher.common.http.HttpRequest; -import org.elasticsearch.xpack.watcher.common.http.HttpResponse; -import org.elasticsearch.xpack.watcher.common.http.Scheme; -import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage.Color; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage.Format; -import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatAction; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -public class V1Account extends HipChatAccount { - - public static final String TYPE = "v1"; - - final Defaults defaults; - - public V1Account(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, Logger logger) { - super(name, Profile.V1, settings, defaultServer, httpClient, logger); - defaults = new Defaults(settings); - } - - @Override - public String type() { - return TYPE; - } - - @Override - public void validateParsedTemplate(String watchId, String actionId, - HipChatMessage.Template template) throws ElasticsearchParseException { - if (template.users != null) { - throw new ElasticsearchParseException("invalid [" + HipChatAction.TYPE + "] action for [" + watchId + "/" + actionId + "]. [" - + name + "] hipchat account doesn't support user private messaging"); - } - if ((template.rooms == null || template.rooms.length == 0) && (defaults.rooms == null || defaults.rooms.length == 0)) { - throw new ElasticsearchParseException("invalid [" + HipChatAction.TYPE + "] action for [" + watchId + "/" + actionId + "]. 
" + - "missing required [" + HipChatMessage.Field.ROOM + "] field for [" + name + "] hipchat account"); - } - } - - @Override - public HipChatMessage render(String watchId, String actionId, TextTemplateEngine engine, HipChatMessage.Template template, - Map model) { - String message = engine.render(template.body, model); - String[] rooms = defaults.rooms; - if (template.rooms != null) { - rooms = new String[template.rooms.length]; - for (int i = 0; i < template.rooms.length; i++) { - rooms[i] = engine.render(template.rooms[i], model); - } - } - String from = template.from != null ? template.from : defaults.from != null ? defaults.from : watchId; - Color color = Color.resolve(engine.render(template.color, model), defaults.color); - Boolean notify = template.notify != null ? template.notify : defaults.notify; - Format messageFormat = template.format != null ? template.format : defaults.format; - return new HipChatMessage(message, rooms, null, from, messageFormat, color, notify); - } - - @Override - public SentMessages send(HipChatMessage message, @Nullable HttpProxy proxy) { - List sentMessages = new ArrayList<>(); - if (message.rooms != null) { - for (String room : message.rooms) { - HttpRequest request = buildRoomRequest(room, message, proxy); - try { - HttpResponse response = httpClient.execute(request); - sentMessages.add(SentMessages.SentMessage.responded(room, SentMessages.SentMessage.TargetType.ROOM, message, request, - response)); - } catch (Exception e) { - logger.error("failed to execute hipchat api http request", e); - sentMessages.add(SentMessages.SentMessage.error(room, SentMessages.SentMessage.TargetType.ROOM, message, e)); - } - } - } - return new SentMessages(name, sentMessages); - } - - public HttpRequest buildRoomRequest(String room, HipChatMessage message, HttpProxy proxy) { - HttpRequest.Builder builder = server.httpRequest(); - builder.method(HttpMethod.POST); - builder.scheme(Scheme.HTTPS); - builder.path("/v1/rooms/message"); - builder.setHeader("Content-Type", "application/x-www-form-urlencoded"); - builder.setParam("format", "json"); - builder.setParam("auth_token", authToken); - if (proxy != null) { - builder.proxy(proxy); - } - StringBuilder body = new StringBuilder(); - body.append("room_id=").append(HttpRequest.encodeUrl(room)); - body.append("&from=").append(HttpRequest.encodeUrl(message.from)); - body.append("&message=").append(HttpRequest.encodeUrl(message.body)); - if (message.format != null) { - body.append("&message_format=").append(message.format.value()); - } - if (message.color != null) { - body.append("&color=").append(message.color.value()); - } - if (message.notify != null) { - body.append("¬ify=").append(message.notify ? "1" : "0"); - } - builder.body(body.toString()); - return builder.build(); - } - - static class Defaults { - - @Nullable final String[] rooms; - @Nullable final String from; - @Nullable final Format format; - @Nullable final Color color; - @Nullable final Boolean notify; - - Defaults(Settings settings) { - List rooms = settings.getAsList(DEFAULT_ROOM_SETTING, null); - this.rooms = rooms == null ? 
null : rooms.toArray(Strings.EMPTY_ARRAY); - this.from = settings.get(DEFAULT_FROM_SETTING); - this.format = Format.resolve(settings, DEFAULT_FORMAT_SETTING, null); - this.color = Color.resolve(settings, DEFAULT_COLOR_SETTING, null); - this.notify = settings.getAsBoolean(DEFAULT_NOTIFY_SETTING, null); - } - } -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java index 0fdb2b3a17d13..4ebcc5a8f4173 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java @@ -17,12 +17,14 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyUtils; @@ -46,29 +48,35 @@ public class WatcherIndexTemplateRegistry implements ClusterStateListener { public static final TemplateConfig TEMPLATE_CONFIG_TRIGGERED_WATCHES = new TemplateConfig( WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME, "triggered-watches"); public static final TemplateConfig TEMPLATE_CONFIG_WATCH_HISTORY = new TemplateConfig( - WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME, "watch-history"); + WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME, "watch-history"); + public static final TemplateConfig TEMPLATE_CONFIG_WATCH_HISTORY_NO_ILM = new TemplateConfig( + WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME_NO_ILM, "watch-history-no-ilm"); public static final TemplateConfig TEMPLATE_CONFIG_WATCHES = new TemplateConfig( WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME, "watches"); public static final TemplateConfig[] TEMPLATE_CONFIGS = new TemplateConfig[]{ - TEMPLATE_CONFIG_TRIGGERED_WATCHES, TEMPLATE_CONFIG_WATCH_HISTORY, TEMPLATE_CONFIG_WATCHES + TEMPLATE_CONFIG_TRIGGERED_WATCHES, TEMPLATE_CONFIG_WATCH_HISTORY, TEMPLATE_CONFIG_WATCHES + }; + public static final TemplateConfig[] TEMPLATE_CONFIGS_NO_ILM = new TemplateConfig[]{ + TEMPLATE_CONFIG_TRIGGERED_WATCHES, TEMPLATE_CONFIG_WATCH_HISTORY_NO_ILM, TEMPLATE_CONFIG_WATCHES }; public static final PolicyConfig POLICY_WATCH_HISTORY = new PolicyConfig("watch-history-ilm-policy", "/watch-history-ilm-policy.json"); private static final Logger logger = LogManager.getLogger(WatcherIndexTemplateRegistry.class); + private final Settings nodeSettings; private final Client client; private final ThreadPool threadPool; - private final TemplateConfig[] indexTemplates; private final NamedXContentRegistry xContentRegistry; private final ConcurrentMap templateCreationsInProgress = new ConcurrentHashMap<>(); private final AtomicBoolean historyPolicyCreationInProgress = new AtomicBoolean(); - 
public WatcherIndexTemplateRegistry(ClusterService clusterService, ThreadPool threadPool, Client client, + public WatcherIndexTemplateRegistry(Settings nodeSettings, ClusterService clusterService, + ThreadPool threadPool, Client client, NamedXContentRegistry xContentRegistry) { + this.nodeSettings = nodeSettings; this.client = client; this.threadPool = threadPool; - this.indexTemplates = TEMPLATE_CONFIGS; this.xContentRegistry = xContentRegistry; clusterService.addListener(this); } @@ -100,6 +108,8 @@ public void clusterChanged(ClusterChangedEvent event) { } private void addTemplatesIfMissing(ClusterState state) { + boolean ilmSupported = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(this.nodeSettings); + final TemplateConfig[] indexTemplates = ilmSupported ? TEMPLATE_CONFIGS : TEMPLATE_CONFIGS_NO_ILM; for (TemplateConfig template : indexTemplates) { final String templateName = template.getTemplateName(); final AtomicBoolean creationCheck = templateCreationsInProgress.computeIfAbsent(templateName, key -> new AtomicBoolean(false)); @@ -147,7 +157,8 @@ LifecyclePolicy loadWatcherHistoryPolicy() { } private void addIndexLifecyclePolicyIfMissing(ClusterState state) { - if (historyPolicyCreationInProgress.compareAndSet(false, true)) { + boolean ilmSupported = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(this.nodeSettings); + if (ilmSupported && historyPolicyCreationInProgress.compareAndSet(false, true)) { final LifecyclePolicy policyOnDisk = loadWatcherHistoryPolicy(); Optional maybeMeta = Optional.ofNullable(state.metaData().custom(IndexLifecycleMetadata.TYPE)); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchAction.java index 6683f085fe228..36643ed18f8d8 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchAction.java @@ -75,7 +75,7 @@ protected void doExecute(AckWatchRequest request, ActionListener listener) { try { ZonedDateTime now = clock.instant().atZone(ZoneOffset.UTC); - UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, Watch.DOC_TYPE, request.getWatchId()); + UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, request.getWatchId()); updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); XContentBuilder builder = activateWatchBuilder(request.isActivate(), now); updateRequest.doc(builder); @@ -72,7 +72,7 @@ protected void doExecute(ActivateWatchRequest request, ActionListenerwrap(updateResponse -> { - GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getWatchId()) + GetRequest getRequest = new GetRequest(Watch.INDEX, request.getWatchId()) .preference(Preference.LOCAL.type()).realtime(true); executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, getRequest, diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/delete/TransportDeleteWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/delete/TransportDeleteWatchAction.java index af98f680aead2..2f97fcb4f18aa 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/delete/TransportDeleteWatchAction.java +++ 
b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/delete/TransportDeleteWatchAction.java @@ -42,7 +42,7 @@ public TransportDeleteWatchAction(TransportService transportService, ActionFilte @Override protected void doExecute(Task task, DeleteWatchRequest request, ActionListener listener) { - DeleteRequest deleteRequest = new DeleteRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId()); + DeleteRequest deleteRequest = new DeleteRequest(Watch.INDEX, request.getId()); deleteRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, deleteRequest, ActionListener.wrap(deleteResponse -> { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/execute/TransportExecuteWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/execute/TransportExecuteWatchAction.java index 9e1f160b06e4d..a049267594611 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/execute/TransportExecuteWatchAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/execute/TransportExecuteWatchAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.history.WatchRecord; import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchAction; @@ -81,7 +82,7 @@ public TransportExecuteWatchAction(TransportService transportService, ThreadPool @Override protected void doExecute(ExecuteWatchRequest request, ActionListener listener) { if (request.getId() != null) { - GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId()) + GetRequest getRequest = new GetRequest(Watch.INDEX, request.getId()) .preference(Preference.LOCAL.type()).realtime(true); executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, getRequest, @@ -110,48 +111,63 @@ protected void doExecute(ExecuteWatchRequest request, ActionListener listener, - Watch watch, boolean knownWatch) { - - threadPool.executor(XPackField.WATCHER).submit(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); + private void executeWatch( + final ExecuteWatchRequest request, + final ActionListener listener, + final Watch watch, + final boolean knownWatch) { + try { + /* + * Ensure that the headers from the incoming request are used instead of those of the stored watch; otherwise the watch would run + * as the user who stored the watch, but it needs to run as the user who executes this request.
+ */ + final Map headers = new HashMap<>(threadPool.getThreadContext().getHeaders()); + watch.status().setHeaders(headers); + + final String triggerType = watch.trigger().type(); + final TriggerEvent triggerEvent = triggerService.simulateEvent(triggerType, watch.id(), request.getTriggerData()); + + final ManualExecutionContext.Builder ctxBuilder = ManualExecutionContext.builder( + watch, + knownWatch, + new ManualTriggerEvent(triggerEvent.jobName(), triggerEvent), executionService.defaultThrottlePeriod()); + + final ZonedDateTime executionTime = clock.instant().atZone(ZoneOffset.UTC); + ctxBuilder.executionTime(executionTime); + for (final Map.Entry entry : request.getActionModes().entrySet()) { + ctxBuilder.actionMode(entry.getKey(), entry.getValue()); + } + if (request.getAlternativeInput() != null) { + ctxBuilder.withInput(new SimpleInput.Result(new Payload.Simple(request.getAlternativeInput()))); } + if (request.isIgnoreCondition()) { + ctxBuilder.withCondition(InternalAlwaysCondition.RESULT_INSTANCE); + } + ctxBuilder.recordExecution(request.isRecordExecution()); + final WatchExecutionContext ctx = ctxBuilder.build(); - @Override - protected void doRun() throws Exception { - // ensure that the headers from the incoming request are used instead those of the stored watch - // otherwise the watch would run as the user who stored the watch, but it needs to be run as the user who - // executes this request - Map headers = new HashMap<>(threadPool.getThreadContext().getHeaders()); - watch.status().setHeaders(headers); + // use execute so that the runnable is not wrapped in a RunnableFuture + threadPool.executor(XPackField.WATCHER).execute(new ExecutionService.WatchExecutionTask(ctx, new AbstractRunnable() { - String triggerType = watch.trigger().type(); - TriggerEvent triggerEvent = triggerService.simulateEvent(triggerType, watch.id(), request.getTriggerData()); + @Override + public void onFailure(final Exception e) { + listener.onFailure(e); + } - ManualExecutionContext.Builder ctxBuilder = ManualExecutionContext.builder(watch, knownWatch, - new ManualTriggerEvent(triggerEvent.jobName(), triggerEvent), executionService.defaultThrottlePeriod()); + @Override + protected void doRun() throws Exception { + final WatchRecord record = executionService.execute(ctx); + final XContentBuilder builder = XContentFactory.jsonBuilder(); - ZonedDateTime executionTime = clock.instant().atZone(ZoneOffset.UTC); - ctxBuilder.executionTime(executionTime); - for (Map.Entry entry : request.getActionModes().entrySet()) { - ctxBuilder.actionMode(entry.getKey(), entry.getValue()); + record.toXContent(builder, WatcherParams.builder().hideSecrets(true).debug(request.isDebug()).build()); + listener.onResponse(new ExecuteWatchResponse(record.id().value(), BytesReference.bytes(builder), XContentType.JSON)); } - if (request.getAlternativeInput() != null) { - ctxBuilder.withInput(new SimpleInput.Result(new Payload.Simple(request.getAlternativeInput()))); - } - if (request.isIgnoreCondition()) { - ctxBuilder.withCondition(InternalAlwaysCondition.RESULT_INSTANCE); - } - ctxBuilder.recordExecution(request.isRecordExecution()); - WatchRecord record = executionService.execute(ctxBuilder.build()); - XContentBuilder builder = XContentFactory.jsonBuilder(); + })); + } catch (final Exception e) { + listener.onFailure(e); + } + - record.toXContent(builder, WatcherParams.builder().hideSecrets(true).debug(request.isDebug()).build()); - listener.onResponse(new ExecuteWatchResponse(record.id().value(), BytesReference.bytes(builder), 
XContentType.JSON)); - } - }); } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/get/TransportGetWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/get/TransportGetWatchAction.java index 85f899b915e6e..b8d6be360de9a 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/get/TransportGetWatchAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/get/TransportGetWatchAction.java @@ -52,7 +52,7 @@ public TransportGetWatchAction(TransportService transportService, ActionFilters @Override protected void doExecute(GetWatchRequest request, ActionListener listener) { - GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId()) + GetRequest getRequest = new GetRequest(Watch.INDEX, request.getId()) .preference(Preference.LOCAL.type()).realtime(true); executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, getRequest, @@ -61,7 +61,7 @@ protected void doExecute(GetWatchRequest request, ActionListener engines; - private final Map perWatchStats = new HashMap<>(); + private final Map perWatchStats = new ConcurrentHashMap<>(); public TriggerService(Set engines) { Map builder = new HashMap<>(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java new file mode 100644 index 0000000000000..237c0a2bdf153 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherConcreteIndexTests.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.util.Locale; + +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.noneInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.greaterThan; + +public class WatcherConcreteIndexTests extends AbstractWatcherIntegrationTestCase { + + public void testCanUseAnyConcreteIndexName() throws Exception { + String newWatcherIndexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + String watchResultsIndex = randomAlphaOfLength(11).toLowerCase(Locale.ROOT); + createIndex(watchResultsIndex); + + stopWatcher(); + replaceWatcherIndexWithRandomlyNamedIndex(Watch.INDEX, newWatcherIndexName); + ensureGreen(newWatcherIndexName); + startWatcher(); + + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("mywatch").setSource(watchBuilder() + .trigger(schedule(interval("3s"))) + .input(noneInput()) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("indexer", indexAction(watchResultsIndex, "_doc"))) + .get(); + + assertTrue(putWatchResponse.isCreated()); + refresh(); + + timeWarp().trigger("mywatch"); + + assertBusy(() -> { + SearchResponse searchResult = client().prepareSearch(watchResultsIndex).setTrackTotalHits(true).get(); + assertThat((int) searchResult.getHits().getTotalHits().value, greaterThan(0)); + }); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java index 5bc441bfa289a..6cea483995ac9 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; @@ -41,7 +42,6 @@ import org.junit.Before; import java.io.IOException; -import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.BitSet; @@ -95,20 +95,7 @@ public void setup() throws Exception { listener.setConfiguration(new Configuration(Watch.INDEX, map)); } - // - // tests for document level operations - // - public void testPreIndexCheckType() throws Exception { - when(shardId.getIndexName()).thenReturn(Watch.INDEX); - when(operation.type()).thenReturn(randomAlphaOfLength(10)); - - Engine.Index index = listener.preIndex(shardId, operation); - assertThat(index, is(operation)); - verifyZeroInteractions(parser); - } - public void testPreIndexCheckIndex() throws Exception { 
- when(operation.type()).thenReturn(Watch.DOC_TYPE); when(shardId.getIndexName()).thenReturn(randomAlphaOfLength(10)); Engine.Index index = listener.preIndex(shardId, operation); @@ -118,7 +105,6 @@ public void testPreIndexCheckIndex() throws Exception { public void testPreIndexCheckActive() throws Exception { listener.setConfiguration(INACTIVE); - when(operation.type()).thenReturn(Watch.DOC_TYPE); when(shardId.getIndexName()).thenReturn(Watch.INDEX); Engine.Index index = listener.preIndex(shardId, operation); @@ -127,7 +113,6 @@ public void testPreIndexCheckActive() throws Exception { } public void testPreIndex() throws Exception { - when(operation.type()).thenReturn(Watch.DOC_TYPE); when(operation.id()).thenReturn(randomAlphaOfLength(10)); when(operation.source()).thenReturn(BytesArray.EMPTY); when(shardId.getIndexName()).thenReturn(Watch.INDEX); @@ -139,8 +124,7 @@ public void testPreIndex() throws Exception { Engine.Index returnedOperation = listener.preIndex(shardId, operation); assertThat(returnedOperation, is(operation)); - - ZonedDateTime now = clock.instant().atZone(ZoneOffset.UTC); + ZonedDateTime now = DateUtils.nowWithMillisResolution(clock); verify(parser).parseWithSecrets(eq(operation.id()), eq(true), eq(BytesArray.EMPTY), eq(now), anyObject(), anyLong(), anyLong()); if (isNewWatch) { @@ -162,7 +146,6 @@ public void testPreIndexWatchGetsOnlyTriggeredOnceAcrossAllShards() throws Excep Watch watch = mockWatch(id, watchActive, isNewWatch); when(shardId.getIndexName()).thenReturn(Watch.INDEX); - when(operation.type()).thenReturn(Watch.DOC_TYPE); when(parser.parseWithSecrets(anyObject(), eq(true), anyObject(), anyObject(), anyObject(), anyLong(), anyLong())).thenReturn(watch); for (int idx = 0; idx < totalShardCount; idx++) { @@ -203,7 +186,6 @@ private Watch mockWatch(String id, boolean active, boolean isNewWatch) { } public void testPreIndexCheckParsingException() throws Exception { - when(operation.type()).thenReturn(Watch.DOC_TYPE); String id = randomAlphaOfLength(10); when(operation.id()).thenReturn(id); when(operation.source()).thenReturn(BytesArray.EMPTY); @@ -219,7 +201,6 @@ public void testPreIndexCheckParsingException() throws Exception { public void testPostIndexRemoveTriggerOnException() throws Exception { when(operation.id()).thenReturn("_id"); - when(operation.type()).thenReturn(Watch.DOC_TYPE); when(shardId.getIndexName()).thenReturn(Watch.INDEX); listener.postIndex(shardId, operation, new ElasticsearchParseException("whatever")); @@ -228,7 +209,6 @@ public void testPostIndexRemoveTriggerOnException() throws Exception { public void testPostIndexDontInvokeForOtherDocuments() throws Exception { when(operation.id()).thenReturn("_id"); - when(operation.type()).thenReturn(Watch.DOC_TYPE); when(shardId.getIndexName()).thenReturn("anything"); when(result.getResultType()).thenReturn(Engine.Result.Type.SUCCESS); @@ -251,18 +231,8 @@ public void testPreDeleteCheckIndex() throws Exception { verifyZeroInteractions(triggerService); } - public void testPreDeleteCheckType() throws Exception { - when(shardId.getIndexName()).thenReturn(Watch.INDEX); - when(delete.type()).thenReturn(randomAlphaOfLength(10)); - - listener.preDelete(shardId, delete); - - verifyZeroInteractions(triggerService); - } - public void testPreDelete() throws Exception { when(shardId.getIndexName()).thenReturn(Watch.INDEX); - when(delete.type()).thenReturn(Watch.DOC_TYPE); when(delete.id()).thenReturn("_id"); listener.preDelete(shardId, delete); diff --git 
a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java index b13b035304d7c..a4131889f84a0 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java @@ -81,6 +81,8 @@ public void testWatcherDisabledTests() throws Exception { // also no component creation if not enabled assertThat(watcher.createComponents(null, null, null, null, null, null, null, null, null), hasSize(0)); + + watcher.close(); } public void testThreadPoolSize() { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index 3b8d844cc1241..3d1fe78e27a00 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -231,8 +231,10 @@ public void testPausingWatcherServiceAlsoPausesTriggerService() { Trigger trigger = mock(Trigger.class); when(trigger.type()).thenReturn(engineType); + final String id = randomAlphaOfLengthBetween(3, 12); Watch watch = mock(Watch.class); when(watch.trigger()).thenReturn(trigger); + when(watch.id()).thenReturn(id); when(watch.condition()).thenReturn(InternalAlwaysCondition.INSTANCE); ExecutableNoneInput noneInput = new ExecutableNoneInput(); when(watch.input()).thenReturn(noneInput); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java index a753bf05b8229..393932f8c8ce6 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java @@ -68,7 +68,7 @@ public void testTimeThrottleDefaults() { .setSource(watchBuilder() .trigger(schedule(interval("1s"))) .input(simpleInput()) - .addAction("my-logging-action", indexAction("my_watcher_index", "action"))) + .addAction("my-logging-action", indexAction("my_watcher_index"))) .get(); assertThat(putWatchResponse.isCreated(), is(true)); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionFactoryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionFactoryTests.java deleted file mode 100644 index 02b7dec72aa91..0000000000000 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionFactoryTests.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.watcher.actions.hipchat; - -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatAccount; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService; -import org.junit.Before; - -import java.util.HashSet; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.hipchatAction; -import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class HipChatActionFactoryTests extends ESTestCase { - private HipChatActionFactory factory; - private HipChatService hipchatService; - - @Before - public void init() throws Exception { - hipchatService = mock(HipChatService.class); - factory = new HipChatActionFactory(mock(TextTemplateEngine.class), hipchatService); - } - - public void testParseAction() throws Exception { - HipChatAccount account = mock(HipChatAccount.class); - when(hipchatService.getAccount("_account1")).thenReturn(account); - - HipChatAction action = hipchatAction("_account1", "_body").build(); - XContentBuilder jsonBuilder = jsonBuilder().value(action); - XContentParser parser = createParser(jsonBuilder); - parser.nextToken(); - - ExecutableHipChatAction parsedAction = factory.parseExecutable("_w1", "_a1", parser); - assertThat(parsedAction.action(), is(action)); - - verify(account, times(1)).validateParsedTemplate("_w1", "_a1", action.message); - } - - public void testParseActionUnknownAccount() throws Exception { - hipchatService = new HipChatService(Settings.EMPTY, null, new ClusterSettings(Settings.EMPTY, - new HashSet<>(HipChatService.getSettings()))); - factory = new HipChatActionFactory(mock(TextTemplateEngine.class), hipchatService); - HipChatAction action = hipchatAction("_unknown", "_body").build(); - XContentBuilder jsonBuilder = jsonBuilder().value(action); - XContentParser parser = createParser(jsonBuilder); - parser.nextToken(); - expectThrows(IllegalArgumentException.class, () -> factory.parseExecutable("_w1", "_a1", parser)); - } -} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionTests.java deleted file mode 100644 index f36f00b54dd8c..0000000000000 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionTests.java +++ /dev/null @@ -1,296 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.watcher.actions.hipchat; - -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.script.JodaCompatibleZonedDateTime; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.watcher.actions.Action; -import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; -import org.elasticsearch.xpack.core.watcher.execution.Wid; -import org.elasticsearch.xpack.core.watcher.watch.Payload; -import org.elasticsearch.xpack.watcher.common.http.HttpProxy; -import org.elasticsearch.xpack.watcher.common.http.HttpRequest; -import org.elasticsearch.xpack.watcher.common.http.HttpResponse; -import org.elasticsearch.xpack.watcher.common.text.TextTemplate; -import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatAccount; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage; -import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService; -import org.elasticsearch.xpack.watcher.notification.hipchat.SentMessages; -import org.junit.Before; - -import java.io.IOException; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.singletonMap; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContextBuilder; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.sameInstance; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class HipChatActionTests extends ESTestCase { - private HipChatService service; - - @Before - public void init() throws Exception { - service = mock(HipChatService.class); - } - - public void testExecute() throws Exception { - final String accountName = "account1"; - - TextTemplateEngine templateEngine = mock(TextTemplateEngine.class); - - TextTemplate body = new TextTemplate("_body"); - HipChatMessage.Template.Builder messageBuilder = new HipChatMessage.Template.Builder(body); - - HipChatMessage.Template messageTemplate = messageBuilder.build(); - - HipChatAction action = new HipChatAction(accountName, messageTemplate, null); - ExecutableHipChatAction executable = new ExecutableHipChatAction(action, logger, service, templateEngine); - - Map data = new HashMap<>(); - Payload payload = new Payload.Simple(data); - - Map metadata = MapBuilder.newMapBuilder().put("_key", "_val").map(); - - ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); - JodaCompatibleZonedDateTime jodaJavaNow = new JodaCompatibleZonedDateTime(now.toInstant(), ZoneOffset.UTC); - - Wid 
wid = new Wid(randomAlphaOfLength(5), now); - WatchExecutionContext ctx = mockExecutionContextBuilder(wid.watchId()) - .wid(wid) - .payload(payload) - .time(wid.watchId(), now) - .metadata(metadata) - .buildMock(); - - Map triggerModel = new HashMap<>(); - triggerModel.put("triggered_time", jodaJavaNow); - triggerModel.put("scheduled_time", jodaJavaNow); - Map ctxModel = new HashMap<>(); - ctxModel.put("id", ctx.id().value()); - ctxModel.put("watch_id", wid.watchId()); - ctxModel.put("payload", data); - ctxModel.put("metadata", metadata); - ctxModel.put("execution_time", jodaJavaNow); - ctxModel.put("trigger", triggerModel); - ctxModel.put("vars", Collections.emptyMap()); - Map expectedModel = singletonMap("ctx", ctxModel); - - if (body != null) { - when(templateEngine.render(body, expectedModel)).thenReturn(body.getTemplate()); - } - - String[] rooms = new String[] { "_r1" }; - HipChatMessage message = new HipChatMessage(body.getTemplate(), rooms, null, null, null, null, null); - HipChatAccount account = mock(HipChatAccount.class); - when(account.render(wid.watchId(), "_id", templateEngine, messageTemplate, expectedModel)).thenReturn(message); - boolean responseFailure = randomBoolean(); - HttpResponse response = new HttpResponse(responseFailure ? 404 : 200); - HttpRequest request = HttpRequest.builder("localhost", 12345).path("/").build(); - SentMessages sentMessages = new SentMessages(accountName, Arrays.asList( - SentMessages.SentMessage.responded("_r1", SentMessages.SentMessage.TargetType.ROOM, message, request, response) - )); - when(account.send(message, null)).thenReturn(sentMessages); - when(service.getAccount(accountName)).thenReturn(account); - - Action.Result result = executable.execute("_id", ctx, payload); - - assertThat(result, notNullValue()); - assertThat(result, instanceOf(HipChatAction.Result.Executed.class)); - if (responseFailure) { - assertThat(result.status(), equalTo(Action.Result.Status.FAILURE)); - } else { - assertThat(result.status(), equalTo(Action.Result.Status.SUCCESS)); - } - assertThat(((HipChatAction.Result.Executed) result).sentMessages(), sameInstance(sentMessages)); - assertValidToXContent(result); - } - - public void testParser() throws Exception { - XContentBuilder builder = jsonBuilder().startObject(); - - String accountName = randomAlphaOfLength(10); - builder.field("account", accountName); - builder.startObject("message"); - - TextTemplate body = new TextTemplate("_body"); - builder.field("body", body); - - TextTemplate[] rooms = null; - if (randomBoolean()) { - TextTemplate r1 = new TextTemplate("_r1"); - TextTemplate r2 = new TextTemplate("_r2"); - rooms = new TextTemplate[] { r1, r2 }; - builder.array("room", r1, r2); - } - TextTemplate[] users = null; - if (randomBoolean()) { - TextTemplate u1 = new TextTemplate("_u1"); - TextTemplate u2 = new TextTemplate("_u2"); - users = new TextTemplate[] { u1, u2 }; - builder.array("user", u1, u2); - } - String from = null; - if (randomBoolean()) { - from = randomAlphaOfLength(10); - builder.field("from", from); - } - HipChatMessage.Format format = null; - if (randomBoolean()) { - format = randomFrom(HipChatMessage.Format.values()); - builder.field("format", format.value()); - } - TextTemplate color = null; - if (randomBoolean()) { - color = new TextTemplate(randomFrom(HipChatMessage.Color.values()).value()); - builder.field("color", color); - } - Boolean notify = null; - if (randomBoolean()) { - notify = randomBoolean(); - builder.field("notify", notify); - } - builder.endObject(); - HttpProxy proxy = 
null; - if (randomBoolean()) { - proxy = new HttpProxy("localhost", 8080); - builder.startObject("proxy").field("host", "localhost").field("port", 8080).endObject(); - } - builder.endObject(); - - BytesReference bytes = BytesReference.bytes(builder); - logger.info("hipchat action json [{}]", bytes.utf8ToString()); - XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); - parser.nextToken(); - - HipChatAction action = HipChatAction.parse("_watch", "_action", parser); - - assertThat(action, notNullValue()); - assertThat(action.account, is(accountName)); - assertThat(action.proxy, is(proxy)); - assertThat(action.message, notNullValue()); - assertThat(action.message, is(new HipChatMessage.Template(body, rooms, users, from, format, color, notify))); - } - - public void testParserSelfGenerated() throws Exception { - String accountName = randomAlphaOfLength(10); - TextTemplate body = new TextTemplate("_body"); - HipChatMessage.Template.Builder templateBuilder = new HipChatMessage.Template.Builder(body); - - XContentBuilder builder = jsonBuilder().startObject(); - builder.field("account", accountName); - - HttpProxy proxy = null; - if (randomBoolean()) { - proxy = new HttpProxy("localhost", 8080); - builder.startObject("proxy").field("host", "localhost").field("port", 8080).endObject(); - } - - builder.startObject("message"); - builder.field("body", body); - - if (randomBoolean()) { - TextTemplate r1 = new TextTemplate("_r1"); - TextTemplate r2 = new TextTemplate("_r2"); - templateBuilder.addRooms(r1, r2); - builder.array("room", r1, r2); - } - if (randomBoolean()) { - TextTemplate u1 = new TextTemplate("_u1"); - TextTemplate u2 = new TextTemplate("_u2"); - templateBuilder.addUsers(u1, u2); - builder.array("user", u1, u2); - } - if (randomBoolean()) { - String from = randomAlphaOfLength(10); - templateBuilder.setFrom(from); - builder.field("from", from); - } - if (randomBoolean()) { - HipChatMessage.Format format = randomFrom(HipChatMessage.Format.values()); - templateBuilder.setFormat(format); - builder.field("format", format.value()); - } - if (randomBoolean()) { - TextTemplate color = new TextTemplate(randomFrom(HipChatMessage.Color.values()).value()); - templateBuilder.setColor(color); - builder.field("color", color); - } - if (randomBoolean()) { - boolean notify = randomBoolean(); - templateBuilder.setNotify(notify); - builder.field("notify", notify); - } - - builder.endObject(); - builder.endObject(); - - HipChatMessage.Template template = templateBuilder.build(); - - HipChatAction action = new HipChatAction(accountName, template, proxy); - - XContentBuilder jsonBuilder = jsonBuilder(); - action.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); - BytesReference bytes = BytesReference.bytes(builder); - logger.info("{}", bytes.utf8ToString()); - XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); - parser.nextToken(); - - HipChatAction parsedAction = HipChatAction.parse("_watch", "_action", parser); - - assertThat(parsedAction, notNullValue()); - assertThat(parsedAction, is(action)); - } - - public void testParserInvalid() throws Exception { - XContentBuilder builder = jsonBuilder().startObject().field("unknown_field", "value").endObject(); - XContentParser parser = createParser(builder); - parser.nextToken(); - try { - HipChatAction.parse("_watch", "_action", parser); - fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), is("failed to parse [hipchat] action [_watch/_action]. 
unexpected token [VALUE_STRING]")); - } - } - - // ensure that toXContent can be serialized and read again - private void assertValidToXContent(Action.Result result) throws IOException { - try (XContentBuilder builder = jsonBuilder()) { - builder.startObject(); - result.toXContent(builder, ToXContent.EMPTY_PARAMS); - builder.endObject(); - Strings.toString(builder); - try (XContentParser parser = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, Strings.toString(builder))) { - parser.map(); - } - } - } -} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java index da702e01047bd..0ecc3cd412f6d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -28,6 +29,7 @@ import org.elasticsearch.xpack.core.watcher.actions.Action; import org.elasticsearch.xpack.core.watcher.actions.Action.Result.Status; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.elasticsearch.xpack.core.watcher.watch.Payload; import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; @@ -285,7 +287,7 @@ public void testIndexActionExecuteSingleDoc() throws Exception { refreshPolicy); ExecutableIndexAction executable = new ExecutableIndexAction(action, logger, client, TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30)); - ZonedDateTime executionTime = ZonedDateTime.now(ZoneOffset.UTC); + ZonedDateTime executionTime = DateUtils.nowWithMillisResolution(); Payload payload; if (customId && docIdAsParam == false) { @@ -325,7 +327,7 @@ public void testIndexActionExecuteSingleDoc() throws Exception { if (timestampField != null) { assertThat(indexRequest.sourceAsMap().keySet(), is(hasSize(2))); - assertThat(indexRequest.sourceAsMap(), hasEntry(timestampField, executionTime.toString())); + assertThat(indexRequest.sourceAsMap(), hasEntry(timestampField, WatcherDateTimeUtils.formatDate(executionTime))); } else { assertThat(indexRequest.sourceAsMap().keySet(), is(hasSize(1))); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/ActionThrottleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/ActionThrottleTests.java index 7e5a8b989105d..1834b70d92fcb 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/ActionThrottleTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/ActionThrottleTests.java @@ -301,7 +301,7 @@ public void testFailingActionDoesGetThrottled() throws Exception { new IntervalSchedule.Interval(60, IntervalSchedule.Interval.Unit.MINUTES)))) 
.defaultThrottlePeriod(throttlePeriod) .addAction("logging", loggingAction("test out")) - .addAction("failing_hook", indexAction("foo", "bar").setExecutionTimeField("@timestamp"))) + .addAction("failing_hook", indexAction("foo").setExecutionTimeField("@timestamp"))) .get(); refresh(Watch.INDEX); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java index 4e62eedd221d2..bdaa2377fd1d7 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java @@ -15,10 +15,10 @@ import org.elasticsearch.xpack.core.watcher.history.WatchRecord; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.elasticsearch.xpack.watcher.actions.ActionBuilders; +import org.elasticsearch.xpack.watcher.common.http.BasicAuth; import org.elasticsearch.xpack.watcher.common.http.HttpMethod; import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; import org.elasticsearch.xpack.watcher.common.http.Scheme; -import org.elasticsearch.xpack.watcher.common.http.BasicAuth; import org.elasticsearch.xpack.watcher.common.text.TextTemplate; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java index d26d07a8fbdb6..2c961db6187fe 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java @@ -17,9 +17,9 @@ import org.elasticsearch.xpack.core.watcher.history.WatchRecord; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.elasticsearch.xpack.watcher.actions.ActionBuilders; +import org.elasticsearch.xpack.watcher.common.http.BasicAuth; import org.elasticsearch.xpack.watcher.common.http.HttpMethod; import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; -import org.elasticsearch.xpack.watcher.common.http.BasicAuth; import org.elasticsearch.xpack.watcher.common.text.TextTemplate; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; @@ -147,7 +147,7 @@ public void testWebhookWithTimebasedIndex() throws Exception { String host = publishAddress.address().getHostString(); HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder(host, publishAddress.getPort()) - .path(new TextTemplate("/%3Clogstash-%7Bnow%2Fd%7D%3E/log/1")) + .path(new TextTemplate("/%3Clogstash-%7Bnow%2Fd%7D%3E/_doc/1")) .body(new TextTemplate("{\"foo\":\"bar\"}")) .putHeader("Content-Type", new TextTemplate("application/json")) .method(HttpMethod.PUT); @@ -162,7 +162,7 @@ public void testWebhookWithTimebasedIndex() throws Exception { watcherClient().prepareExecuteWatch("_id").get(); - GetResponse response = client().prepareGet("", "log", "1").get(); + GetResponse response = 
client().prepareGet().setIndex("").setId("1").get(); assertExists(response); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecuteWatchQueuedStatsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecuteWatchQueuedStatsTests.java new file mode 100644 index 0000000000000..d4686652eaeff --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecuteWatchQueuedStatsTests.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.watcher.execution; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; +import org.elasticsearch.xpack.watcher.actions.index.IndexAction; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.trigger.manual.ManualTriggerEvent; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; + +import java.io.IOException; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.empty; + +public class ExecuteWatchQueuedStatsTests extends AbstractWatcherIntegrationTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + // we use a small thread pool to force executions to be queued + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put("xpack.watcher.thread_pool.size", 1).build(); + } + + @Override + protected boolean timeWarped() { + return false; + } + + /* + * This test is effectively forcing a manually executed watch to end up queued while we simultaneously try to get stats, including + * queued watches. The reason that we do this is because previously a manually executed watch would be queued as a FutureTask while + * we try to cast queued watches to WatchExecutionTask. This would previously result in a ClassCastException. This test fails when that + * happens yet succeeds with the production code change that accompanies this test. 
+ */ + public void testQueuedStats() throws ExecutionException, InterruptedException { + final WatcherClient client = new WatcherClient(client()); + client.preparePutWatch("id") + .setActive(true) + .setSource( + new WatchSourceBuilder() + .input(simpleInput("payload", "yes")) + .trigger(schedule(interval("1s"))) + .addAction( + "action", + TimeValue.timeValueSeconds(1), + IndexAction.builder("test_index", "acknowledgement").setDocId("id"))) + .get(); + + final int numberOfIterations = 128 - scaledRandomIntBetween(0, 128); + + final CyclicBarrier barrier = new CyclicBarrier(2); + + final List> futures = new ArrayList<>(numberOfIterations); + final Thread executeWatchThread = new Thread(() -> { + try { + barrier.await(); + } catch (final BrokenBarrierException | InterruptedException e) { + fail(e.toString()); + } + for (int i = 0; i < numberOfIterations; i++) { + final ExecuteWatchRequest request = new ExecuteWatchRequest("id"); + try { + request.setTriggerEvent(new ManualTriggerEvent( + "id-" + i, + new ScheduleTriggerEvent(ZonedDateTime.now(ZoneOffset.UTC), ZonedDateTime.now(ZoneOffset.UTC)))); + } catch (final IOException e) { + fail(e.toString()); + } + request.setActionMode("_all", ActionExecutionMode.EXECUTE); + request.setRecordExecution(true); + futures.add(client.executeWatch(request)); + } + }); + executeWatchThread.start(); + + final List failures = new ArrayList<>(); + final Thread watcherStatsThread = new Thread(() -> { + try { + barrier.await(); + } catch (final BrokenBarrierException | InterruptedException e) { + fail(e.toString()); + } + for (int i = 0; i < numberOfIterations; i++) { + final WatcherStatsResponse response = client.prepareWatcherStats().setIncludeQueuedWatches(true).get(); + failures.addAll(response.failures()); + } + }); + watcherStatsThread.start(); + + executeWatchThread.join(); + watcherStatsThread.join(); + + for (final ActionFuture future : futures) { + future.get(); + } + + assertThat(failures, empty()); + + client.prepareDeleteWatch("id").get(); + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java index 321cf979bca73..80cb657a5762e 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -5,11 +5,13 @@ */ package org.elasticsearch.xpack.watcher.execution; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; @@ -28,8 +30,11 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.get.GetResult; +import 
org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.authc.Authentication; @@ -48,6 +53,7 @@ import org.elasticsearch.xpack.core.watcher.execution.ExecutionPhase; import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; import org.elasticsearch.xpack.core.watcher.execution.QueuedWatch; +import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionSnapshot; import org.elasticsearch.xpack.core.watcher.execution.Wid; @@ -91,6 +97,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; @@ -844,11 +851,15 @@ public void testThatTriggeredWatchDeletionWorksOnExecutionRejection() throws Exc when(getResponse.isExists()).thenReturn(true); when(getResponse.getId()).thenReturn("foo"); mockGetWatchResponse(client, "foo", getResponse); + ActionFuture actionFuture = mock(ActionFuture.class); + when(actionFuture.get()).thenReturn(""); + when(client.index(any())).thenReturn(actionFuture); + when(client.delete(any())).thenReturn(actionFuture); + when(parser.parseWithSecrets(eq("foo"), eq(true), any(), any(), any(), anyLong(), anyLong())).thenReturn(watch); - // execute needs to fail as well as storing the history + // execute needs to fail doThrow(new EsRejectedExecutionException()).when(executor).execute(any()); - doThrow(new ElasticsearchException("whatever")).when(historyStore).forcePut(any()); Wid wid = new Wid(watch.id(), ZonedDateTime.now(ZoneOffset.UTC)); @@ -856,10 +867,58 @@ public void testThatTriggeredWatchDeletionWorksOnExecutionRejection() throws Exc new ScheduleTriggerEvent(ZonedDateTime.now(ZoneOffset.UTC) ,ZonedDateTime.now(ZoneOffset.UTC))); executionService.executeTriggeredWatches(Collections.singleton(triggeredWatch)); - verify(triggeredWatchStore, times(1)).delete(wid); - ArgumentCaptor captor = ArgumentCaptor.forClass(WatchRecord.class); - verify(historyStore, times(1)).forcePut(captor.capture()); - assertThat(captor.getValue().state(), is(ExecutionState.THREADPOOL_REJECTION)); + ArgumentCaptor deleteCaptor = ArgumentCaptor.forClass(DeleteRequest.class); + verify(client).delete(deleteCaptor.capture()); + assertThat(deleteCaptor.getValue().index(), equalTo(TriggeredWatchStoreField.INDEX_NAME)); + assertThat(deleteCaptor.getValue().id(), equalTo(wid.value())); + + ArgumentCaptor watchHistoryCaptor = ArgumentCaptor.forClass(IndexRequest.class); + verify(client).index(watchHistoryCaptor.capture()); + + assertThat(watchHistoryCaptor.getValue().source().utf8ToString(), containsString(ExecutionState.THREADPOOL_REJECTION.toString())); + assertThat(watchHistoryCaptor.getValue().index(), containsString(".watcher-history")); + } + + public void testForcePutHistoryOnExecutionRejection() throws Exception { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("foo"); + WatchStatus status = new WatchStatus(ZonedDateTime.now(ZoneOffset.UTC), Collections.emptyMap()); + when(watch.status()).thenReturn(status); + GetResponse getResponse = mock(GetResponse.class); + 
when(getResponse.isExists()).thenReturn(true); + when(getResponse.getId()).thenReturn("foo"); + mockGetWatchResponse(client, "foo", getResponse); + ActionFuture actionFuture = mock(ActionFuture.class); + when(actionFuture.get()).thenReturn(""); + when(client.index(any())) + .thenThrow(new VersionConflictEngineException( + new ShardId(new Index("mockindex", "mockuuid"), 0), "id", "explanation")) + .thenReturn(actionFuture); + when(client.delete(any())).thenReturn(actionFuture); + + when(parser.parseWithSecrets(eq("foo"), eq(true), any(), any(), any(), anyLong(), anyLong())).thenReturn(watch); + + // execute needs to fail + doThrow(new EsRejectedExecutionException()).when(executor).execute(any()); + + Wid wid = new Wid(watch.id(), ZonedDateTime.now(ZoneOffset.UTC)); + + TriggeredWatch triggeredWatch = new TriggeredWatch(wid, + new ScheduleTriggerEvent(ZonedDateTime.now(ZoneOffset.UTC), ZonedDateTime.now(ZoneOffset.UTC))); + executionService.executeTriggeredWatches(Collections.singleton(triggeredWatch)); + + ArgumentCaptor deleteCaptor = ArgumentCaptor.forClass(DeleteRequest.class); + verify(client).delete(deleteCaptor.capture()); + assertThat(deleteCaptor.getValue().index(), equalTo(TriggeredWatchStoreField.INDEX_NAME)); + assertThat(deleteCaptor.getValue().id(), equalTo(wid.value())); + + ArgumentCaptor watchHistoryCaptor = ArgumentCaptor.forClass(IndexRequest.class); + verify(client, times(2)).index(watchHistoryCaptor.capture()); + List indexRequests = watchHistoryCaptor.getAllValues(); + + assertThat(indexRequests.get(0).id(), equalTo(indexRequests.get(1).id())); + assertThat(indexRequests.get(0).source().utf8ToString(), containsString(ExecutionState.THREADPOOL_REJECTION.toString())); + assertThat(indexRequests.get(1).source().utf8ToString(), containsString(ExecutionState.EXECUTED_MULTIPLE_TIMES.toString())); } public void testThatTriggeredWatchDeletionHappensOnlyIfWatchExists() throws Exception { @@ -898,7 +957,7 @@ public void testThatTriggeredWatchDeletionHappensOnlyIfWatchExce when(watch.status()).thenReturn(watchStatus); executionService.execute(context); - verify(triggeredWatchStore, never()).delete(any()); + verify(client, never()).delete(any()); } public void testThatSingleWatchCannotBeExecutedConcurrently() throws Exception { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java index 84d4ebd274dc0..13d46c4d97f2d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java @@ -80,6 +80,7 @@ import java.util.Map; import static java.util.Collections.singleton; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @@ -214,7 +215,7 @@ public void testFindTriggeredWatchesGoodCase() { when(searchResponse1.getSuccessfulShards()).thenReturn(1); when(searchResponse1.getTotalShards()).thenReturn(1); BytesArray source = new BytesArray("{}"); - SearchHit hit = new SearchHit(0, "first_foo", new Text(TriggeredWatchStoreField.DOC_TYPE), null); + SearchHit hit = new SearchHit(0, "first_foo", new Text(SINGLE_MAPPING_NAME), null); hit.version(1L); hit.shard(new
SearchShardTarget("_node_id", new ShardId(index, 0), null, OriginalIndices.NONE)); hit.sourceRef(source); @@ -228,7 +229,7 @@ public void testFindTriggeredWatchesGoodCase() { }).when(client).execute(eq(SearchAction.INSTANCE), any(), any()); // First return a scroll response with a single hit and then with no hits - hit = new SearchHit(0, "second_foo", new Text(TriggeredWatchStoreField.DOC_TYPE), null); + hit = new SearchHit(0, "second_foo", new Text(SINGLE_MAPPING_NAME), null); hit.version(1L); hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null, OriginalIndices.NONE)); hit.sourceRef(source); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java index 866ec32cff0b8..790f83e200479 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java @@ -91,7 +91,7 @@ public void testPut() throws Exception { ActionListener listener = (ActionListener) invocation.getArguments()[2]; IndexRequest indexRequest = (IndexRequest) request.requests().get(0); - if (indexRequest.id().equals(wid.value()) && indexRequest.type().equals(HistoryStore.DOC_TYPE) && + if (indexRequest.id().equals(wid.value()) && indexRequest.opType() == OpType.CREATE && indexRequest.index().equals(index)) { listener.onResponse(new BulkResponse(new BulkItemResponse[]{ new BulkItemResponse(1, OpType.CREATE, indexResponse) }, 1)); } else { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java index 9d2b767891aff..bcae850e17c22 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java @@ -31,6 +31,7 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -158,8 +159,8 @@ public void testExceptionMapping() { Iterator> iterator = mappingsResponse.getMappings().valuesIt(); while (iterator.hasNext()) { ImmutableOpenMap mapping = iterator.next(); - assertThat(mapping.containsKey("doc"), is(true)); - Map docMapping = mapping.get("doc").getSourceAsMap(); + assertThat(mapping.containsKey(SINGLE_MAPPING_NAME), is(true)); + Map docMapping = mapping.get(SINGLE_MAPPING_NAME).getSourceAsMap(); if (abortAtInput) { Boolean enabled = ObjectPath.eval("properties.result.properties.input.properties.error.enabled", docMapping); indexed.add(enabled); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateIndexActionMappingsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateIndexActionMappingsTests.java index c3b358f4dc0d8..50efdd335c82c 100644 --- 
a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateIndexActionMappingsTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateIndexActionMappingsTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; @@ -30,11 +31,10 @@ public class HistoryTemplateIndexActionMappingsTests extends AbstractWatcherInte public void testIndexActionFields() throws Exception { String index = "the-index"; - String type = "the-type"; PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("_id").setSource(watchBuilder() .trigger(schedule(interval("5m"))) - .addAction("index", indexAction(index, type))) + .addAction("index", indexAction(index))) .get(); assertThat(putWatchResponse.isCreated(), is(true)); @@ -66,7 +66,7 @@ public void testIndexActionFields() throws Exception { terms = aggs.get("index_action_types"); assertThat(terms, notNullValue()); assertThat(terms.getBuckets().size(), is(1)); - assertThat(terms.getBucketByKey(type), notNullValue()); - assertThat(terms.getBucketByKey(type).getDocCount(), is(1L)); + assertThat(terms.getBucketByKey(SINGLE_MAPPING_NAME), notNullValue()); + assertThat(terms.getBucketByKey(SINGLE_MAPPING_NAME).getDocCount(), is(1L)); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTimeMappingsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTimeMappingsTests.java index 0ad674f7ed218..b51715c64e2c2 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTimeMappingsTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTimeMappingsTests.java @@ -19,6 +19,7 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; @@ -52,7 +53,7 @@ public void testTimeFields() throws Exception { if (!metadatas.key.startsWith(HistoryStoreField.INDEX_PREFIX)) { continue; } - MappingMetaData metadata = metadatas.value.get("doc"); + MappingMetaData metadata = metadatas.value.get(SINGLE_MAPPING_NAME); assertThat(metadata, notNullValue()); try { Map source = metadata.getSourceAsMap(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTransformMappingsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTransformMappingsTests.java index 05247af948eea..a1b0f170b9fd0 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTransformMappingsTests.java +++ 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTransformMappingsTests.java @@ -15,6 +15,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; @@ -40,9 +41,9 @@ public void testTransformFields() throws Exception { .endObject())); client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .add(client().prepareIndex("idx", "doc", "1") + .add(client().prepareIndex().setIndex("idx").setId("1") .setSource(jsonBuilder().startObject().field("name", "first").field("foo", "bar").endObject())) - .add(client().prepareIndex("idx", "doc", "2") + .add(client().prepareIndex().setIndex("idx").setId("2") .setSource(jsonBuilder().startObject().field("name", "second") .startObject("foo").field("what", "ever").endObject().endObject())) .get(); @@ -74,13 +75,13 @@ public void testTransformFields() throws Exception { GetFieldMappingsResponse response = client().admin().indices() .prepareGetFieldMappings(".watcher-history*") .setFields("result.actions.transform.payload") - .setTypes("doc") + .setTypes(SINGLE_MAPPING_NAME) .includeDefaults(true) .get(); // time might have rolled over to a new day, thus we need to check that this field exists only in one of the history indices List payloadNulls = response.mappings().values().stream() - .map(map -> map.get("doc")) + .map(map -> map.get(SINGLE_MAPPING_NAME)) .map(map -> map.get("result.actions.transform.payload")) .filter(Objects::nonNull) .map(GetFieldMappingsResponse.FieldMappingMetaData::isNull) diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java index 5933dd0b0fdec..508da647a2cfa 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java @@ -57,7 +57,7 @@ protected Collection> nodePlugins() { public void testChainedInputsAreWorking() throws Exception { String index = "the-most-awesome-index-ever"; createIndex(index); - client().prepareIndex(index, "type", "id").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + client().prepareIndex().setIndex(index).setId("id").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); InetSocketAddress address = internalCluster().httpAddresses()[0]; HttpInput.Builder httpInputBuilder = httpInput(HttpRequestTemplate.builder(address.getHostString(), address.getPort()) @@ -72,7 +72,7 @@ public void testChainedInputsAreWorking() throws Exception { .setSource(watchBuilder() .trigger(schedule(interval(5, SECONDS))) .input(chainedInputBuilder) - .addAction("indexAction", indexAction("my-index", "my-type"))) + .addAction("indexAction", indexAction("my-index"))) .get(); timeWarp().trigger("_name"); @@ -84,7 +84,7 @@ public void testChainedInputsAreWorking() throws Exception { public void assertWatchExecuted() { try { refresh(); - SearchResponse searchResponse = 
client().prepareSearch("my-index").setTypes("my-type").get(); + SearchResponse searchResponse = client().prepareSearch("my-index").get(); assertHitCount(searchResponse, 1); assertThat(searchResponse.getHits().getAt(0).getSourceAsString(), containsString("the-most-awesome-index-ever")); } catch (IndexNotFoundException e) { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailSecretsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailSecretsIntegrationTests.java index bef39298d6242..b5d48e5033b0e 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailSecretsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailSecretsIntegrationTests.java @@ -95,7 +95,7 @@ public void testEmail() throws Exception { .get(); // verifying the email password is stored encrypted in the index - GetResponse response = client().prepareGet(Watch.INDEX, Watch.DOC_TYPE, "_id").get(); + GetResponse response = client().prepareGet().setIndex(Watch.INDEX).setId("_id").get(); assertThat(response, notNullValue()); assertThat(response.getId(), is("_id")); Map source = response.getSource(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatAccountsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatAccountsTests.java deleted file mode 100644 index 3057d935d7138..0000000000000 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatAccountsTests.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.watcher.common.http.HttpClient; -import org.elasticsearch.xpack.watcher.common.http.HttpProxy; -import org.elasticsearch.xpack.watcher.common.http.HttpRequest; -import org.elasticsearch.xpack.watcher.common.http.HttpResponse; -import org.junit.Before; -import org.mockito.ArgumentCaptor; - -import java.util.HashSet; - -import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class HipChatAccountsTests extends ESTestCase { - private HttpClient httpClient; - - @Before - public void init() throws Exception { - httpClient = mock(HttpClient.class); - } - - public void testProxy() throws Exception { - Settings.Builder builder = Settings.builder() - .put("xpack.notification.hipchat.default_account", "account1"); - addAccountSettings("account1", builder); - HipChatService service = new HipChatService(builder.build(), httpClient, new ClusterSettings(Settings.EMPTY, - new HashSet<>(HipChatService.getSettings()))); - HipChatAccount account = service.getAccount("account1"); - - HipChatMessage hipChatMessage = new HipChatMessage("body", new String[]{"rooms"}, null, "from", null, null, null); - - ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(HttpRequest.class); - when(httpClient.execute(argumentCaptor.capture())).thenReturn(new HttpResponse(200)); - - HttpProxy proxy = new HttpProxy("localhost", 8080); - account.send(hipChatMessage, proxy); - - HttpRequest request = argumentCaptor.getValue(); - assertThat(request.proxy(), is(proxy)); - } - - private void addAccountSettings(String name, Settings.Builder builder) { - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("xpack.notification.hipchat.account." + name + ".secure_auth_token", randomAlphaOfLength(50)); - HipChatAccount.Profile profile = randomFrom(HipChatAccount.Profile.values()); - builder.put("xpack.notification.hipchat.account." + name + ".profile", profile.value()); - builder.setSecureSettings(secureSettings); - if (profile == HipChatAccount.Profile.INTEGRATION) { - builder.put("xpack.notification.hipchat.account." + name + ".room", randomAlphaOfLength(10)); - } - } -} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatMessageTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatMessageTests.java deleted file mode 100644 index 96890e5629a26..0000000000000 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatMessageTests.java +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.DeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; -import org.elasticsearch.xpack.watcher.common.http.HttpRequest; -import org.elasticsearch.xpack.watcher.common.http.HttpResponse; -import org.elasticsearch.xpack.watcher.common.text.TextTemplate; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; - -public class HipChatMessageTests extends ESTestCase { - - public void testToXContent() throws Exception { - String message = randomAlphaOfLength(10); - String[] rooms = generateRandomStringArray(3, 10, true); - String[] users = generateRandomStringArray(3, 10, true); - String from = randomBoolean() ? null : randomAlphaOfLength(10); - HipChatMessage.Format format = rarely() ? null : randomFrom(HipChatMessage.Format.values()); - HipChatMessage.Color color = rarely() ? null : randomFrom(HipChatMessage.Color.values()); - Boolean notify = rarely() ? 
null : randomBoolean(); - HipChatMessage msg = new HipChatMessage(message, rooms, users, from, format, color, notify); - - XContentBuilder builder = jsonBuilder(); - boolean includeTarget = randomBoolean(); - if (includeTarget && randomBoolean()) { - msg.toXContent(builder, ToXContent.EMPTY_PARAMS); - } else { - msg.toXContent(builder, ToXContent.EMPTY_PARAMS, includeTarget); - } - BytesReference bytes = BytesReference.bytes(builder); - - XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); - parser.nextToken(); - - assertThat(parser.currentToken(), is(XContentParser.Token.START_OBJECT)); - - message = null; - rooms = null; - users = null; - from = null; - format = null; - color = null; - notify = null; - XContentParser.Token token = null; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if ("body".equals(currentFieldName)) { - message = parser.text(); - } else if ("room".equals(currentFieldName)) { - rooms = parser.list().stream().map(Object::toString).toArray(String[]::new); - } else if ("user".equals(currentFieldName)) { - users = parser.list().stream().map(Object::toString).toArray(String[]::new); - } else if ("from".equals(currentFieldName)) { - from = parser.text(); - } else if ("format".equals(currentFieldName)) { - format = HipChatMessage.Format.parse(parser); - } else if ("color".equals(currentFieldName)) { - color = HipChatMessage.Color.parse(parser); - } else if ("notify".equals(currentFieldName)) { - notify = parser.booleanValue(); - } else { - fail("unexpected xcontent field [" + currentFieldName + "] in hipchat message"); - } - } - - assertThat(message, notNullValue()); - assertThat(message, is(msg.body)); - if (includeTarget) { - if (msg.rooms == null || msg.rooms.length == 0) { - assertThat(rooms, nullValue()); - } else { - assertThat(rooms, arrayContaining(msg.rooms)); - } - if (msg.users == null || msg.users.length == 0) { - assertThat(users, nullValue()); - } else { - assertThat(users, arrayContaining(msg.users)); - } - } - assertThat(from, is(msg.from)); - assertThat(format, is(msg.format)); - assertThat(color, is(msg.color)); - assertThat(notify, is(msg.notify)); - } - - public void testEquals() throws Exception { - String message = randomAlphaOfLength(10); - String[] rooms = generateRandomStringArray(3, 10, true); - String[] users = generateRandomStringArray(3, 10, true); - String from = randomBoolean() ? null : randomAlphaOfLength(10); - HipChatMessage.Format format = rarely() ? null : randomFrom(HipChatMessage.Format.values()); - HipChatMessage.Color color = rarely() ? null : randomFrom(HipChatMessage.Color.values()); - Boolean notify = rarely() ? null : randomBoolean(); - HipChatMessage msg1 = new HipChatMessage(message, rooms, users, from, format, color, notify); - - boolean equals = randomBoolean(); - if (!equals) { - equals = true; - if (rarely()) { - equals = false; - message = "another message"; - } - if (rarely()) { - equals = false; - rooms = rooms == null ? new String[] { "roomX" } : randomBoolean() ? null : new String[] { "roomX" , "roomY"}; - } - if (rarely()) { - equals = false; - users = users == null ? new String[] { "userX" } : randomBoolean() ? null : new String[] { "userX", "userY" }; - } - if (rarely()) { - equals = false; - from = from == null ? "fromX" : randomBoolean() ? null : "fromY"; - } - if (rarely()) { - equals = false; - format = format == null ? 
- randomFrom(HipChatMessage.Format.values()) : - randomBoolean() ? - null : - randomFromWithExcludes(HipChatMessage.Format.values(), format); - } - if (rarely()) { - equals = false; - color = color == null ? - randomFrom(HipChatMessage.Color.values()) : - randomBoolean() ? - null : - randomFromWithExcludes(HipChatMessage.Color.values(), color); - } - if (rarely()) { - equals = false; - notify = notify == null ? (Boolean) randomBoolean() : randomBoolean() ? null : !notify; - } - } - - HipChatMessage msg2 = new HipChatMessage(message, rooms, users, from, format, color, notify); - assertThat(msg1.equals(msg2), is(equals)); - } - - public void testTemplateParse() throws Exception { - XContentBuilder jsonBuilder = jsonBuilder(); - jsonBuilder.startObject(); - - TextTemplate body = new TextTemplate(randomAlphaOfLength(200)); - jsonBuilder.field("body", body, ToXContent.EMPTY_PARAMS); - TextTemplate[] rooms = null; - if (randomBoolean()) { - jsonBuilder.startArray("room"); - rooms = new TextTemplate[randomIntBetween(1, 3)]; - for (int i = 0; i < rooms.length; i++) { - rooms[i] = new TextTemplate(randomAlphaOfLength(10)); - rooms[i].toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); - } - jsonBuilder.endArray(); - } - TextTemplate[] users = null; - if (randomBoolean()) { - jsonBuilder.startArray("user"); - users = new TextTemplate[randomIntBetween(1, 3)]; - for (int i = 0; i < users.length; i++) { - users[i] = new TextTemplate(randomAlphaOfLength(10)); - users[i].toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); - } - jsonBuilder.endArray(); - } - String from = null; - if (randomBoolean()) { - from = randomAlphaOfLength(10); - jsonBuilder.field("from", from); - } - TextTemplate color = null; - if (randomBoolean()) { - color = new TextTemplate(randomAlphaOfLength(10)); - jsonBuilder.field("color", color, ToXContent.EMPTY_PARAMS); - } - HipChatMessage.Format format = null; - if (randomBoolean()) { - format = randomFrom(HipChatMessage.Format.values()); - jsonBuilder.field("format", format.value()); - } - Boolean notify = null; - if (randomBoolean()) { - notify = randomBoolean(); - jsonBuilder.field("notify", notify); - } - - BytesReference bytes = BytesReference.bytes(jsonBuilder.endObject()); - XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); - parser.nextToken(); - - HipChatMessage.Template template = HipChatMessage.Template.parse(parser); - - assertThat(template, notNullValue()); - assertThat(template.body, is(body)); - if (rooms == null) { - assertThat(template.rooms, nullValue()); - } else { - assertThat(template.rooms, arrayContaining(rooms)); - } - if (users == null) { - assertThat(template.users, nullValue()); - } else { - assertThat(template.users, arrayContaining(users)); - } - assertThat(template.from, is(from)); - assertThat(template.color, is(color)); - assertThat(template.format, is(format)); - assertThat(template.notify, is(notify)); - } - - public void testTemplateParseSelfGenerated() throws Exception { - TextTemplate body = new TextTemplate(randomAlphaOfLength(10)); - HipChatMessage.Template.Builder templateBuilder = new HipChatMessage.Template.Builder(body); - - if (randomBoolean()) { - int count = randomIntBetween(1, 3); - for (int i = 0; i < count; i++) { - templateBuilder.addRooms(new TextTemplate(randomAlphaOfLength(10))); - } - } - if (randomBoolean()) { - int count = randomIntBetween(1, 3); - for (int i = 0; i < count; i++) { - templateBuilder.addUsers(new TextTemplate(randomAlphaOfLength(10))); - } - } - if (randomBoolean()) { - 
templateBuilder.setFrom(randomAlphaOfLength(10)); - } - if (randomBoolean()) { - templateBuilder.setColor(new TextTemplate(randomAlphaOfLength(5))); - } - if (randomBoolean()) { - templateBuilder.setFormat(randomFrom(HipChatMessage.Format.values())); - } - if (randomBoolean()) { - templateBuilder.setNotify(randomBoolean()); - } - HipChatMessage.Template template = templateBuilder.build(); - - XContentBuilder jsonBuilder = jsonBuilder(); - template.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); - BytesReference bytes = BytesReference.bytes(jsonBuilder); - - XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); - parser.nextToken(); - - HipChatMessage.Template parsed = HipChatMessage.Template.parse(parser); - - assertThat(parsed, equalTo(template)); - } - - public void testAuthTokenParamIsFiltered() throws Exception { - HttpResponse response = new HttpResponse(500); - String token = randomAlphaOfLength(20); - HttpRequest request = HttpRequest.builder("localhost", 1234).setParam("auth_token", token).build(); - - // String body, String[] rooms, String[] users, String from, Format format, Color color, Boolean notify - HipChatMessage hipChatMessage = new HipChatMessage("body", new String[]{"room"}, null, "from", - HipChatMessage.Format.TEXT, HipChatMessage.Color.RED, false); - SentMessages.SentMessage sentMessage = SentMessages.SentMessage.responded("targetName", SentMessages.SentMessage.TargetType.ROOM, - hipChatMessage, request, response); - - - try (XContentBuilder builder = jsonBuilder()) { - WatcherParams params = WatcherParams.builder().hideSecrets(false).build(); - sentMessage.toXContent(builder, params); - assertThat(Strings.toString(builder), containsString(token)); - - try (XContentParser parser = builder.contentType().xContent() - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - Strings.toString(builder))) { - parser.map(); - } - } - try (XContentBuilder builder = jsonBuilder()) { - sentMessage.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertThat(Strings.toString(builder), not(containsString(token))); - - try (XContentParser parser = builder.contentType().xContent() - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - Strings.toString(builder))) { - parser.map(); - } - } - } - - static E randomFromWithExcludes(E[] values, E... exclude) { - List excludes = Arrays.asList(exclude); - List includes = new ArrayList<>(); - for (E value : values) { - if (!excludes.contains(value)) { - includes.add(value); - } - } - return randomFrom(includes); - } -} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatServiceTests.java deleted file mode 100644 index a10a102e414ca..0000000000000 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatServiceTests.java +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.watcher.common.http.HttpClient; -import org.junit.Before; - -import java.util.HashSet; - -import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.sameInstance; -import static org.mockito.Mockito.mock; - -public class HipChatServiceTests extends ESTestCase { - private HttpClient httpClient; - - @Before - public void init() throws Exception { - httpClient = mock(HttpClient.class); - } - - public void testSingleAccountV1() throws Exception { - String accountName = randomAlphaOfLength(10); - String host = randomBoolean() ? null : "_host"; - int port = randomBoolean() ? -1 : randomIntBetween(300, 400); - String defaultRoom = randomBoolean() ? null : "_r1, _r2"; - String defaultFrom = randomBoolean() ? null : "_from"; - HipChatMessage.Color defaultColor = randomBoolean() ? null : randomFrom(HipChatMessage.Color.values()); - HipChatMessage.Format defaultFormat = randomBoolean() ? null : randomFrom(HipChatMessage.Format.values()); - Boolean defaultNotify = randomBoolean() ? null : (Boolean) randomBoolean(); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("xpack.notification.hipchat.account." + accountName + ".secure_auth_token", "_token"); - Settings.Builder settingsBuilder = Settings.builder() - .put("xpack.notification.hipchat.account." + accountName + ".profile", HipChatAccount.Profile.V1.value()) - .setSecureSettings(secureSettings); - if (host != null) { - settingsBuilder.put("xpack.notification.hipchat.account." + accountName + ".host", host); - } - if (port > 0) { - settingsBuilder.put("xpack.notification.hipchat.account." + accountName + ".port", port); - } - buildMessageDefaults(accountName, settingsBuilder, defaultRoom, null, defaultFrom, defaultColor, defaultFormat, defaultNotify); - HipChatService service = new HipChatService(settingsBuilder.build(), httpClient, - new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings()))); - - HipChatAccount account = service.getAccount(accountName); - assertThat(account, notNullValue()); - assertThat(account.name, is(accountName)); - assertThat(account.authToken, is("_token")); - assertThat(account.profile, is(HipChatAccount.Profile.V1)); - assertThat(account.httpClient, is(httpClient)); - assertThat(account.server, notNullValue()); - assertThat(account.server.host(), is(host != null ? host : HipChatServer.DEFAULT.host())); - assertThat(account.server.port(), is(port > 0 ? 
port : HipChatServer.DEFAULT.port())); - assertThat(account, instanceOf(V1Account.class)); - if (defaultRoom == null) { - assertThat(((V1Account) account).defaults.rooms, nullValue()); - } else { - assertThat(((V1Account) account).defaults.rooms, arrayContaining("_r1", "_r2")); - } - assertThat(((V1Account) account).defaults.from, is(defaultFrom)); - assertThat(((V1Account) account).defaults.color, is(defaultColor)); - assertThat(((V1Account) account).defaults.format, is(defaultFormat)); - assertThat(((V1Account) account).defaults.notify, is(defaultNotify)); - - // with a single account defined, making sure that that account is set to the default one. - assertThat(service.getAccount(null), sameInstance(account)); - } - - public void testSingleAccountIntegration() throws Exception { - String accountName = randomAlphaOfLength(10); - String host = randomBoolean() ? null : "_host"; - int port = randomBoolean() ? -1 : randomIntBetween(300, 400); - String room = randomAlphaOfLength(10); - String defaultFrom = randomBoolean() ? null : "_from"; - HipChatMessage.Color defaultColor = randomBoolean() ? null : randomFrom(HipChatMessage.Color.values()); - HipChatMessage.Format defaultFormat = randomBoolean() ? null : randomFrom(HipChatMessage.Format.values()); - Boolean defaultNotify = randomBoolean() ? null : (Boolean) randomBoolean(); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("xpack.notification.hipchat.account." + accountName + ".secure_auth_token", "_token"); - Settings.Builder settingsBuilder = Settings.builder() - .put("xpack.notification.hipchat.account." + accountName + ".profile", - HipChatAccount.Profile.INTEGRATION.value()) - .setSecureSettings(secureSettings) - .put("xpack.notification.hipchat.account." + accountName + ".room", room); - if (host != null) { - settingsBuilder.put("xpack.notification.hipchat.account." + accountName + ".host", host); - } - if (port > 0) { - settingsBuilder.put("xpack.notification.hipchat.account." + accountName + ".port", port); - } - buildMessageDefaults(accountName, settingsBuilder, null, null, defaultFrom, defaultColor, defaultFormat, defaultNotify); - HipChatService service = new HipChatService(settingsBuilder.build(), httpClient, - new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings()))); - - HipChatAccount account = service.getAccount(accountName); - assertThat(account, notNullValue()); - assertThat(account.name, is(accountName)); - assertThat(account.authToken, is("_token")); - assertThat(account.profile, is(HipChatAccount.Profile.INTEGRATION)); - assertThat(account.httpClient, is(httpClient)); - assertThat(account.server, notNullValue()); - assertThat(account.server.host(), is(host != null ? host : HipChatServer.DEFAULT.host())); - assertThat(account.server.port(), is(port > 0 ? port : HipChatServer.DEFAULT.port())); - assertThat(account, instanceOf(IntegrationAccount.class)); - assertThat(((IntegrationAccount) account).room, is(room)); - assertThat(((IntegrationAccount) account).defaults.color, is(defaultColor)); - assertThat(((IntegrationAccount) account).defaults.format, is(defaultFormat)); - assertThat(((IntegrationAccount) account).defaults.notify, is(defaultNotify)); - - // with a single account defined, making sure that that account is set to the default one. 
- assertThat(service.getAccount(null), sameInstance(account)); - } - - public void testSingleAccountIntegrationNoRoomSetting() throws Exception { - String accountName = randomAlphaOfLength(10); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("xpack.notification.hipchat.account." + accountName + ".secure_auth_token", "_token"); - Settings.Builder settingsBuilder = Settings.builder() - .put("xpack.notification.hipchat.account." + accountName + ".profile", - HipChatAccount.Profile.INTEGRATION.value()) - .setSecureSettings(secureSettings); - SettingsException e = expectThrows(SettingsException.class, () -> - new HipChatService(settingsBuilder.build(), httpClient, - new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings()))).getAccount(null)); - assertThat(e.getMessage(), containsString("missing required [room] setting for [integration] account profile")); - } - - public void testSingleAccountUser() throws Exception { - String accountName = randomAlphaOfLength(10); - String host = randomBoolean() ? null : "_host"; - int port = randomBoolean() ? -1 : randomIntBetween(300, 400); - String defaultRoom = randomBoolean() ? null : "_r1, _r2"; - String defaultUser = randomBoolean() ? null : "_u1, _u2"; - HipChatMessage.Color defaultColor = randomBoolean() ? null : randomFrom(HipChatMessage.Color.values()); - HipChatMessage.Format defaultFormat = randomBoolean() ? null : randomFrom(HipChatMessage.Format.values()); - Boolean defaultNotify = randomBoolean() ? null : (Boolean) randomBoolean(); - - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("xpack.notification.hipchat.account." + accountName + ".secure_auth_token", "_token"); - Settings.Builder settingsBuilder = Settings.builder() - .put("xpack.notification.hipchat.account." + accountName + ".profile", HipChatAccount.Profile.USER.value()) - .setSecureSettings(secureSettings); - if (host != null) { - settingsBuilder.put("xpack.notification.hipchat.account." + accountName + ".host", host); - } - if (port > 0) { - settingsBuilder.put("xpack.notification.hipchat.account." + accountName + ".port", port); - } - buildMessageDefaults(accountName, settingsBuilder, defaultRoom, defaultUser, null, defaultColor, defaultFormat, defaultNotify); - HipChatService service = new HipChatService(settingsBuilder.build(), httpClient, - new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings()))); - - HipChatAccount account = service.getAccount(accountName); - assertThat(account, notNullValue()); - assertThat(account.name, is(accountName)); - assertThat(account.authToken, is("_token")); - assertThat(account.profile, is(HipChatAccount.Profile.USER)); - assertThat(account.httpClient, is(httpClient)); - assertThat(account.server, notNullValue()); - assertThat(account.server.host(), is(host != null ? host : HipChatServer.DEFAULT.host())); - assertThat(account.server.port(), is(port > 0 ? 
port : HipChatServer.DEFAULT.port())); - assertThat(account, instanceOf(UserAccount.class)); - if (defaultRoom == null) { - assertThat(((UserAccount) account).defaults.rooms, nullValue()); - } else { - assertThat(((UserAccount) account).defaults.rooms, arrayContaining("_r1", "_r2")); - } - if (defaultUser == null) { - assertThat(((UserAccount) account).defaults.users, nullValue()); - } else { - assertThat(((UserAccount) account).defaults.users, arrayContaining("_u1", "_u2")); - } - assertThat(((UserAccount) account).defaults.color, is(defaultColor)); - assertThat(((UserAccount) account).defaults.format, is(defaultFormat)); - assertThat(((UserAccount) account).defaults.notify, is(defaultNotify)); - - // with a single account defined, making sure that that account is set to the default one. - assertThat(service.getAccount(null), sameInstance(account)); - } - - public void testMultipleAccounts() throws Exception { - HipChatMessage.Color defaultColor = randomBoolean() ? null : randomFrom(HipChatMessage.Color.values()); - HipChatMessage.Format defaultFormat = randomBoolean() ? null : randomFrom(HipChatMessage.Format.values()); - Boolean defaultNotify = randomBoolean() ? null : (Boolean) randomBoolean(); - Settings.Builder settingsBuilder = Settings.builder(); - String defaultAccount = "_a" + randomIntBetween(0, 4); - settingsBuilder.put("xpack.notification.hipchat.default_account", defaultAccount); - final MockSecureSettings secureSettings = new MockSecureSettings(); - settingsBuilder.setSecureSettings(secureSettings); - - final boolean customGlobalServer = randomBoolean(); - if (customGlobalServer) { - settingsBuilder.put("xpack.notification.hipchat.host", "_host_global"); - settingsBuilder.put("xpack.notification.hipchat.port", 299); - } - - for (int i = 0; i < 5; i++) { - String name = "_a" + i; - String prefix = "xpack.notification.hipchat.account." 
+ name; - HipChatAccount.Profile profile = randomFrom(HipChatAccount.Profile.values()); - settingsBuilder.put(prefix + ".profile", profile); - secureSettings.setString(prefix + ".secure_auth_token", "_token" + i); - if (profile == HipChatAccount.Profile.INTEGRATION) { - settingsBuilder.put(prefix + ".room", "_room" + i); - } - if (i % 2 == 0) { - settingsBuilder.put(prefix + ".host", "_host" + i); - settingsBuilder.put(prefix + ".port", 300 + i); - } - buildMessageDefaults(name, settingsBuilder, null, null, null, defaultColor, defaultFormat, defaultNotify); - } - - HipChatService service = new HipChatService(settingsBuilder.build(), httpClient, - new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings()))); - - for (int i = 0; i < 5; i++) { - String name = "_a" + i; - HipChatAccount account = service.getAccount(name); - assertThat(account, notNullValue()); - assertThat(account.name, is(name)); - assertThat(account.authToken, is("_token" + i)); - assertThat(account.profile, notNullValue()); - if (account.profile == HipChatAccount.Profile.INTEGRATION) { - assertThat(account, instanceOf(IntegrationAccount.class)); - assertThat(((IntegrationAccount) account).room, is("_room" + i)); - } - assertThat(account.httpClient, is(httpClient)); - assertThat(account.server, notNullValue()); - if (i % 2 == 0) { - assertThat(account.server.host(), is("_host" + i)); - assertThat(account.server.port(), is(300 + i)); - } else if (customGlobalServer) { - assertThat(account.server.host(), is("_host_global")); - assertThat(account.server.port(), is(299)); - } else { - assertThat(account.server.host(), is(HipChatServer.DEFAULT.host())); - assertThat(account.server.port(), is(HipChatServer.DEFAULT.port())); - } - } - - assertThat(service.getAccount(null), sameInstance(service.getAccount(defaultAccount))); - } - - private void buildMessageDefaults(String account, Settings.Builder settingsBuilder, String room, String user, String from, - HipChatMessage.Color color, HipChatMessage.Format format, Boolean notify) { - if (room != null) { - settingsBuilder.put("xpack.notification.hipchat.account." + account + ".message_defaults.room", room); - } - if (user != null) { - settingsBuilder.put("xpack.notification.hipchat.account." + account + ".message_defaults.user", user); - } - if (from != null) { - settingsBuilder.put("xpack.notification.hipchat.account." + account + ".message_defaults.from", from); - } - if (color != null) { - settingsBuilder.put("xpack.notification.hipchat.account." + account + ".message_defaults.color", color.value()); - } - if (format != null) { - settingsBuilder.put("xpack.notification.hipchat.account." + account + ".message_defaults.format", format); - } - if (notify != null) { - settingsBuilder.put("xpack.notification.hipchat.account." + account + ".message_defaults.notify", notify); - } - } -} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccountTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccountTests.java deleted file mode 100644 index df1f5d3f47294..0000000000000 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccountTests.java +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.watcher.common.http.HttpClient; -import org.elasticsearch.xpack.watcher.common.http.HttpMethod; -import org.elasticsearch.xpack.watcher.common.http.HttpRequest; -import org.elasticsearch.xpack.watcher.common.http.HttpResponse; -import org.elasticsearch.xpack.watcher.common.http.Scheme; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class IntegrationAccountTests extends ESTestCase { - - public void testSettings() throws Exception { - String accountName = "_name"; - - Settings.Builder sb = Settings.builder(); - - String authToken = randomAlphaOfLength(50); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(IntegrationAccount.SECURE_AUTH_TOKEN_SETTING.getKey(), authToken); - sb.setSecureSettings(secureSettings); - - String host = HipChatServer.DEFAULT.host(); - if (randomBoolean()) { - host = randomAlphaOfLength(10); - sb.put("host", host); - } - int port = HipChatServer.DEFAULT.port(); - if (randomBoolean()) { - port = randomIntBetween(300, 400); - sb.put("port", port); - } - - String room = randomAlphaOfLength(10); - sb.put(IntegrationAccount.ROOM_SETTING, room); - - HipChatMessage.Format defaultFormat = null; - if (randomBoolean()) { - defaultFormat = randomFrom(HipChatMessage.Format.values()); - sb.put(HipChatAccount.DEFAULT_FORMAT_SETTING, defaultFormat); - } - HipChatMessage.Color defaultColor = null; - if (randomBoolean()) { - defaultColor = randomFrom(HipChatMessage.Color.values()); - sb.put(HipChatAccount.DEFAULT_COLOR_SETTING, defaultColor); - } - Boolean defaultNotify = null; - if (randomBoolean()) { - defaultNotify = randomBoolean(); - sb.put(HipChatAccount.DEFAULT_NOTIFY_SETTING, defaultNotify); - } - Settings settings = sb.build(); - - IntegrationAccount account = new IntegrationAccount(accountName, settings, HipChatServer.DEFAULT, mock(HttpClient.class), - mock(Logger.class)); - - assertThat(account.profile, is(HipChatAccount.Profile.INTEGRATION)); - assertThat(account.name, equalTo(accountName)); - assertThat(account.server.host(), is(host)); - assertThat(account.server.port(), is(port)); - assertThat(account.authToken, is(authToken)); - assertThat(account.room, is(room)); - assertThat(account.defaults.format, is(defaultFormat)); - assertThat(account.defaults.color, is(defaultColor)); - assertThat(account.defaults.notify, is(defaultNotify)); - } - - public void testSettingsNoAuthToken() throws Exception { - Settings.Builder sb = Settings.builder(); - sb.put(IntegrationAccount.ROOM_SETTING, randomAlphaOfLength(10)); - try { - 
new IntegrationAccount("_name", sb.build(), HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); - fail("Expected SettingsException"); - } catch (SettingsException e) { - assertThat(e.getMessage(), is("hipchat account [_name] missing required [secure_auth_token] secure setting")); - } - } - - public void testSettingsWithoutRoom() throws Exception { - Settings.Builder sb = Settings.builder(); - String authToken = randomAlphaOfLength(50); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(IntegrationAccount.SECURE_AUTH_TOKEN_SETTING.getKey(), authToken); - sb.setSecureSettings(secureSettings); - try { - new IntegrationAccount("_name", sb.build(), HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); - fail("Expected SettingsException"); - } catch (SettingsException e) { - assertThat(e.getMessage(), containsString("missing required [room] setting for [integration] account profile")); - } - } - - public void testSettingsWithoutMultipleRooms() throws Exception { - Settings.Builder sb = Settings.builder(); - String authToken = randomAlphaOfLength(50); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(IntegrationAccount.SECURE_AUTH_TOKEN_SETTING.getKey(), authToken); - sb.setSecureSettings(secureSettings); - sb.put(IntegrationAccount.ROOM_SETTING, "_r1,_r2"); - try { - new IntegrationAccount("_name", sb.build(), HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); - fail("Expected SettingsException"); - } catch (SettingsException e) { - assertThat(e.getMessage(), containsString("[room] setting for [integration] account must only be set with a single value")); - } - } - - public void testSend() throws Exception { - String token = randomAlphaOfLength(10); - HttpClient httpClient = mock(HttpClient.class); - String room = "Room with Spaces"; - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(IntegrationAccount.SECURE_AUTH_TOKEN_SETTING.getKey(), token); - IntegrationAccount account = new IntegrationAccount("_name", Settings.builder() - .put("host", "_host") - .put("port", "443") - .setSecureSettings(secureSettings) - .put("room", room) - .build(), HipChatServer.DEFAULT, httpClient, mock(Logger.class)); - - HipChatMessage.Format format = randomFrom(HipChatMessage.Format.values()); - HipChatMessage.Color color = randomFrom(HipChatMessage.Color.values()); - Boolean notify = randomBoolean(); - final HipChatMessage message = new HipChatMessage("_body", null, null, null, format, color, notify); - - HttpRequest req = HttpRequest.builder("_host", 443) - .method(HttpMethod.POST) - .scheme(Scheme.HTTPS) - // url encoded already - .path("/v2/room/Room+with+Spaces/notification") - .setHeader("Content-Type", "application/json") - .setHeader("Authorization", "Bearer " + token) - .body(Strings.toString((builder, params) -> { - builder.field("message", message.body); - if (message.format != null) { - builder.field("message_format", message.format.value()); - } - if (message.notify != null) { - builder.field("notify", message.notify); - } - if (message.color != null) { - builder.field("color", String.valueOf(message.color.value())); - } - return builder; - })) - .build(); - - HttpResponse res = mock(HttpResponse.class); - when(res.status()).thenReturn(200); - when(httpClient.execute(req)).thenReturn(res); - - SentMessages sentMessages = account.send(message, null); - verify(httpClient).execute(req); - assertThat(sentMessages.asList(), 
hasSize(1)); - try (XContentBuilder builder = jsonBuilder()) { - sentMessages.asList().get(0).toXContent(builder, ToXContent.EMPTY_PARAMS); - assertThat(Strings.toString(builder), not(containsString(token))); - } - } -} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/UserAccountTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/UserAccountTests.java deleted file mode 100644 index 6893999776b02..0000000000000 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/UserAccountTests.java +++ /dev/null @@ -1,313 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.watcher.common.http.HttpClient; -import org.elasticsearch.xpack.watcher.common.http.HttpMethod; -import org.elasticsearch.xpack.watcher.common.http.HttpProxy; -import org.elasticsearch.xpack.watcher.common.http.HttpRequest; -import org.elasticsearch.xpack.watcher.common.http.HttpResponse; -import org.elasticsearch.xpack.watcher.common.http.Scheme; -import org.elasticsearch.xpack.watcher.common.text.TextTemplate; -import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; -import org.mockito.ArgumentCaptor; - -import java.util.HashMap; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class UserAccountTests extends ESTestCase { - - public void testSettings() throws Exception { - String accountName = "_name"; - - Settings.Builder sb = Settings.builder(); - - String authToken = randomAlphaOfLength(50); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(UserAccount.SECURE_AUTH_TOKEN_SETTING.getKey(), authToken); - sb.setSecureSettings(secureSettings); - - String host = HipChatServer.DEFAULT.host(); - if (randomBoolean()) { - host = randomAlphaOfLength(10); - sb.put("host", host); - } - int port = HipChatServer.DEFAULT.port(); - if (randomBoolean()) { - port = randomIntBetween(300, 400); - sb.put("port", port); - } - - String[] defaultRooms = null; - if (randomBoolean()) { - defaultRooms = new String[] { "_r1", "_r2" }; - sb.put(HipChatAccount.DEFAULT_ROOM_SETTING, "_r1,_r2"); - } - String[] defaultUsers = null; - if (randomBoolean()) { - defaultUsers = new String[] { "_u1", "_u2" }; - sb.put(HipChatAccount.DEFAULT_USER_SETTING, "_u1,_u2"); - } - HipChatMessage.Format defaultFormat = null; - if (randomBoolean()) { - 
defaultFormat = randomFrom(HipChatMessage.Format.values()); - sb.put(HipChatAccount.DEFAULT_FORMAT_SETTING, defaultFormat); - } - HipChatMessage.Color defaultColor = null; - if (randomBoolean()) { - defaultColor = randomFrom(HipChatMessage.Color.values()); - sb.put(HipChatAccount.DEFAULT_COLOR_SETTING, defaultColor); - } - Boolean defaultNotify = null; - if (randomBoolean()) { - defaultNotify = randomBoolean(); - sb.put(HipChatAccount.DEFAULT_NOTIFY_SETTING, defaultNotify); - } - Settings settings = sb.build(); - - UserAccount account = new UserAccount(accountName, settings, HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); - - assertThat(account.profile, is(HipChatAccount.Profile.USER)); - assertThat(account.name, equalTo(accountName)); - assertThat(account.server.host(), is(host)); - assertThat(account.server.port(), is(port)); - assertThat(account.authToken, is(authToken)); - if (defaultRooms != null) { - assertThat(account.defaults.rooms, arrayContaining(defaultRooms)); - } else { - assertThat(account.defaults.rooms, nullValue()); - } - if (defaultUsers != null) { - assertThat(account.defaults.users, arrayContaining(defaultUsers)); - } else { - assertThat(account.defaults.users, nullValue()); - } - assertThat(account.defaults.format, is(defaultFormat)); - assertThat(account.defaults.color, is(defaultColor)); - assertThat(account.defaults.notify, is(defaultNotify)); - } - - public void testSettingsNoAuthToken() throws Exception { - Settings.Builder sb = Settings.builder(); - try { - new UserAccount("_name", sb.build(), HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); - fail("Expected SettingsException"); - } catch (SettingsException e) { - assertThat(e.getMessage(), is("hipchat account [_name] missing required [secure_auth_token] secure setting")); - } - } - - public void testSend() throws Exception { - HttpClient httpClient = mock(HttpClient.class); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(IntegrationAccount.SECURE_AUTH_TOKEN_SETTING.getKey(), "_token"); - UserAccount account = new UserAccount("_name", Settings.builder() - .put("host", "_host") - .put("port", "443") - .setSecureSettings(secureSettings) - .build(), HipChatServer.DEFAULT, httpClient, mock(Logger.class)); - - HipChatMessage.Format format = randomFrom(HipChatMessage.Format.values()); - HipChatMessage.Color color = randomFrom(HipChatMessage.Color.values()); - Boolean notify = randomBoolean(); - final HipChatMessage message = new HipChatMessage("_body", new String[] { "_r1", "_r2" }, new String[] { "_u1", "_u2" }, null, - format, color, notify); - - HttpRequest reqR1 = HttpRequest.builder("_host", 443) - .method(HttpMethod.POST) - .scheme(Scheme.HTTPS) - .path("/v2/room/_r1/notification") - .setHeader("Content-Type", "application/json") - .setHeader("Authorization", "Bearer _token") - .body(Strings.toString((builder, params) -> { - builder.field("message", message.body); - if (message.format != null) { - builder.field("message_format", message.format.value()); - } - if (message.notify != null) { - builder.field("notify", message.notify); - } - if (message.color != null) { - builder.field("color", String.valueOf(message.color.value())); - } - return builder; - })) - .build(); - - logger.info("expected (r1): {}", BytesReference.bytes(jsonBuilder().value(reqR1)).utf8ToString()); - - HttpResponse resR1 = mock(HttpResponse.class); - when(resR1.status()).thenReturn(200); - when(httpClient.execute(reqR1)).thenReturn(resR1); - - HttpRequest 
reqR2 = HttpRequest.builder("_host", 443) - .method(HttpMethod.POST) - .scheme(Scheme.HTTPS) - .path("/v2/room/_r2/notification") - .setHeader("Content-Type", "application/json") - .setHeader("Authorization", "Bearer _token") - .body(Strings.toString((builder, params) -> { - builder.field("message", message.body); - if (message.format != null) { - builder.field("message_format", message.format.value()); - } - if (message.notify != null) { - builder.field("notify", message.notify); - } - if (message.color != null) { - builder.field("color", String.valueOf(message.color.value())); - } - return builder; - })) - .build(); - - logger.info("expected (r2): {}", BytesReference.bytes(jsonBuilder().value(reqR1)).utf8ToString()); - - HttpResponse resR2 = mock(HttpResponse.class); - when(resR2.status()).thenReturn(200); - when(httpClient.execute(reqR2)).thenReturn(resR2); - - HttpRequest reqU1 = HttpRequest.builder("_host", 443) - .method(HttpMethod.POST) - .scheme(Scheme.HTTPS) - .path("/v2/user/_u1/message") - .setHeader("Content-Type", "application/json") - .setHeader("Authorization", "Bearer _token") - .body(Strings.toString((builder, params) -> { - builder.field("message", message.body); - if (message.format != null) { - builder.field("message_format", message.format.value()); - } - if (message.notify != null) { - builder.field("notify", message.notify); - } - return builder; - })) - .build(); - - logger.info("expected (u1): {}", BytesReference.bytes(jsonBuilder().value(reqU1)).utf8ToString()); - - HttpResponse resU1 = mock(HttpResponse.class); - when(resU1.status()).thenReturn(200); - when(httpClient.execute(reqU1)).thenReturn(resU1); - - HttpRequest reqU2 = HttpRequest.builder("_host", 443) - .method(HttpMethod.POST) - .scheme(Scheme.HTTPS) - .path("/v2/user/_u2/message") - .setHeader("Content-Type", "application/json") - .setHeader("Authorization", "Bearer _token") - .body(Strings.toString((builder, params) -> { - builder.field("message", message.body); - if (message.format != null) { - builder.field("message_format", message.format.value()); - } - if (message.notify != null) { - builder.field("notify", message.notify); - } - return builder; - })) - .build(); - - logger.info("expected (u2): {}", BytesReference.bytes(jsonBuilder().value(reqU2)).utf8ToString()); - - HttpResponse resU2 = mock(HttpResponse.class); - when(resU2.status()).thenReturn(200); - when(httpClient.execute(reqU2)).thenReturn(resU2); - - account.send(message, null); - - verify(httpClient).execute(reqR1); - verify(httpClient).execute(reqR2); - verify(httpClient).execute(reqU2); - verify(httpClient).execute(reqU2); - } - - public void testColorIsOptional() throws Exception { - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(IntegrationAccount.SECURE_AUTH_TOKEN_SETTING.getKey(), "awesome-auth-token"); - Settings settings = Settings.builder() - .put("user", "testuser") - .setSecureSettings(secureSettings) - .build(); - UserAccount userAccount = createUserAccount(settings); - - TextTemplate body = new TextTemplate("body"); - TextTemplate[] rooms = new TextTemplate[] { new TextTemplate("room")}; - HipChatMessage.Template template = - new HipChatMessage.Template(body, rooms, null, "sender", HipChatMessage.Format.TEXT, null, true); - - HipChatMessage message = userAccount.render("watchId", "actionId", new MockTextTemplateEngine(), template, new HashMap<>()); - assertThat(message.color, is(nullValue())); - } - - public void testFormatIsOptional() throws Exception { - final 
MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(IntegrationAccount.SECURE_AUTH_TOKEN_SETTING.getKey(), "awesome-auth-token"); - Settings settings = Settings.builder() - .put("user", "testuser") - .setSecureSettings(secureSettings) - .build(); - UserAccount userAccount = createUserAccount(settings); - - TextTemplate body = new TextTemplate("body"); - TextTemplate[] rooms = new TextTemplate[] { new TextTemplate("room") }; - HipChatMessage.Template template = new HipChatMessage.Template(body, rooms, null, "sender", null, - new TextTemplate("yellow"), true); - - HipChatMessage message = userAccount.render("watchId", "actionId", new MockTextTemplateEngine(), template, new HashMap<>()); - assertThat(message.format, is(nullValue())); - } - - public void testRoomNameIsUrlEncoded() throws Exception { - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(IntegrationAccount.SECURE_AUTH_TOKEN_SETTING.getKey(), "awesome-auth-token"); - Settings settings = Settings.builder() - .put("user", "testuser") - .setSecureSettings(secureSettings) - .build(); - HipChatServer hipChatServer = mock(HipChatServer.class); - HttpClient httpClient = mock(HttpClient.class); - UserAccount account = new UserAccount("notify-monitoring", settings, hipChatServer, httpClient, logger); - - TextTemplate[] rooms = new TextTemplate[] { new TextTemplate("Room with Spaces")}; - HipChatMessage.Template template = - new HipChatMessage.Template(new TextTemplate("body"), rooms, null, "sender", HipChatMessage.Format.TEXT, null, true); - - HipChatMessage message = account.render("watchId", "actionId", new MockTextTemplateEngine(), template, new HashMap<>()); - account.send(message, HttpProxy.NO_PROXY); - - ArgumentCaptor captor = ArgumentCaptor.forClass(HttpRequest.class); - verify(httpClient).execute(captor.capture()); - assertThat(captor.getAllValues(), hasSize(1)); - assertThat(captor.getValue().path(), not(containsString("Room with Spaces"))); - assertThat(captor.getValue().path(), containsString("Room%20with%20Spaces")); - } - - private UserAccount createUserAccount(Settings settings) { - HipChatServer hipChatServer = mock(HipChatServer.class); - HttpClient httpClient = mock(HttpClient.class); - return new UserAccount("notify-monitoring", settings, hipChatServer, httpClient, logger); - } -} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/V1AccountTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/V1AccountTests.java deleted file mode 100644 index 105965539f604..0000000000000 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/V1AccountTests.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
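
testRoomNameIsUrlEncoded above expects "Room with Spaces" to show up as "Room%20with%20Spaces" in the notification path. A plain-JDK sketch of why: the room name is embedded in a URI path segment, so spaces must be percent-encoded (the host and path below are illustrative only, not taken from this change).

```java
import java.net.URI;
import java.net.URISyntaxException;

public final class RoomPathEncoding {
    public static void main(String[] args) throws URISyntaxException {
        String room = "Room with Spaces";
        // The multi-argument URI constructor percent-encodes characters that are
        // illegal in a path, which is what the deleted test asserts indirectly.
        URI uri = new URI("https", "hipchat.example.org", "/v2/room/" + room + "/notification", null);
        System.out.println(uri.getRawPath()); // /v2/room/Room%20with%20Spaces/notification
    }
}
```
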
- */ -package org.elasticsearch.xpack.watcher.notification.hipchat; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.watcher.common.http.HttpClient; -import org.elasticsearch.xpack.watcher.common.http.HttpMethod; -import org.elasticsearch.xpack.watcher.common.http.HttpRequest; -import org.elasticsearch.xpack.watcher.common.http.HttpResponse; -import org.elasticsearch.xpack.watcher.common.http.Scheme; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class V1AccountTests extends ESTestCase { - public void testSettings() throws Exception { - String accountName = "_name"; - - Settings.Builder sb = Settings.builder(); - - String authToken = randomAlphaOfLength(50); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(V1Account.SECURE_AUTH_TOKEN_SETTING.getKey(), authToken); - sb.setSecureSettings(secureSettings); - - String host = HipChatServer.DEFAULT.host(); - if (randomBoolean()) { - host = randomAlphaOfLength(10); - sb.put("host", host); - } - int port = HipChatServer.DEFAULT.port(); - if (randomBoolean()) { - port = randomIntBetween(300, 400); - sb.put("port", port); - } - - String[] defaultRooms = null; - if (randomBoolean()) { - defaultRooms = new String[] { "_r1", "_r2" }; - sb.put(HipChatAccount.DEFAULT_ROOM_SETTING, "_r1,_r2"); - } - String defaultFrom = null; - if (randomBoolean()) { - defaultFrom = randomAlphaOfLength(10); - sb.put(HipChatAccount.DEFAULT_FROM_SETTING, defaultFrom); - } - HipChatMessage.Format defaultFormat = null; - if (randomBoolean()) { - defaultFormat = randomFrom(HipChatMessage.Format.values()); - sb.put(HipChatAccount.DEFAULT_FORMAT_SETTING, defaultFormat); - } - HipChatMessage.Color defaultColor = null; - if (randomBoolean()) { - defaultColor = randomFrom(HipChatMessage.Color.values()); - sb.put(HipChatAccount.DEFAULT_COLOR_SETTING, defaultColor); - } - Boolean defaultNotify = null; - if (randomBoolean()) { - defaultNotify = randomBoolean(); - sb.put(HipChatAccount.DEFAULT_NOTIFY_SETTING, defaultNotify); - } - Settings settings = sb.build(); - - V1Account account = new V1Account(accountName, settings, HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); - - assertThat(account.profile, is(HipChatAccount.Profile.V1)); - assertThat(account.name, equalTo(accountName)); - assertThat(account.server.host(), is(host)); - assertThat(account.server.port(), is(port)); - assertThat(account.authToken, is(authToken)); - if (defaultRooms != null) { - assertThat(account.defaults.rooms, arrayContaining(defaultRooms)); - } else { - assertThat(account.defaults.rooms, nullValue()); - } - assertThat(account.defaults.from, is(defaultFrom)); - assertThat(account.defaults.format, is(defaultFormat)); - assertThat(account.defaults.color, is(defaultColor)); - assertThat(account.defaults.notify, is(defaultNotify)); - } - - public void testSettingsNoAuthToken() throws Exception { - 
Settings.Builder sb = Settings.builder(); - try { - new V1Account("_name", sb.build(), HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); - fail("Expected SettingsException"); - } catch (SettingsException e) { - assertThat(e.getMessage(), is("hipchat account [_name] missing required [secure_auth_token] secure setting")); - } - } - - public void testSend() throws Exception { - HttpClient httpClient = mock(HttpClient.class); - String authToken = randomAlphaOfLength(50); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(IntegrationAccount.SECURE_AUTH_TOKEN_SETTING.getKey(), "_token"); - V1Account account = new V1Account("_name", Settings.builder() - .put("host", "_host") - .put("port", "443") - .setSecureSettings(secureSettings) - .build(), HipChatServer.DEFAULT, httpClient, mock(Logger.class)); - - HipChatMessage.Format format = randomFrom(HipChatMessage.Format.values()); - HipChatMessage.Color color = randomFrom(HipChatMessage.Color.values()); - Boolean notify = randomBoolean(); - HipChatMessage message = new HipChatMessage("_body", new String[] { "Room with Spaces", "_r2" }, null, "_from", format, - color, notify); - - HttpRequest req1 = HttpRequest.builder("_host", 443) - .method(HttpMethod.POST) - .scheme(Scheme.HTTPS) - .path("/v1/rooms/message") - .setHeader("Content-Type", "application/x-www-form-urlencoded") - .setParam("format", "json") - .setParam("auth_token", "_token") - .body(new StringBuilder() - .append("room_id=").append("Room+with+Spaces&") - .append("from=").append("_from&") - .append("message=").append("_body&") - .append("message_format=").append(format.value()).append("&") - .append("color=").append(color.value()).append("&") - .append("notify=").append(notify ? "1" : "0") - .toString()) - .build(); - - logger.info("expected (r1): {}", BytesReference.bytes(jsonBuilder().value(req1)).utf8ToString()); - - HttpResponse res1 = mock(HttpResponse.class); - when(res1.status()).thenReturn(200); - when(httpClient.execute(req1)).thenReturn(res1); - - HttpRequest req2 = HttpRequest.builder("_host", 443) - .method(HttpMethod.POST) - .scheme(Scheme.HTTPS) - .path("/v1/rooms/message") - .setHeader("Content-Type", "application/x-www-form-urlencoded") - .setParam("format", "json") - .setParam("auth_token", "_token") - .body(new StringBuilder() - .append("room_id=").append("_r2&") - .append("from=").append("_from&") - .append("message=").append("_body&") - .append("message_format=").append(format.value()).append("&") - .append("color=").append(color.value()).append("&") - .append("notify=").append(notify ? 
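
The V1Account test above builds an application/x-www-form-urlencoded body by hand, which is why the same room name appears as "Room+with+Spaces" in the body rather than "Room%20with%20Spaces" as in the v2 path. A plain-JDK sketch of that form encoding, with hypothetical parameter values:

```java
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

public final class FormBodySketch {
    static String encode(Map<String, String> params) {
        // key=value pairs joined with '&', values form-encoded (spaces become '+')
        return params.entrySet().stream()
            .map(e -> e.getKey() + "=" + urlEncode(e.getValue()))
            .collect(Collectors.joining("&"));
    }

    private static String urlEncode(String value) {
        try {
            return URLEncoder.encode(value, StandardCharsets.UTF_8.name());
        } catch (UnsupportedEncodingException e) {
            throw new AssertionError(e); // UTF-8 is always available
        }
    }

    public static void main(String[] args) {
        Map<String, String> params = new LinkedHashMap<>();
        params.put("room_id", "Room with Spaces");
        params.put("from", "_from");
        params.put("message", "_body");
        params.put("notify", "1");
        System.out.println(encode(params)); // room_id=Room+with+Spaces&from=_from&message=_body&notify=1
    }
}
```
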
"1" : "0") - .toString()) - .build(); - - logger.info("expected (r2): {}", BytesReference.bytes(jsonBuilder().value(req2)).utf8ToString()); - - HttpResponse res2 = mock(HttpResponse.class); - when(res2.status()).thenReturn(200); - when(httpClient.execute(req2)).thenReturn(res2); - - account.send(message, null); - - verify(httpClient).execute(req1); - verify(httpClient).execute(req2); - } -} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java index 60ca2b83b2f85..bd55e75795382 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java @@ -31,14 +31,16 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType; import org.elasticsearch.xpack.core.indexlifecycle.action.PutLifecycleAction; import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; @@ -52,15 +54,18 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static org.elasticsearch.mock.orig.Mockito.verify; import static org.elasticsearch.mock.orig.Mockito.when; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyObject; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verifyZeroInteractions; @@ -68,11 +73,13 @@ public class WatcherIndexTemplateRegistryTests extends ESTestCase { private WatcherIndexTemplateRegistry registry; private NamedXContentRegistry xContentRegistry; + private ClusterService clusterService; + private ThreadPool threadPool; private Client client; @Before public void createRegistryAndClient() { - ThreadPool threadPool = mock(ThreadPool.class); + threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); when(threadPool.generic()).thenReturn(EsExecutors.newDirectExecutorService()); @@ -89,14 +96,14 @@ public void createRegistryAndClient() { return null; }).when(indicesAdminClient).putTemplate(any(PutIndexTemplateRequest.class), any(ActionListener.class)); - ClusterService clusterService = mock(ClusterService.class); + clusterService = 
mock(ClusterService.class); List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); entries.addAll(Arrays.asList( new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), (p) -> TimeseriesLifecycleType.INSTANCE), new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse))); xContentRegistry = new NamedXContentRegistry(entries); - registry = new WatcherIndexTemplateRegistry(clusterService, threadPool, client, xContentRegistry); + registry = new WatcherIndexTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, xContentRegistry); } public void testThatNonExistingTemplatesAreAddedImmediately() { @@ -110,9 +117,37 @@ public void testThatNonExistingTemplatesAreAddedImmediately() { // now delete one template from the cluster state and lets retry ClusterChangedEvent newEvent = createClusterChangedEvent(Arrays.asList(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME, - WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME), nodes); + WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME), nodes); registry.clusterChanged(newEvent); - verify(client.admin().indices(), times(4)).putTemplate(argumentCaptor.capture(), anyObject()); + ArgumentCaptor captor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class); + verify(client.admin().indices(), times(4)).putTemplate(captor.capture(), anyObject()); + PutIndexTemplateRequest req = captor.getAllValues().stream() + .filter(r -> r.name().equals(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME)) + .findFirst() + .orElseThrow(() -> new AssertionError("expected the watch history template to be put")); + assertThat(req.settings().get("index.lifecycle.name"), equalTo("watch-history-ilm-policy")); + } + + public void testThatNonExistingTemplatesAreAddedEvenWithILMDisabled() { + DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + registry = new WatcherIndexTemplateRegistry(Settings.builder() + .put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), false).build(), + clusterService, threadPool, client, xContentRegistry); + ClusterChangedEvent event = createClusterChangedEvent(Settings.EMPTY, Collections.emptyList(), Collections.emptyMap(), nodes); + registry.clusterChanged(event); + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class); + verify(client.admin().indices(), times(3)).putTemplate(argumentCaptor.capture(), anyObject()); + + // now delete one template from the cluster state and lets retry + ClusterChangedEvent newEvent = createClusterChangedEvent(Arrays.asList(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME, + WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME), nodes); + registry.clusterChanged(newEvent); + ArgumentCaptor captor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class); + verify(client.admin().indices(), times(5)).putTemplate(captor.capture(), anyObject()); + captor.getAllValues().forEach(req -> assertNull(req.settings().get("index.lifecycle.name"))); + verify(client, times(0)).execute(eq(PutLifecycleAction.INSTANCE), anyObject(), anyObject()); } public void testThatNonExistingPoliciesAreAddedImmediately() { @@ -136,6 +171,18 @@ public void testPolicyAlreadyExists() { verify(client, times(0)).execute(eq(PutLifecycleAction.INSTANCE), anyObject(), anyObject()); } + public void 
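
The reworked registry tests above stop pre-building the exact expected request and instead capture whatever reached the mocked indices client, then assert on its name and its index.lifecycle.name setting. A generic sketch of that ArgumentCaptor pattern, with hypothetical TemplateClient/TemplateRequest types standing in for the real Elasticsearch classes:

```java
import org.mockito.ArgumentCaptor;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

public final class CaptorSketch {
    interface TemplateClient { void putTemplate(TemplateRequest request); }

    static final class TemplateRequest {
        final String name;
        final String lifecyclePolicy;
        TemplateRequest(String name, String lifecyclePolicy) {
            this.name = name;
            this.lifecyclePolicy = lifecyclePolicy;
        }
    }

    public static void main(String[] args) {
        TemplateClient client = mock(TemplateClient.class);
        // In the real test these calls come from the code under test; here they are
        // made directly so the sketch is self-contained.
        client.putTemplate(new TemplateRequest(".watch-history", "watch-history-ilm-policy"));
        client.putTemplate(new TemplateRequest(".triggered_watches", null));

        ArgumentCaptor<TemplateRequest> captor = ArgumentCaptor.forClass(TemplateRequest.class);
        verify(client, times(2)).putTemplate(captor.capture());

        TemplateRequest history = captor.getAllValues().stream()
            .filter(r -> ".watch-history".equals(r.name))
            .findFirst()
            .orElseThrow(() -> new AssertionError("expected the history template to be put"));
        if (!"watch-history-ilm-policy".equals(history.lifecyclePolicy)) {
            throw new AssertionError("unexpected lifecycle policy: " + history.lifecyclePolicy);
        }
    }
}
```
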
testNoPolicyButILMDisabled() { + DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + registry = new WatcherIndexTemplateRegistry(Settings.builder() + .put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), false).build(), + clusterService, threadPool, client, xContentRegistry); + ClusterChangedEvent event = createClusterChangedEvent(Settings.EMPTY, Collections.emptyList(), Collections.emptyMap(), nodes); + registry.clusterChanged(event); + verify(client, times(0)).execute(eq(PutLifecycleAction.INSTANCE), anyObject(), anyObject()); + } + public void testPolicyAlreadyExistsButDiffers() throws IOException { DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); @@ -206,31 +253,45 @@ private ClusterChangedEvent createClusterChangedEvent(List existingTempl return createClusterChangedEvent(existingTemplateNames, Collections.emptyMap(), nodes); } - private ClusterChangedEvent createClusterChangedEvent(List existingTemplateNames, - Map existingPolicies, - DiscoveryNodes nodes) { - ClusterChangedEvent event = mock(ClusterChangedEvent.class); - when(event.localNodeMaster()).thenReturn(nodes.isLocalNodeElectedMaster()); - ClusterState cs = mock(ClusterState.class); - ClusterBlocks clusterBlocks = mock(ClusterBlocks.class); - when(clusterBlocks.hasGlobalBlock(eq(GatewayService.STATE_NOT_RECOVERED_BLOCK))).thenReturn(false); - when(cs.blocks()).thenReturn(clusterBlocks); - when(event.state()).thenReturn(cs); - - when(cs.getNodes()).thenReturn(nodes); - - MetaData metaData = mock(MetaData.class); + private ClusterState createClusterState(Settings nodeSettings, + List existingTemplateNames, + Map existingPolicies, + DiscoveryNodes nodes) { ImmutableOpenMap.Builder indexTemplates = ImmutableOpenMap.builder(); for (String name : existingTemplateNames) { indexTemplates.put(name, mock(IndexTemplateMetaData.class)); } - when(metaData.getTemplates()).thenReturn(indexTemplates.build()); + Map existingILMMeta = existingPolicies.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new LifecyclePolicyMetadata(e.getValue(), Collections.emptyMap(), 1, 1))); + IndexLifecycleMetadata ilmMeta = new IndexLifecycleMetadata(existingILMMeta, OperationMode.RUNNING); + + return ClusterState.builder(new ClusterName("test")) + .metaData(MetaData.builder() + .templates(indexTemplates.build()) + .transientSettings(nodeSettings) + .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) + .build()) + .blocks(new ClusterBlocks.Builder().build()) + .nodes(nodes) + .build(); + } - IndexLifecycleMetadata ilmMeta = mock(IndexLifecycleMetadata.class); - when(ilmMeta.getPolicies()).thenReturn(existingPolicies); - when(metaData.custom(anyObject())).thenReturn(ilmMeta); - when(cs.metaData()).thenReturn(metaData); + private ClusterChangedEvent createClusterChangedEvent(List existingTemplateNames, + Map existingPolicies, + DiscoveryNodes nodes) { + return createClusterChangedEvent(Settings.EMPTY, existingTemplateNames, existingPolicies, nodes); + } + + private ClusterChangedEvent createClusterChangedEvent(Settings nodeSettings, + List existingTemplateNames, + Map existingPolicies, + DiscoveryNodes nodes) { + ClusterState cs = createClusterState(nodeSettings, existingTemplateNames, existingPolicies, nodes); + 
ClusterChangedEvent realEvent = new ClusterChangedEvent("created-from-test", cs, + ClusterState.builder(new ClusterName("test")).build()); + ClusterChangedEvent event = spy(realEvent); + when(event.localNodeMaster()).thenReturn(nodes.isLocalNodeElectedMaster()); return event; } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index f710531532b87..8c44ba831b359 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -5,8 +5,12 @@ */ package org.elasticsearch.xpack.watcher.test; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -14,6 +18,7 @@ import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.network.NetworkModule; @@ -22,6 +27,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.XPackLicenseState; @@ -50,7 +56,6 @@ import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.indexlifecycle.IndexLifecycle; -import org.elasticsearch.xpack.watcher.history.HistoryStore; import org.elasticsearch.xpack.watcher.notification.email.Authentication; import org.elasticsearch.xpack.watcher.notification.email.Email; import org.elasticsearch.xpack.watcher.notification.email.EmailService; @@ -194,7 +199,7 @@ public void _setup() throws Exception { internalCluster().setDisruptionScheme(ice); ice.startDisrupting(); } - + stopWatcher(); createWatcherIndicesOrAliases(); startWatcher(); } @@ -221,13 +226,19 @@ private void createWatcherIndicesOrAliases() throws Exception { // alias for .watches, setting the index template to the same as well String watchIndexName; String triggeredWatchIndexName; - if (rarely()) { - watchIndexName = ".watches-alias-index"; - CreateIndexResponse response = client().admin().indices().prepareCreate(watchIndexName) + if (randomBoolean()) { + // Create an index to get the template + String tempIndex = ".watches" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + CreateIndexResponse response = client().admin().indices().prepareCreate(tempIndex) .setCause("Index to test aliases with 
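
The new createClusterChangedEvent helper above builds a real ClusterState and then spies the resulting ClusterChangedEvent so that only localNodeMaster() is stubbed. A generic sketch of that partial-stubbing pattern, with a hypothetical ClusterInfo type; the doReturn form is used here because it avoids invoking the real method while stubbing a spy.

```java
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

public final class SpySketch {
    public static class ClusterInfo {
        public boolean localNodeMaster() { return false; }
        public String clusterName() { return "test"; }
    }

    public static void main(String[] args) {
        ClusterInfo partial = spy(new ClusterInfo());
        doReturn(true).when(partial).localNodeMaster(); // only this accessor is overridden

        System.out.println(partial.localNodeMaster());  // true  (stubbed)
        System.out.println(partial.clusterName());      // test  (real implementation)
    }
}
```
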
.watches index") .addAlias(new Alias(Watch.INDEX)) .get(); assertAcked(response); + + // Now replace it with a randomly named index + watchIndexName = randomAlphaOfLengthBetween(5,10).toLowerCase(Locale.ROOT); + replaceWatcherIndexWithRandomlyNamedIndex(Watch.INDEX, watchIndexName); + logger.info("set alias for .watches index to [{}]", watchIndexName); } else { watchIndexName = Watch.INDEX; @@ -239,13 +250,18 @@ private void createWatcherIndicesOrAliases() throws Exception { } // alias for .triggered-watches, ensuring the index template is set appropriately - if (rarely()) { - triggeredWatchIndexName = ".triggered_watches-alias-index"; - CreateIndexResponse response = client().admin().indices().prepareCreate(triggeredWatchIndexName) + if (randomBoolean()) { + String tempIndex = ".triggered_watches-alias-index"; + CreateIndexResponse response = client().admin().indices().prepareCreate(tempIndex) .setCause("Index to test aliases with .triggered-watches index") .addAlias(new Alias(TriggeredWatchStoreField.INDEX_NAME)) .get(); assertAcked(response); + + // Now replace it with a randomly-named index + triggeredWatchIndexName = randomValueOtherThan(watchIndexName, + () -> randomAlphaOfLengthBetween(5,10).toLowerCase(Locale.ROOT)); + replaceWatcherIndexWithRandomlyNamedIndex(TriggeredWatchStoreField.INDEX_NAME, triggeredWatchIndexName); logger.info("set alias for .triggered-watches index to [{}]", triggeredWatchIndexName); } else { triggeredWatchIndexName = TriggeredWatchStoreField.INDEX_NAME; @@ -259,6 +275,38 @@ private void createWatcherIndicesOrAliases() throws Exception { } } + public void replaceWatcherIndexWithRandomlyNamedIndex(String originalIndexOrAlias, String to) { + GetIndexResponse index = client().admin().indices().prepareGetIndex().setIndices(originalIndexOrAlias).get(); + MappingMetaData mapping = index.getMappings().get(index.getIndices()[0]).get(MapperService.SINGLE_MAPPING_NAME); + + Settings settings = index.getSettings().get(index.getIndices()[0]); + Settings.Builder newSettings = Settings.builder().put(settings); + newSettings.remove("index.provided_name"); + newSettings.remove("index.uuid"); + newSettings.remove("index.creation_date"); + newSettings.remove("index.version.created"); + + CreateIndexResponse createIndexResponse = client().admin().indices().prepareCreate(to) + .addMapping(MapperService.SINGLE_MAPPING_NAME, mapping.sourceAsMap()) + .setSettings(newSettings) + .get(); + assertTrue(createIndexResponse.isAcknowledged()); + ensureGreen(to); + + AtomicReference originalIndex = new AtomicReference<>(originalIndexOrAlias); + boolean watchesIsAlias = client().admin().indices().prepareAliasesExist(originalIndexOrAlias).get().isExists(); + if (watchesIsAlias) { + GetAliasesResponse aliasesResponse = client().admin().indices().prepareGetAliases(originalIndexOrAlias).get(); + assertEquals(1, aliasesResponse.getAliases().size()); + aliasesResponse.getAliases().forEach((aliasRecord) -> { + assertEquals(1, aliasRecord.value.size()); + originalIndex.set(aliasRecord.key); + }); + } + client().admin().indices().prepareDelete(originalIndex.get()).get(); + client().admin().indices().prepareAliases().addAlias(to, originalIndexOrAlias).get(); + } + protected TimeWarp timeWarp() { assert timeWarped() : "cannot access TimeWarp when test context is not time warped"; return timeWarp; @@ -268,22 +316,18 @@ public boolean randomizeNumberOfShardsAndReplicas() { return false; } - protected long docCount(String index, String type, QueryBuilder query) { + protected long docCount(String index, 
QueryBuilder query) { refresh(); - return docCount(index, type, SearchSourceBuilder.searchSource().query(query)); + return docCount(index, SearchSourceBuilder.searchSource().query(query)); } protected long watchRecordCount(QueryBuilder query) { refresh(); - return docCount(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*", - HistoryStore.DOC_TYPE, SearchSourceBuilder.searchSource().query(query)); + return docCount(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*", SearchSourceBuilder.searchSource().query(query)); } - protected long docCount(String index, String type, SearchSourceBuilder source) { + protected long docCount(String index, SearchSourceBuilder source) { SearchRequestBuilder builder = client().prepareSearch(index).setSource(source).setSize(0); - if (type != null) { - builder.setTypes(type); - } return builder.get().getHits().getTotalHits().value; } @@ -360,7 +404,7 @@ protected void assertWatchWithMinimumPerformedActionsCount(final String watchNam protected SearchResponse searchWatchRecords(Consumer requestBuilderCallback) { SearchRequestBuilder builder = - client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*").setTypes(HistoryStore.DOC_TYPE); + client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*"); requestBuilderCallback.accept(builder); return builder.get(); } @@ -538,6 +582,7 @@ public EmailSent send(Email email, Authentication auth, Profile profile, String } protected static class TimeWarp { + private static final Logger logger = LogManager.getLogger(TimeWarp.class); private final List schedulers; private final ClockMock clock; @@ -556,9 +601,14 @@ public ClockMock clock() { } public void trigger(String watchId, int times, TimeValue timeValue) { - boolean isTriggered = schedulers.stream().anyMatch(scheduler -> scheduler.trigger(watchId, times, timeValue)); - String msg = String.format(Locale.ROOT, "could not find watch [%s] to trigger", watchId); - assertThat(msg, isTriggered, is(true)); + long triggeredCount = schedulers.stream() + .filter(scheduler -> scheduler.trigger(watchId, times, timeValue)) + .count(); + String msg = String.format(Locale.ROOT, "watch was triggered on [%d] schedulers, expected [1]", triggeredCount); + if (triggeredCount > 1) { + logger.warn(msg); + } + assertThat(msg, triggeredCount, greaterThanOrEqualTo(1L)); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java index 7f7ff8c93601b..9636d159b52b4 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.core.watcher.common.secret.Secret; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; import org.elasticsearch.xpack.core.watcher.watch.Payload; @@ -52,6 +53,7 @@ import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.hamcrest.Matcher; 
import javax.mail.internet.AddressException; import java.io.IOException; @@ -69,6 +71,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.hamcrest.Matchers.is; public final class WatcherTestUtils { @@ -188,4 +191,13 @@ public static Watch createTestWatch(String watchName, Client client, HttpClient public static SearchType getRandomSupportedSearchType() { return randomFrom(SearchType.QUERY_THEN_FETCH, SearchType.DFS_QUERY_THEN_FETCH); } + + public static Matcher isSameDate(ZonedDateTime zonedDateTime) { + /* + When comparing timestamps returned from _search/.watcher-history* the same format of date has to be used + during serialisation to json on index time. + The toString of ZonedDateTime is omitting the millisecond part when is 0. This was not the case in joda. + */ + return is(WatcherDateTimeUtils.formatDate(zonedDateTime)); + } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java index e8cfa2e1d3b6f..b530811e1c7e4 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java @@ -74,7 +74,7 @@ public static final class SmallSearchInput extends WatcherExecutorServiceBenchma public static void main(String[] args) throws Exception { start(); client.admin().indices().prepareCreate("test").get(); - client.prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); + client.prepareIndex().setIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); int numAlerts = 1000; for (int i = 0; i < numAlerts; i++) { @@ -129,7 +129,7 @@ public static void main(String[] args) throws Exception { .input(searchInput(templateRequest(new SearchSourceBuilder(), "test")) .extractKeys("hits.total.value")) .condition(new ScriptCondition(new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "1 == 1", emptyMap()))) - .addAction("_id", indexAction("index", "type")).buildAsBytes(XContentType.JSON), XContentType.JSON); + .addAction("_id", indexAction("index")).buildAsBytes(XContentType.JSON), XContentType.JSON); putAlertRequest.setId(name); watcherClient.putWatch(putAlertRequest).actionGet(); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java index cd24c43cbcaf8..9c7c9027e7f85 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java @@ -110,12 +110,12 @@ public static void main(String[] args) throws Exception { client.admin().indices().prepareDelete("_all").get(); client.admin().indices().prepareCreate("test").get(); - client.prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); + client.prepareIndex().setIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); System.out.println("===============> indexing [" + 
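
The isSameDate matcher added above exists because ZonedDateTime.toString() is not a stable wire format: when the fractional second is zero it is dropped entirely, while the value indexed into .watcher-history keeps a fixed millisecond pattern. A standalone sketch of the mismatch; the formatter below is only a stand-in for what WatcherDateTimeUtils produces.

```java
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;

public final class DateFormatSketch {
    public static void main(String[] args) {
        ZonedDateTime wholeSecond = ZonedDateTime.of(2019, 5, 1, 12, 0, 30, 0, ZoneOffset.UTC);

        // Stand-in pattern with explicit milliseconds, assumed for illustration only.
        DateTimeFormatter millisFormat = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX");

        System.out.println(wholeSecond.toString());           // 2019-05-01T12:00:30Z      (millis omitted)
        System.out.println(wholeSecond.format(millisFormat)); // 2019-05-01T12:00:30.000Z  (fixed width)
        // Comparing the two strings directly fails, hence the dedicated matcher.
    }
}
```
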
numWatches + "] watches"); for (int i = 0; i < numWatches; i++) { final String id = "_id_" + i; - client.prepareIndex(Watch.INDEX, Watch.DOC_TYPE, id) + client.prepareIndex().setIndex(Watch.INDEX).setId(id) .setSource(new WatchSourceBuilder() .trigger(schedule(interval(interval + "s"))) .input(searchInput(templateRequest(new SearchSourceBuilder(), "test"))) diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java index 5b62bca40fb09..2f2299d7d65e0 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java @@ -160,7 +160,7 @@ public void testMalformedWatch() throws Exception { // In watch store we fail parsing if an watch contains undefined fields. } try { - client().prepareIndex(Watch.INDEX, Watch.DOC_TYPE, "_name") + client().prepareIndex().setIndex(Watch.INDEX).setId("_name") .setSource(watchSource) .get(); fail(); @@ -177,7 +177,7 @@ public void testModifyWatches() throws Exception { WatchSourceBuilder source = watchBuilder() .trigger(schedule(interval("5s"))) .input(searchInput(searchRequest)) - .addAction("_id", indexAction("idx", "action")); + .addAction("_id", indexAction("idx")); watcherClient().preparePutWatch("_name") .setSource(source.condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 1L))) diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java index 6382909f96f33..0b8ef459e761c 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.xpack.watcher.condition.CompareCondition; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; import org.elasticsearch.xpack.watcher.execution.TriggeredWatch; -import org.elasticsearch.xpack.watcher.history.HistoryStore; import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; @@ -62,7 +61,7 @@ protected boolean timeWarped() { } public void testLoadMalformedWatchRecord() throws Exception { - client().prepareIndex(Watch.INDEX, Watch.DOC_TYPE, "_id") + client().prepareIndex().setIndex(Watch.INDEX).setId("_id") .setSource(jsonBuilder().startObject() .startObject(WatchField.TRIGGER.getPreferredName()) .startObject("schedule") @@ -80,7 +79,7 @@ public void testLoadMalformedWatchRecord() throws Exception { ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); ExecutableCondition condition = InternalAlwaysCondition.INSTANCE; String index = HistoryStoreField.getHistoryIndexNameForTime(now); - client().prepareIndex(index, HistoryStore.DOC_TYPE, wid.value()) + client().prepareIndex().setIndex(index).setId(wid.value()) .setSource(jsonBuilder().startObject() .startObject(WatchRecord.TRIGGER_EVENT.getPreferredName()) .field(event.type(), event) @@ -98,7 +97,7 @@ public void 
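
The benchmark and test changes above and below migrate from prepareIndex(index, type, id) to typeless requests. A small sketch of the two equivalent typeless forms, assuming a 7.x-era Client; index names and payloads are illustrative only.

```java
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.xcontent.XContentType;

public final class TypelessIndexing {
    static IndexResponse indexViaBuilder(Client client) {
        // old style: client.prepareIndex("test", "test", "1") -- the type argument is gone
        return client.prepareIndex()
            .setIndex("test")
            .setId("1")
            .setSource("{}", XContentType.JSON)
            .get();
    }

    static IndexResponse indexViaRequest(Client client) {
        IndexRequest request = new IndexRequest("test")   // index only, no mapping type
            .id("1")
            .source("{\"field\":\"value\"}", XContentType.JSON);
        return client.index(request).actionGet();
    }
}
```
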
testLoadMalformedWatchRecord() throws Exception { // unknown condition: wid = new Wid("_id", now); - client().prepareIndex(index, HistoryStore.DOC_TYPE, wid.value()) + client().prepareIndex().setIndex(index).setId(wid.value()) .setSource(jsonBuilder().startObject() .startObject(WatchRecord.TRIGGER_EVENT.getPreferredName()) .field(event.type(), event) @@ -116,7 +115,7 @@ public void testLoadMalformedWatchRecord() throws Exception { // unknown trigger: wid = new Wid("_id", now); - client().prepareIndex(index, HistoryStore.DOC_TYPE, wid.value()) + client().prepareIndex().setIndex(index).setId(wid.value()) .setSource(jsonBuilder().startObject() .startObject(WatchRecord.TRIGGER_EVENT.getPreferredName()) .startObject("unknown").endObject() @@ -141,7 +140,6 @@ public void testLoadMalformedWatchRecord() throws Exception { }); } - @AwaitsFix(bugUrl = "Supposedly fixed; https://github.com/elastic/x-pack-elasticsearch/issues/1915") public void testLoadExistingWatchesUponStartup() throws Exception { stopWatcher(); @@ -152,7 +150,7 @@ public void testLoadExistingWatchesUponStartup() throws Exception { BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); for (int i = 0; i < numWatches; i++) { bulkRequestBuilder.add( - client().prepareIndex(Watch.INDEX, Watch.DOC_TYPE, "_id" + i) + client().prepareIndex().setIndex(Watch.INDEX).setId("_id" + i) .setSource(watchBuilder() .trigger(schedule(cron("0 0/5 * * * ? 2050"))) .input(searchInput(request)) @@ -174,7 +172,7 @@ public void testLoadExistingWatchesUponStartup() throws Exception { public void testMixedTriggeredWatchLoading() throws Exception { createIndex("output"); - client().prepareIndex("my-index", "foo", "bar") + client().prepareIndex().setIndex("my-index").setId("bar") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .setSource("field", "value").get(); @@ -191,7 +189,7 @@ public void testMixedTriggeredWatchLoading() throws Exception { .trigger(schedule(cron("0/5 * * * * ? 2050"))) .input(searchInput(request)) .condition(InternalAlwaysCondition.INSTANCE) - .addAction("_id", indexAction("output", "test")) + .addAction("_id", indexAction("output")) .defaultThrottlePeriod(TimeValue.timeValueMillis(0)) ).get(); } @@ -208,10 +206,9 @@ public void testMixedTriggeredWatchLoading() throws Exception { Wid wid = new Wid(watchId, now); TriggeredWatch triggeredWatch = new TriggeredWatch(wid, event); bulkRequestBuilder.add( - client().prepareIndex( - TriggeredWatchStoreField.INDEX_NAME, - TriggeredWatchStoreField.DOC_TYPE, - triggeredWatch.id().value()) + client().prepareIndex() + .setIndex(TriggeredWatchStoreField.INDEX_NAME) + .setId(triggeredWatch.id().value()) .setSource(jsonBuilder().value(triggeredWatch)) .request()); } @@ -225,7 +222,7 @@ public void testMixedTriggeredWatchLoading() throws Exception { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29846") public void testTriggeredWatchLoading() throws Exception { createIndex("output"); - client().prepareIndex("my-index", "foo", "bar") + client().prepareIndex().setIndex("my-index").setId("bar") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .setSource("field", "value").get(); @@ -238,7 +235,7 @@ public void testTriggeredWatchLoading() throws Exception { .trigger(schedule(cron("0/5 * * * * ? 
2050"))) .input(searchInput(request)) .condition(InternalAlwaysCondition.INSTANCE) - .addAction("_id", indexAction("output", "test")) + .addAction("_id", indexAction("output")) .defaultThrottlePeriod(TimeValue.timeValueMillis(0)) ).get(); @@ -253,7 +250,7 @@ public void testTriggeredWatchLoading() throws Exception { Wid wid = new Wid(watchId, now); TriggeredWatch triggeredWatch = new TriggeredWatch(wid, event); bulkRequestBuilder.add(client() - .prepareIndex(TriggeredWatchStoreField.INDEX_NAME, TriggeredWatchStoreField.DOC_TYPE, triggeredWatch.id().value()) + .prepareIndex().setIndex(TriggeredWatchStoreField.INDEX_NAME).setId(triggeredWatch.id().value()) .setSource(jsonBuilder().value(triggeredWatch)) .setWaitForActiveShards(ActiveShardCount.ALL) ); @@ -325,13 +322,13 @@ public void testWatchRecordSavedTwice() throws Exception { Wid wid = new Wid(watchId, triggeredTime); TriggeredWatch triggeredWatch = new TriggeredWatch(wid, event); bulkRequestBuilder.add( - client().prepareIndex(TriggeredWatchStoreField.INDEX_NAME, - TriggeredWatchStoreField.DOC_TYPE, triggeredWatch.id().value()).setSource(jsonBuilder().value(triggeredWatch)) + client().prepareIndex().setIndex(TriggeredWatchStoreField.INDEX_NAME) + .setId(triggeredWatch.id().value()).setSource(jsonBuilder().value(triggeredWatch)) ); String id = internalCluster().getInstance(ClusterService.class).localNode().getId(); WatchRecord watchRecord = new WatchRecord.MessageWatchRecord(wid, event, ExecutionState.EXECUTED, "executed", id); - bulkRequestBuilder.add(client().prepareIndex(watchRecordIndex, HistoryStore.DOC_TYPE, watchRecord.id().value()) + bulkRequestBuilder.add(client().prepareIndex().setIndex(watchRecordIndex).setId(watchRecord.id().value()) .setSource(jsonBuilder().value(watchRecord)) ); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java index 5c9dafeaca001..d58664fb40760 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java @@ -21,11 +21,13 @@ import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; import java.util.Locale; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -97,16 +99,19 @@ public void testFailedInputResultWithDotsInFieldNameGetsStored() throws Exceptio assertHitCount(searchResponse, 1); // as fields with dots are allowed in 5.0 again, the mapping must be checked in addition - GetMappingsResponse response = client().admin().indices().prepareGetMappings(".watcher-history*").addTypes("doc").get(); - byte[] bytes = 
response.getMappings().values().iterator().next().value.get("doc").source().uncompressed(); + GetMappingsResponse response = client().admin().indices().prepareGetMappings(".watcher-history*") + .addTypes(SINGLE_MAPPING_NAME).get(); + byte[] bytes = response.getMappings().values().iterator().next().value.get(SINGLE_MAPPING_NAME).source().uncompressed(); XContentSource source = new XContentSource(new BytesArray(bytes), XContentType.JSON); // lets make sure the body fields are disabled if (useChained) { - String chainedPath = "doc.properties.result.properties.input.properties.chain.properties.chained.properties.search" + + String chainedPath = SINGLE_MAPPING_NAME + + ".properties.result.properties.input.properties.chain.properties.chained.properties.search" + ".properties.request.properties.body.enabled"; assertThat(source.getValue(chainedPath), is(false)); } else { - String path = "doc.properties.result.properties.input.properties.search.properties.request.properties.body.enabled"; + String path = SINGLE_MAPPING_NAME + + ".properties.result.properties.input.properties.search.properties.request.properties.body.enabled"; assertThat(source.getValue(path), is(false)); } } @@ -135,16 +140,18 @@ public void testPayloadInputWithDotsInFieldNameWorks() throws Exception { assertHitCount(searchResponse, 1); // as fields with dots are allowed in 5.0 again, the mapping must be checked in addition - GetMappingsResponse response = client().admin().indices().prepareGetMappings(".watcher-history*").addTypes("doc").get(); - byte[] bytes = response.getMappings().values().iterator().next().value.get("doc").source().uncompressed(); + GetMappingsResponse response = client().admin().indices().prepareGetMappings(".watcher-history*") + .addTypes(SINGLE_MAPPING_NAME).get(); + byte[] bytes = response.getMappings().values().iterator().next().value.get(SINGLE_MAPPING_NAME).source().uncompressed(); XContentSource source = new XContentSource(new BytesArray(bytes), XContentType.JSON); // lets make sure the body fields are disabled if (useChained) { - String path = "doc.properties.result.properties.input.properties.chain.properties.chained.properties.payload.enabled"; + String path = SINGLE_MAPPING_NAME + + ".properties.result.properties.input.properties.chain.properties.chained.properties.payload.enabled"; assertThat(source.getValue(path), is(false)); } else { - String path = "doc.properties.result.properties.input.properties.payload.enabled"; + String path = SINGLE_MAPPING_NAME + ".properties.result.properties.input.properties.payload.enabled"; assertThat(source.getValue(path), is(false)); } } @@ -172,10 +179,12 @@ public void testThatHistoryContainsStatus() throws Exception { assertThat(active, is(status.state().isActive())); String timestamp = source.getValue("status.state.timestamp"); - assertThat(timestamp, is(status.state().getTimestamp().toString())); + assertThat(timestamp, WatcherTestUtils.isSameDate(status.state().getTimestamp())); String lastChecked = source.getValue("status.last_checked"); - assertThat(lastChecked, is(status.lastChecked().toString())); + assertThat(lastChecked, WatcherTestUtils.isSameDate(status.lastChecked())); + String lastMetCondition = source.getValue("status.last_met_condition"); + assertThat(lastMetCondition, WatcherTestUtils.isSameDate(status.lastMetCondition())); Integer version = source.getValue("status.version"); int expectedVersion = (int) (status.version() - 1); @@ -189,11 +198,14 @@ public void testThatHistoryContainsStatus() throws Exception { assertThat(lastExecutionSuccesful, 
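
The history-mapping assertions above and below read values out of the watch history mapping by dotted path, now rooted at MapperService.SINGLE_MAPPING_NAME ("_doc") instead of the old "doc" type. The tests use XContentSource.getValue; the sketch below uses the lower-level XContentMapValues.extractValue, which performs a comparable dotted-path walk over a hand-built map (values hypothetical).

```java
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.common.xcontent.support.XContentMapValues;

public final class MappingPathSketch {
    public static void main(String[] args) {
        Map<String, Object> statusField = new HashMap<>();
        statusField.put("enabled", false);

        Map<String, Object> properties = new HashMap<>();
        properties.put("status", statusField);

        Map<String, Object> doc = new HashMap<>();
        doc.put("properties", properties);

        Map<String, Object> mapping = new HashMap<>();
        mapping.put("_doc", doc);   // SINGLE_MAPPING_NAME

        Object enabled = XContentMapValues.extractValue("_doc.properties.status.enabled", mapping);
        System.out.println(enabled); // false -- the status field is disabled in the mapping
    }
}
```
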
is(actionStatus.lastExecution().successful())); // also ensure that the status field is disabled in the watch history - GetMappingsResponse response = client().admin().indices().prepareGetMappings(".watcher-history*").addTypes("doc").get(); - byte[] bytes = response.getMappings().values().iterator().next().value.get("doc").source().uncompressed(); + GetMappingsResponse response = client().admin().indices().prepareGetMappings(".watcher-history*") + .addTypes(SINGLE_MAPPING_NAME).get(); + byte[] bytes = response.getMappings().values().iterator().next().value.get(SINGLE_MAPPING_NAME).source().uncompressed(); XContentSource mappingSource = new XContentSource(new BytesArray(bytes), XContentType.JSON); - assertThat(mappingSource.getValue("doc.properties.status.enabled"), is(false)); - assertThat(mappingSource.getValue("doc.properties.status.properties.status"), is(nullValue())); - assertThat(mappingSource.getValue("doc.properties.status.properties.status.properties.active"), is(nullValue())); + assertThat(mappingSource.getValue(SINGLE_MAPPING_NAME + ".properties.status.enabled"), is(false)); + assertThat(mappingSource.getValue(SINGLE_MAPPING_NAME + ".properties.status.properties.status"), is(nullValue())); + assertThat(mappingSource.getValue(SINGLE_MAPPING_NAME + ".properties.status.properties.status.properties.active"), is(nullValue())); } + + } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java index 60d049f4ff138..fe548abe1f536 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java @@ -101,7 +101,7 @@ public void testHttpInput() throws Exception { // verifying the basic auth password is stored encrypted in the index when security // is enabled, and when it's not enabled, it's stored in plain text - GetResponse response = client().prepareGet(Watch.INDEX, Watch.DOC_TYPE, "_id").get(); + GetResponse response = client().prepareGet().setIndex(Watch.INDEX).setId("_id").get(); assertThat(response, notNullValue()); assertThat(response.getId(), is("_id")); Map source = response.getSource(); @@ -179,8 +179,8 @@ public void testWebhookAction() throws Exception { .get(); // verifying the basic auth password is stored encrypted in the index when security - // is enabled, when it's not enabled, the the passowrd should be stored in plain text - GetResponse response = client().prepareGet(Watch.INDEX, Watch.DOC_TYPE, "_id").get(); + // is enabled, when it's not enabled, the password should be stored in plain text + GetResponse response = client().prepareGet().setIndex(Watch.INDEX).setId("_id").get(); assertThat(response, notNullValue()); assertThat(response.getId(), is("_id")); Map source = response.getSource(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java new file mode 100644 index 0000000000000..f6c46f6c68f71 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.test.integration; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.watcher.condition.CompareCondition; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; + +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +public class RejectedExecutionTests extends AbstractWatcherIntegrationTestCase { + + @Override + protected boolean timeWarped() { + //need to use the real scheduler + return false; + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41734") + public void testHistoryAndTriggeredOnRejection() throws Exception { + WatcherClient watcherClient = watcherClient(); + createIndex("idx"); + client().prepareIndex("idx", "_doc").setSource("field", "a").get(); + refresh(); + WatcherSearchTemplateRequest request = templateRequest(searchSource().query(termQuery("field", "a")), "idx"); + watcherClient.preparePutWatch(randomAlphaOfLength(5)) + .setSource(watchBuilder() + .trigger(schedule(interval(1, IntervalSchedule.Interval.Unit.SECONDS))) + .input(searchInput(request)) + .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 1L)) + .addAction("_logger", loggingAction("_logging") + .setCategory("_category"))) + .get(); + + assertBusy(() -> { + flushAndRefresh(".watcher-history-*"); + SearchResponse searchResponse = client().prepareSearch(".watcher-history-*").get(); + assertThat(searchResponse.getHits().getTotalHits().value, greaterThanOrEqualTo(2L)); + }, 10, TimeUnit.SECONDS); + + flushAndRefresh(".triggered_watches"); + SearchResponse searchResponse = client().prepareSearch(".triggered_watches").get(); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(XPackSettings.MONITORING_ENABLED.getKey(), false) + .put(XPackSettings.SECURITY_ENABLED.getKey(), false) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial") + .put("thread_pool.write.size", 1) + .put("thread_pool.write.queue_size", 1) + .put("xpack.watcher.thread_pool.size", 1) + 
.put("xpack.watcher.thread_pool.queue_size", 0) + .build(); + } + + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java index c1a8623b44719..a0ef5e97d8534 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java @@ -54,7 +54,7 @@ public class WatchAckTests extends AbstractWatcherIntegrationTestCase { @Before public void indexTestDocument() { - IndexResponse eventIndexResponse = client().prepareIndex("events", "event", id) + IndexResponse eventIndexResponse = client().prepareIndex().setIndex("events").setId(id) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .setSource("level", "error") .get(); @@ -69,8 +69,8 @@ public void testAckSingleAction() throws Exception { .input(searchInput(templateRequest(searchSource(), "events"))) .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.GT, 0L)) .transform(searchTransform(templateRequest(searchSource(), "events"))) - .addAction("_a1", indexAction("actions1", "doc")) - .addAction("_a2", indexAction("actions2", "doc")) + .addAction("_a1", indexAction("actions1")) + .addAction("_a2", indexAction("actions2")) .defaultThrottlePeriod(new TimeValue(0, TimeUnit.SECONDS))) .get(); @@ -83,8 +83,8 @@ public void testAckSingleAction() throws Exception { assertThat(ackResponse.getStatus().actionStatus("_a2").ackStatus().state(), is(ActionStatus.AckStatus.State.ACKABLE)); refresh(); - long a1CountAfterAck = docCount("actions1", "doc", matchAllQuery()); - long a2CountAfterAck = docCount("actions2", "doc", matchAllQuery()); + long a1CountAfterAck = docCount("actions1", matchAllQuery()); + long a2CountAfterAck = docCount("actions2", matchAllQuery()); assertThat(a1CountAfterAck, greaterThan(0L)); assertThat(a2CountAfterAck, greaterThan(0L)); @@ -93,15 +93,15 @@ public void testAckSingleAction() throws Exception { refresh(); // There shouldn't be more a1 actions in the index after we ack the watch, even though the watch was triggered - long a1CountAfterPostAckFires = docCount("actions1", "doc", matchAllQuery()); + long a1CountAfterPostAckFires = docCount("actions1", matchAllQuery()); assertThat(a1CountAfterPostAckFires, equalTo(a1CountAfterAck)); // There should be more a2 actions in the index after we ack the watch - long a2CountAfterPostAckFires = docCount("actions2", "doc", matchAllQuery()); + long a2CountAfterPostAckFires = docCount("actions2", matchAllQuery()); assertThat(a2CountAfterPostAckFires, greaterThan(a2CountAfterAck)); // Now delete the event and the ack states should change to AWAITS_EXECUTION - DeleteResponse response = client().prepareDelete("events", "event", id).get(); + DeleteResponse response = client().prepareDelete().setIndex("events").setId(id).get(); assertEquals(DocWriteResponse.Result.DELETED, response.getResult()); refresh(); @@ -117,7 +117,7 @@ public void testAckSingleAction() throws Exception { assertThat(parsedWatch.status().actionStatus("_a2").ackStatus().state(), is(ActionStatus.AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION)); - long throttledCount = docCount(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*", null, + long throttledCount = docCount(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*", matchQuery(WatchRecord.STATE.getPreferredName(), 
ExecutionState.ACKNOWLEDGED.id())); assertThat(throttledCount, greaterThan(0L)); } @@ -130,8 +130,8 @@ public void testAckAllActions() throws Exception { .input(searchInput(templateRequest(searchSource(), "events"))) .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.GT, 0L)) .transform(searchTransform(templateRequest(searchSource(), "events"))) - .addAction("_a1", indexAction("actions1", "doc")) - .addAction("_a2", indexAction("actions2", "doc")) + .addAction("_a1", indexAction("actions1")) + .addAction("_a2", indexAction("actions2")) .defaultThrottlePeriod(new TimeValue(0, TimeUnit.SECONDS))) .get(); @@ -152,8 +152,8 @@ public void testAckAllActions() throws Exception { assertThat(ackResponse.getStatus().actionStatus("_a2").ackStatus().state(), is(ActionStatus.AckStatus.State.ACKED)); refresh(); - long a1CountAfterAck = docCount("actions1", "doc", matchAllQuery()); - long a2CountAfterAck = docCount("actions2", "doc", matchAllQuery()); + long a1CountAfterAck = docCount("actions1", matchAllQuery()); + long a2CountAfterAck = docCount("actions2", matchAllQuery()); assertThat(a1CountAfterAck, greaterThanOrEqualTo((long) 1)); assertThat(a2CountAfterAck, greaterThanOrEqualTo((long) 1)); @@ -162,15 +162,15 @@ public void testAckAllActions() throws Exception { refresh(); // There shouldn't be more a1 actions in the index after we ack the watch, even though the watch was triggered - long a1CountAfterPostAckFires = docCount("actions1", "doc", matchAllQuery()); + long a1CountAfterPostAckFires = docCount("actions1", matchAllQuery()); assertThat(a1CountAfterPostAckFires, equalTo(a1CountAfterAck)); // There shouldn't be more a2 actions in the index after we ack the watch, even though the watch was triggered - long a2CountAfterPostAckFires = docCount("actions2", "doc", matchAllQuery()); + long a2CountAfterPostAckFires = docCount("actions2", matchAllQuery()); assertThat(a2CountAfterPostAckFires, equalTo(a2CountAfterAck)); // Now delete the event and the ack states should change to AWAITS_EXECUTION - DeleteResponse response = client().prepareDelete("events", "event", id).get(); + DeleteResponse response = client().prepareDelete().setIndex("events").setId(id).get(); assertEquals(DocWriteResponse.Result.DELETED, response.getResult()); refresh(); @@ -186,7 +186,7 @@ public void testAckAllActions() throws Exception { assertThat(parsedWatch.status().actionStatus("_a2").ackStatus().state(), is(ActionStatus.AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION)); - long throttledCount = docCount(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*", null, + long throttledCount = docCount(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*", matchQuery(WatchRecord.STATE.getPreferredName(), ExecutionState.ACKNOWLEDGED.id())); assertThat(throttledCount, greaterThan(0L)); } @@ -199,7 +199,7 @@ public void testAckWithRestart() throws Exception { .input(searchInput(templateRequest(searchSource(), "events"))) .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.GT, 0L)) .transform(searchTransform(templateRequest(searchSource(), "events"))) - .addAction("_id", indexAction("actions", "action"))) + .addAction("_id", indexAction("actions"))) .get(); assertThat(putWatchResponse.isCreated(), is(true)); assertThat(watcherClient().prepareWatcherStats().get().getWatchesCount(), is(1L)); @@ -211,7 +211,7 @@ public void testAckWithRestart() throws Exception { assertThat(ackResponse.getStatus().actionStatus("_id").ackStatus().state(), is(ActionStatus.AckStatus.State.ACKED)); refresh("actions"); 
- long countAfterAck = client().prepareSearch("actions").setTypes("action").setQuery(matchAllQuery()).get() + long countAfterAck = client().prepareSearch("actions").setQuery(matchAllQuery()).get() .getHits().getTotalHits().value; assertThat(countAfterAck, greaterThanOrEqualTo(1L)); @@ -221,7 +221,7 @@ public void testAckWithRestart() throws Exception { assertThat(watchResponse.getStatus().actionStatus("_id").ackStatus().state(), Matchers.equalTo(ActionStatus.AckStatus.State.ACKED)); refresh(); - GetResponse getResponse = client().get(new GetRequest(Watch.INDEX, Watch.DOC_TYPE, "_name")).actionGet(); + GetResponse getResponse = client().get(new GetRequest(Watch.INDEX, "_name")).actionGet(); Watch indexedWatch = watchParser().parse("_name", true, getResponse.getSourceAsBytesRef(), XContentType.JSON, getResponse.getSeqNo(), getResponse.getPrimaryTerm()); assertThat(watchResponse.getStatus().actionStatus("_id").ackStatus().state(), @@ -231,7 +231,7 @@ public void testAckWithRestart() throws Exception { refresh("actions"); // There shouldn't be more actions in the index after we ack the watch, even though the watch was triggered - long countAfterPostAckFires = docCount("actions", "action", matchAllQuery()); + long countAfterPostAckFires = docCount("actions", matchAllQuery()); assertThat(countAfterPostAckFires, equalTo(countAfterAck)); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java index 042e82765354f..4be91fb0ad8b3 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; import org.elasticsearch.xpack.watcher.test.WatcherMockScriptPlugin; @@ -120,9 +119,8 @@ public void testScriptTransform() throws Exception { .setSource(watchBuilder() .trigger(schedule(interval("5s"))) .input(simpleInput(MapBuilder.newMapBuilder().put("key1", 10).put("key2", 10))) - .condition(InternalAlwaysCondition.INSTANCE) .transform(scriptTransform(script)) - .addAction("_id", indexAction("output1", "type"))) + .addAction("_id", indexAction("output1"))) .get(); assertThat(putWatchResponse.isCreated(), is(true)); // put a watch that has a action level transform: @@ -130,13 +128,12 @@ public void testScriptTransform() throws Exception { .setSource(watchBuilder() .trigger(schedule(interval("5s"))) .input(simpleInput(MapBuilder.newMapBuilder().put("key1", 10).put("key2", 10))) - .condition(InternalAlwaysCondition.INSTANCE) - .addAction("_id", scriptTransform(script), indexAction("output2", "type"))) + .addAction("_id", scriptTransform(script), indexAction("output2"))) .get(); assertThat(putWatchResponse.isCreated(), is(true)); - timeWarp().trigger("_id1"); - timeWarp().trigger("_id2"); + executeWatch("_id1"); + executeWatch("_id2"); refresh(); assertWatchWithMinimumPerformedActionsCount("_id1", 1, false); @@ -156,7 +153,6 @@ public void testScriptTransform() throws Exception { 
assertThat(response.getHits().getAt(0).getSourceAsMap().get("key3").toString(), equalTo("20")); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37882") public void testSearchTransform() throws Exception { createIndex("my-condition-index", "my-payload-index"); ensureGreen("my-condition-index", "my-payload-index"); @@ -179,12 +175,12 @@ public void testSearchTransform() throws Exception { .setSource(watchBuilder() .trigger(schedule(interval("5s"))) .input(searchInput(inputRequest)) - .addAction("_id", searchTransform(transformRequest), indexAction("output2", "result")) + .addAction("_id", searchTransform(transformRequest), indexAction("output2")) ).get(); assertThat(putWatchResponse.isCreated(), is(true)); - timeWarp().trigger("_id1"); - timeWarp().trigger("_id2"); + executeWatch("_id1"); + executeWatch("_id2"); refresh(); assertWatchWithMinimumPerformedActionsCount("_id1", 1, false); @@ -211,9 +207,8 @@ public void testChainTransform() throws Exception { .setSource(watchBuilder() .trigger(schedule(interval("5s"))) .input(simpleInput(MapBuilder.newMapBuilder().put("key1", 10).put("key2", 10))) - .condition(InternalAlwaysCondition.INSTANCE) .transform(chainTransform(scriptTransform(script1), scriptTransform(script2))) - .addAction("_id", indexAction("output1", "type"))) + .addAction("_id", indexAction("output1"))) .get(); assertThat(putWatchResponse.isCreated(), is(true)); // put a watch that has a action level transform: @@ -221,14 +216,13 @@ public void testChainTransform() throws Exception { .setSource(watchBuilder() .trigger(schedule(interval("5s"))) .input(simpleInput(MapBuilder.newMapBuilder().put("key1", 10).put("key2", 10))) - .condition(InternalAlwaysCondition.INSTANCE) .addAction("_id", chainTransform(scriptTransform(script1), scriptTransform(script2)), - indexAction("output2", "type"))) + indexAction("output2"))) .get(); assertThat(putWatchResponse.isCreated(), is(true)); - timeWarp().trigger("_id1"); - timeWarp().trigger("_id2"); + executeWatch("_id1"); + executeWatch("_id2"); refresh(); assertWatchWithMinimumPerformedActionsCount("_id1", 1, false); @@ -247,4 +241,10 @@ public void testChainTransform() throws Exception { assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); assertThat(response.getHits().getAt(0).getSourceAsMap().get("key4").toString(), equalTo("30")); } + + private void executeWatch(String watchId) { + watcherClient().prepareExecuteWatch(watchId) + .setRecordExecution(true) + .get(); + } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java index 1080880ea8bbb..f89d0eee7c941 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java @@ -55,7 +55,7 @@ public void testDeactivateAndActivate() throws Exception { .setSource(watchBuilder() .trigger(schedule(interval("1s"))) .input(simpleInput("foo", "bar")) - .addAction("_a1", indexAction("actions", "action1")) + .addAction("_a1", indexAction("actions")) .defaultThrottlePeriod(new TimeValue(0, TimeUnit.SECONDS))) .get(); @@ -86,13 +86,13 @@ public void testDeactivateAndActivate() throws Exception { logger.info("Ensured no more watches are being executed"); refresh(); - long count1 = 
docCount(".watcher-history*", "doc", matchAllQuery()); + long count1 = docCount(".watcher-history*", matchAllQuery()); logger.info("Sleeping for 5 seconds, watch history count [{}]", count1); Thread.sleep(5000); refresh(); - long count2 = docCount(".watcher-history*", "doc", matchAllQuery()); + long count2 = docCount(".watcher-history*", matchAllQuery()); assertThat(count2, is(count1)); @@ -110,7 +110,7 @@ public void testDeactivateAndActivate() throws Exception { logger.info("Sleeping for another five seconds, ensuring that watch is executed"); Thread.sleep(5000); refresh(); - long count3 = docCount(".watcher-history*", "doc", matchAllQuery()); + long count3 = docCount(".watcher-history*", matchAllQuery()); assertThat(count3, greaterThan(count1)); } @@ -122,7 +122,7 @@ public void testLoadWatchWithoutAState() throws Exception { .setSource(watchBuilder() .trigger(schedule(cron("0 0 0 1 1 ? 2050"))) // some time in 2050 .input(simpleInput("foo", "bar")) - .addAction("_a1", indexAction("actions", "action1")) + .addAction("_a1", indexAction("actions")) .defaultThrottlePeriod(new TimeValue(0, TimeUnit.SECONDS))) .get(); @@ -132,7 +132,7 @@ public void testLoadWatchWithoutAState() throws Exception { assertThat(getWatchResponse, notNullValue()); assertThat(getWatchResponse.getStatus().state().isActive(), is(true)); - GetResponse getResponse = client().prepareGet(".watches", "doc", "_id").get(); + GetResponse getResponse = client().prepareGet().setIndex(".watches").setId("_id").get(); XContentSource source = new XContentSource(getResponse.getSourceAsBytesRef(), XContentType.JSON); Set filters = Sets.newHashSet( @@ -152,7 +152,7 @@ public void testLoadWatchWithoutAState() throws Exception { source.toXContent(builder, ToXContent.EMPTY_PARAMS); // now that we filtered out the watch status state, lets put it back in - IndexResponse indexResponse = client().prepareIndex(".watches", "doc", "_id") + IndexResponse indexResponse = client().prepareIndex().setIndex(".watches").setId("_id") .setSource(BytesReference.bytes(builder), XContentType.JSON) .get(); assertThat(indexResponse.getId(), is("_id")); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java index 956cce95f84c2..726a46799d401 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; @@ -69,8 +70,8 @@ public void testWatchNotFound() { String watchId = "my_watch_id"; doAnswer(invocation -> { ActionListener listener = (ActionListener) invocation.getArguments()[1]; - listener.onResponse(new GetResponse(new GetResult(Watch.INDEX, Watch.DOC_TYPE, watchId, UNASSIGNED_SEQ_NO, 0, -1, false, - BytesArray.EMPTY, Collections.emptyMap()))); + listener.onResponse(new GetResponse(new GetResult(Watch.INDEX, MapperService.SINGLE_MAPPING_NAME, watchId, UNASSIGNED_SEQ_NO, + 0, -1, false, 
BytesArray.EMPTY, Collections.emptyMap()))); return null; }).when(client).get(anyObject(), anyObject()); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java index eaf952effd3c3..3e46f7102c192 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java @@ -21,8 +21,10 @@ import java.time.ZonedDateTime; import java.util.Collection; import java.util.Collections; +import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; /** * A mock scheduler to help with unit testing. Provide {@link ScheduleTriggerEngineMock#trigger} method to manually trigger @@ -31,7 +33,8 @@ public class ScheduleTriggerEngineMock extends ScheduleTriggerEngine { private static final Logger logger = LogManager.getLogger(ScheduleTriggerEngineMock.class); - private final ConcurrentMap<String, Watch> watches = new ConcurrentHashMap<>(); + private final AtomicReference<Map<String, Watch>> watches = new AtomicReference<>(new ConcurrentHashMap<>()); + private final AtomicBoolean paused = new AtomicBoolean(false); public ScheduleTriggerEngineMock(ScheduleRegistry scheduleRegistry, Clock clock) { super(scheduleRegistry, clock); @@ -49,29 +52,32 @@ public ScheduleTriggerEvent parseTriggerEvent(TriggerService service, String wat } @Override - public void start(Collection<Watch> jobs) { - jobs.forEach(this::add); + public synchronized void start(Collection<Watch> jobs) { + Map<String, Watch> newWatches = new ConcurrentHashMap<>(); + jobs.forEach((watch) -> newWatches.put(watch.id(), watch)); + watches.set(newWatches); + paused.set(false); } @Override public void stop() { - watches.clear(); + watches.set(new ConcurrentHashMap<>()); } @Override - public void add(Watch watch) { + public synchronized void add(Watch watch) { logger.debug("adding watch [{}]", watch.id()); - watches.put(watch.id(), watch); + watches.get().put(watch.id(), watch); } @Override public void pauseExecution() { - watches.clear(); + paused.set(true); } @Override - public boolean remove(String jobId) { - return watches.remove(jobId) != null; + public synchronized boolean remove(String jobId) { - return watches.get().remove(jobId) != null; + return watches.get().remove(jobId) != null; } public boolean trigger(String jobName) { @@ -79,7 +85,11 @@ public boolean trigger(String jobName) { } public boolean trigger(String jobName, int times, TimeValue interval) { - if (watches.containsKey(jobName) == false) { + if (watches.get().containsKey(jobName) == false) { + return false; + } + if (paused.get()) { + logger.info("not executing watch [{}] on this scheduler because it is paused", jobName); return false; } @@ -88,7 +98,7 @@ public boolean trigger(String jobName, int times, TimeValue interval) { logger.debug("firing watch [{}] at [{}]", jobName, now); ScheduleTriggerEvent event = new ScheduleTriggerEvent(jobName, now, now); consumers.forEach(consumer -> consumer.accept(Collections.singletonList(event))); - if (interval != null) { + if (interval != null) { if (clock instanceof ClockMock) { ((ClockMock) clock).fastForward(interval); } else { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusIntegrationTests.java
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusIntegrationTests.java index 9f738d8daa6b2..74068538511bb 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusIntegrationTests.java @@ -12,6 +12,12 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; import org.elasticsearch.xpack.watcher.condition.NeverCondition; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import org.hamcrest.FeatureMatcher; +import org.hamcrest.Matcher; + +import java.time.ZonedDateTime; +import java.time.temporal.ChronoField; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; @@ -19,8 +25,10 @@ import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; import static org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule.Interval.Unit.SECONDS; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class WatchStatusIntegrationTests extends AbstractWatcherIntegrationTestCase { @@ -40,13 +48,27 @@ public void testThatStatusGetsUpdated() { assertThat(getWatchResponse.getSource(), notNullValue()); assertThat(getWatchResponse.getStatus().lastChecked(), is(notNullValue())); - GetResponse getResponse = client().prepareGet(".watches", "doc", "_name").get(); + GetResponse getResponse = client().prepareGet().setIndex(".watches").setId("_name").get(); getResponse.getSource(); XContentSource source = new XContentSource(getResponse.getSourceAsBytesRef(), XContentType.JSON); + String lastChecked = source.getValue("status.last_checked"); + assertThat(lastChecked, WatcherTestUtils.isSameDate(getWatchResponse.getStatus().lastChecked())); + assertThat(getWatchResponse.getStatus().lastChecked(), isMillisResolution()); + // not started yet, so both nulls + String lastMetCondition = source.getValue("status.last_met_condition"); + assertThat(lastMetCondition, is(nullValue())); + assertThat(getWatchResponse.getStatus().lastMetCondition(), is(nullValue())); + } - assertThat(lastChecked, is(notNullValue())); - assertThat(getWatchResponse.getStatus().lastChecked().toString(), is(lastChecked)); + private Matcher<ZonedDateTime> isMillisResolution() { + return new FeatureMatcher<ZonedDateTime, Boolean>(equalTo(true), "has millisecond precision", "precision") { + @Override + protected Boolean featureValueOf(ZonedDateTime actual) { + // if the date has millisecond precision, its nanosecond field is the millisecond value scaled to nanos (millis * 10^6) + return actual.getNano() == actual.get(ChronoField.MILLI_OF_SECOND) * 1000_000; + } + }; } } diff --git a/x-pack/qa/evil-tests/build.gradle b/x-pack/qa/evil-tests/build.gradle index ad32645b3647c..d411909fb310b 100644 --- a/x-pack/qa/evil-tests/build.gradle +++ b/x-pack/qa/evil-tests/build.gradle @@ -5,7 +5,7 @@ dependencies { testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') } -unitTest { +test { systemProperty 'tests.security.manager', 'false' include '**/*Tests.class' } diff --git
a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java index ecaf67205ac80..6754b1acb9347 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java @@ -98,7 +98,7 @@ public static void setupKerberos() throws Exception { } @AfterClass - public static void restoreLocale() throws Exception { + public static void restoreLocale() { if (restoreLocale != null) { Locale.setDefault(restoreLocale); restoreLocale = null; diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java index 8888ce33be57f..ec94af9b75fc4 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java @@ -90,7 +90,9 @@ public Boolean run() throws Exception { AccessController.doPrivileged(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - init(); + if (ESTestCase.awaitBusy(() -> init()) == false) { + throw new IllegalStateException("could not initialize SimpleKdcLdapServer"); + } return null; } }); @@ -98,14 +100,33 @@ public Void run() throws Exception { } @SuppressForbidden(reason = "Uses Apache Kdc which requires usage of java.io.File in order to create a SimpleKdcServer") - private void init() throws Exception { - // start ldap server - createLdapServiceAndStart(); - // create ldap backend conf - createLdapBackendConf(); - // Kdc Server - simpleKdc = new SimpleKdcServer(this.workDir.toFile(), new KrbConfig()); - prepareKdcServerAndStart(); + private boolean init() { + boolean initialized = false; + try { + // start ldap server + createLdapServiceAndStart(); + // create ldap backend conf + createLdapBackendConf(); + // Kdc Server + simpleKdc = new SimpleKdcServer(this.workDir.toFile(), new KrbConfig()); + prepareKdcServerAndStart(); + initialized = true; + } catch (Exception e) { + if (simpleKdc != null) { + try { + simpleKdc.stop(); + } catch (KrbException krbException) { + logger.debug("error occurred while cleaning up after init failure for SimpleKdcLdapServer"); + } + } + if (ldapServer != null) { + ldapServer.shutDown(true); + } + ldapPort = 0; + kdcPort = 0; + initialized = false; + } + return initialized; } private void createLdapServiceAndStart() throws Exception { @@ -229,12 +250,14 @@ private static int getServerPort(String transport) { if (transport != null && transport.trim().equalsIgnoreCase("TCP")) { try (ServerSocket serverSocket = ServerSocketFactory.getDefault().createServerSocket(0, 1, InetAddress.getByName("127.0.0.1"))) { + serverSocket.setReuseAddress(true); return serverSocket.getLocalPort(); } catch (Exception ex) { throw new RuntimeException("Failed to get a TCP server socket point"); } } else if (transport != null && transport.trim().equalsIgnoreCase("UDP")) { try (DatagramSocket socket = new DatagramSocket(0, InetAddress.getByName("127.0.0.1"))) { + socket.setReuseAddress(true); return socket.getLocalPort(); } catch (Exception ex) { throw new RuntimeException("Failed to get a UDP server socket point"); diff 
--git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index fe7d4e9974cb7..964cc2fb43cc7 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -3,11 +3,11 @@ import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.Version import java.nio.charset.StandardCharsets -import java.util.regex.Matcher // Apply the java plugin to this project so the sources can be edited in an IDE -apply plugin: 'elasticsearch.build' -unitTest.enabled = false +apply plugin: 'elasticsearch.standalone-test' + +test.enabled = false dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here @@ -25,6 +25,7 @@ dependencies { // This is total #$%, but the solution is to get the SAML realm (which uses guava) out of security proper exclude group: "com.google.guava", module: "guava" } + testCompile project(path: ':qa:full-cluster-restart', configuration: 'testArtifacts') } Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> @@ -70,17 +71,6 @@ Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> return tmpFile.exists() } -Project mainProject = project - -String coreFullClusterRestartPath = project(':qa:full-cluster-restart').projectDir.toPath().resolve('src/test/java').toString() -sourceSets { - test { - java { - srcDirs += [coreFullClusterRestartPath] - } - } -} - licenseHeaders { approvedLicenses << 'Apache' } @@ -89,207 +79,157 @@ forbiddenPatterns { exclude '**/system_key' } -// tests are pushed down to subprojects -testingConventions.enabled = false - -/** - * Subdirectories of this project are test rolling upgrades with various - * configuration options based on their name. - */ -subprojects { - Matcher m = project.name =~ /with(out)?-system-key/ - if (false == m.matches()) { - throw new InvalidUserDataException("Invalid project name [${project.name}]") - } - boolean withSystemKey = m.group(1) == null - - apply plugin: 'elasticsearch.standalone-test' - - // Use resources from the rolling-upgrade project in subdirectories - sourceSets { - test { - java { - srcDirs = ["${mainProject.projectDir}/src/test/java", coreFullClusterRestartPath] - } - resources { - srcDirs = ["${mainProject.projectDir}/src/test/resources"] - } - } - } +String outputDir = "${buildDir}/generated-resources/${project.name}" - licenseHeaders { - approvedLicenses << 'Apache' - } - - forbiddenPatterns { - exclude '**/system_key' - } - - String outputDir = "${buildDir}/generated-resources/${project.name}" - - // This is a top level task which we will add dependencies to below. - // It is a single task that can be used to backcompat tests against all versions. - task bwcTest { +// This is a top level task which we will add dependencies to below. +// It is a single task that can be used to backcompat tests against all versions. +task bwcTest { description = 'Runs backwards compatibility tests.' 
group = 'verification' - } +} - String output = "${buildDir}/generated-resources/${project.name}" - task copyTestNodeKeystore(type: Copy) { - from project(':x-pack:plugin:core') - .file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') +task copyTestNodeKeyMaterial(type: Copy) { + from project(':x-pack:plugin:core').files('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt', + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') into outputDir - } +} - for (Version version : bwcVersions.indexCompatible) { +for (Version version : bwcVersions.indexCompatible) { String baseName = "v${version}" Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { - mustRunAfter(precommit) + mustRunAfter(precommit) } Object extension = extensions.findByName("${baseName}#oldClusterTestCluster") configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { - dependsOn copyTestNodeKeystore - if (version.before('6.3.0')) { - String depVersion = version; - if (project.bwcVersions.unreleased.contains(version)) { - depVersion += "-SNAPSHOT" - } - mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" - - } - bwcVersion = version - numBwcNodes = 2 - numNodes = 2 - clusterName = 'full-cluster-restart' - String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users' - setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - waitCondition = waitWithAuth - - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '20m' - - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' - setting 'xpack.license.self_generated.type', 'trial' - dependsOn copyTestNodeKeystore - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - if (withSystemKey) { - if (version.onOrAfter('5.1.0') && version.before('6.0.0')) { - // The setting didn't exist until 5.1.0 - setting 'xpack.security.system_key.required', 'true' + dependsOn copyTestNodeKeyMaterial + if (version.before('6.3.0')) { + String depVersion = version; + if (project.bwcVersions.unreleased.contains(version)) { + depVersion += "-SNAPSHOT" + } + mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" + } - if (version.onOrAfter('6.0.0')) { - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + bwcVersion = version + numBwcNodes = 2 + numNodes = 2 + clusterName = 'full-cluster-restart' + String usersCli = version.before('6.3.0') ? 
'bin/x-pack/users' : 'bin/elasticsearch-users' + setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = waitWithAuth + + // some tests rely on the translog not being flushed + setting 'indices.memory.shard_inactive_time', '20m' + + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' } else { - extraConfigFile 'x-pack/system_key', "${mainProject.projectDir}/src/test/resources/system_key" + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' } + setting 'xpack.license.self_generated.type', 'trial' + dependsOn copyTestNodeKeyMaterial + extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') + extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + + keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" setting 'xpack.watcher.encrypt_sensitive_data', 'true' - } } Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") oldClusterTestRunner.configure { - systemProperty 'tests.is_old_cluster', 'true' - systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") - systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") - exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' - exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' - exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' + systemProperty 'tests.is_old_cluster', 'true' + systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") + systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") + exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' + exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' + exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' } Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { - dependsOn oldClusterTestRunner, - "${baseName}#oldClusterTestCluster#node0.stop", - "${baseName}#oldClusterTestCluster#node1.stop" - numNodes = 2 - clusterName = 'full-cluster-restart' - dataDir = { nodeNum -> oldClusterTest.nodes[nodeNum].dataDir } - cleanShared = false // We want to keep snapshots made by the old cluster! 
- setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - waitCondition = waitWithAuth - - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '20m' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - keystoreSetting 'xpack.security.transport.ssl.keystore.secure_password', 'testnode' - setting 'xpack.license.self_generated.type', 'trial' - dependsOn copyTestNodeKeystore - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - if (withSystemKey) { - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" - } + dependsOn oldClusterTestRunner, + "${baseName}#oldClusterTestCluster#node0.stop", + "${baseName}#oldClusterTestCluster#node1.stop" + numNodes = 2 + clusterName = 'full-cluster-restart' + dataDir = { nodeNum -> oldClusterTest.nodes[nodeNum].dataDir } + cleanShared = false // We want to keep snapshots made by the old cluster! + setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = waitWithAuth + + // some tests rely on the translog not being flushed + setting 'indices.memory.shard_inactive_time', '20m' + setting 'xpack.security.enabled', 'true' + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } + setting 'xpack.license.self_generated.type', 'trial' + dependsOn copyTestNodeKeyMaterial + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') + extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') + + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" } Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { - systemProperty 'tests.is_old_cluster', 'false' - systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") - systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") - exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' - exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' - exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' + systemProperty 'tests.is_old_cluster', 'false' + systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") + systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") + exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' + exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' + exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { - dependsOn = [upgradedClusterTest] + dependsOn = [upgradedClusterTest] } if (project.bwc_tests_enabled) { - bwcTest.dependsOn(versionBwcTest) + bwcTest.dependsOn(versionBwcTest) } - } 
- - unitTest.enabled = false // no unit tests for full cluster restarts, only the rest integration test +} - // basic integ tests includes testing bwc against the most recent version - task integTest { +// basic integ tests includes testing bwc against the most recent version +task bwcTestSnapshots { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.unreleasedIndexCompatible) { - dependsOn "v${version}#bwcTest" - } + for (final def version : bwcVersions.unreleasedIndexCompatible) { + dependsOn "v${version}#bwcTest" + } } - } - - check.dependsOn(integTest) +} - dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here - testCompile project(path: xpackModule('core'), configuration: 'default') - testCompile project(path: xpackModule('watcher'), configuration: 'runtime') - testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') - testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') - } +check.dependsOn(bwcTestSnapshots) - // copy x-pack plugin info so it is on the classpath and security manager has the right permissions - task copyXPackRestSpec(type: Copy) { +// copy x-pack plugin info so it is on the classpath and security manager has the right permissions +task copyXPackRestSpec(type: Copy) { dependsOn(project.configurations.restSpec, 'processTestResources') from project(xpackModule('core')).sourceSets.test.resources include 'rest-api-spec/api/**' into project.sourceSets.test.output.resourcesDir - } +} - task copyXPackPluginProps(type: Copy) { +task copyXPackPluginProps(type: Copy) { dependsOn(copyXPackRestSpec) from project(xpackModule('core')).file('src/main/plugin-metadata') from project(xpackModule('core')).tasks.pluginProperties into outputDir - } - project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) - - repositories { - maven { - url "https://artifacts.elastic.co/maven" - } - maven { - url "https://snapshots.elastic.co/maven" - } - } } +project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index e4d96645b87b7..322e97db765ff 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ObjectPath; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.document.RestGetAction; @@ -22,16 +21,8 @@ import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; -import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; -import org.elasticsearch.xpack.security.support.SecurityIndexManager; -import org.elasticsearch.xpack.watcher.actions.index.IndexAction; -import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; -import org.elasticsearch.xpack.watcher.common.text.TextTemplate; 
-import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; -import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; -import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; -import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; import org.hamcrest.Matcher; +import org.junit.Before; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -55,11 +46,26 @@ import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { + public static final String INDEX_ACTION_TYPES_DEPRECATION_MESSAGE = + "[types removal] Specifying types in a watcher index action is deprecated."; + + public static final String SEARCH_INPUT_TYPES_DEPRECATION_MESSAGE = + "[types removal] Specifying types in a watcher search request is deprecated."; + + public static final int UPGRADE_FIELD_EXPECTED_INDEX_FORMAT_VERSION = 6; + public static final int SECURITY_EXPECTED_INDEX_FORMAT_VERSION = 6; + + private String type; + + @Before + public void setType() { + type = getOldClusterVersion().before(Version.V_6_7_0) ? "doc" : "_doc"; + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); @@ -76,7 +82,7 @@ protected Settings restClientSettings() { * Tests that a single document survives. Super basic smoke test. */ public void testSingleDoc() throws IOException { - String docLocation = "/testsingledoc/doc/1"; + String docLocation = "/testsingledoc/" + type + "/1"; String doc = "{\"test\": \"test\"}"; if (isRunningAgainstOldCluster()) { @@ -87,7 +93,9 @@ public void testSingleDoc() throws IOException { } Request getRequest = new Request("GET", docLocation); - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + if (getOldClusterVersion().before(Version.V_6_7_0)) { + getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + } assertThat(toStr(client().performRequest(getRequest)), containsString(doc)); } @@ -101,11 +109,9 @@ public void testSecurityNativeRealm() throws Exception { Response settingsResponse = client().performRequest(new Request("GET", "/.security/_settings/index.format")); Map settingsResponseMap = entityAsMap(settingsResponse); logger.info("settings response map {}", settingsResponseMap); - final boolean needsUpgrade; final String concreteSecurityIndex; if (settingsResponseMap.isEmpty()) { - needsUpgrade = true; - concreteSecurityIndex = ".security"; + fail("The security index does not have the expected setting [index.format]"); } else { concreteSecurityIndex = settingsResponseMap.keySet().iterator().next(); Map indexSettingsMap = @@ -113,29 +119,12 @@ public void testSecurityNativeRealm() throws Exception { Map settingsMap = (Map) indexSettingsMap.get("settings"); logger.info("settings map {}", settingsMap); if (settingsMap.containsKey("index")) { + @SuppressWarnings("unchecked") int format = Integer.parseInt(String.valueOf(((Map)settingsMap.get("index")).get("format"))); - needsUpgrade = format == SecurityIndexManager.INTERNAL_INDEX_FORMAT ? 
false : true; - } else { - needsUpgrade = true; + assertEquals("The security index needs to be upgraded", SECURITY_EXPECTED_INDEX_FORMAT_VERSION, format); } } - if (needsUpgrade) { - logger.info("upgrading security index {}", concreteSecurityIndex); - // without upgrade, an error should be thrown - try { - createUser(false); - fail("should not be able to add a user when upgrade hasn't taken place"); - } catch (ResponseException e) { - assertThat(e.getMessage(), containsString("Security index is not on the current version - " + - "the native realm will not be operational until the upgrade API is run on the security index")); - } - // run upgrade API - Response upgradeResponse = client().performRequest( - new Request("POST", "_migration/upgrade/" + concreteSecurityIndex)); - logger.info("upgrade response:\n{}", toStr(upgradeResponse)); - } - // create additional user and role createUser(false); createRole(false); @@ -145,20 +134,26 @@ public void testSecurityNativeRealm() throws Exception { assertRoleInfo(isRunningAgainstOldCluster()); } + @SuppressWarnings("unchecked") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40178") public void testWatcher() throws Exception { if (isRunningAgainstOldCluster()) { logger.info("Adding a watch on old cluster {}", getOldClusterVersion()); - Request createBwcWatch = new Request("PUT", "/_xpack/watcher/watch/bwc_watch"); + Request createBwcWatch = new Request("PUT", getWatcherEndpoint() + "/watch/bwc_watch"); + Request createBwcThrottlePeriod = new Request("PUT", getWatcherEndpoint() + "/watch/bwc_throttle_period"); + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0)) { + createBwcWatch.setOptions(expectWarnings(INDEX_ACTION_TYPES_DEPRECATION_MESSAGE)); + createBwcThrottlePeriod.setOptions(expectWarnings(INDEX_ACTION_TYPES_DEPRECATION_MESSAGE)); + } createBwcWatch.setJsonEntity(loadWatch("simple-watch.json")); client().performRequest(createBwcWatch); logger.info("Adding a watch with \"fun\" throttle periods on old cluster"); - Request createBwcThrottlePeriod = new Request("PUT", "_xpack/watcher/watch/bwc_throttle_period"); createBwcThrottlePeriod.setJsonEntity(loadWatch("throttle-period-watch.json")); client().performRequest(createBwcThrottlePeriod); logger.info("Adding a watch with \"fun\" read timeout on old cluster"); - Request createFunnyTimeout = new Request("PUT", "_xpack/watcher/watch/bwc_funny_timeout"); + Request createFunnyTimeout = new Request("PUT", getWatcherEndpoint() + "/watch/bwc_funny_timeout"); createFunnyTimeout.setJsonEntity(loadWatch("funny-timeout-watch.json")); client().performRequest(createFunnyTimeout); @@ -171,34 +166,23 @@ public void testWatcher() throws Exception { logger.info("testing against {}", getOldClusterVersion()); waitForYellow(".watches,bwc_watch_index,.watcher-history*"); - logger.info("checking if the upgrade procedure on the new cluster is required"); - Map response = entityAsMap(client().performRequest(new Request("GET", "/_migration/assistance"))); - logger.info(response); - - @SuppressWarnings("unchecked") Map indices = (Map) response.get("indices"); - if (indices.containsKey(".watches")) { - logger.info("upgrade procedure is required for watcher"); - assertThat(indices.entrySet().size(), greaterThanOrEqualTo(1)); - assertThat(indices.get(".watches"), notNullValue()); - @SuppressWarnings("unchecked") Map index = (Map) indices.get(".watches"); - assertThat(index.get("action_required"), equalTo("upgrade")); - - logger.info("starting upgrade procedure on the new cluster"); - - Request 
migrationAssistantRequest = new Request("POST", "_migration/upgrade/.watches"); - migrationAssistantRequest.addParameter("error_trace", "true"); - Map upgradeResponse = entityAsMap(client().performRequest(migrationAssistantRequest)); - assertThat(upgradeResponse.get("timed_out"), equalTo(Boolean.FALSE)); - // we posted 3 watches, but monitoring can post a few more - assertThat((int) upgradeResponse.get("total"), greaterThanOrEqualTo(3)); - - logger.info("checking that upgrade procedure on the new cluster is no longer required"); - Map responseAfter = entityAsMap(client().performRequest( - new Request("GET", "/_migration/assistance"))); - @SuppressWarnings("unchecked") Map indicesAfter = (Map) responseAfter.get("indices"); - assertNull(indicesAfter.get(".watches")); + logger.info("checking that the Watches index is the correct version"); + + Response settingsResponse = client().performRequest(new Request("GET", "/.watches/_settings/index.format")); + Map settingsResponseMap = entityAsMap(settingsResponse); + logger.info("settings response map {}", settingsResponseMap); + final String concreteWatchesIndex; + if (settingsResponseMap.isEmpty()) { + fail("The security index does not have the expected setting [index.format]"); } else { - logger.info("upgrade procedure is not required for watcher"); + concreteWatchesIndex = settingsResponseMap.keySet().iterator().next(); + Map indexSettingsMap = (Map) settingsResponseMap.get(concreteWatchesIndex); + Map settingsMap = (Map) indexSettingsMap.get("settings"); + logger.info("settings map {}", settingsMap); + if (settingsMap.containsKey("index")) { + int format = Integer.parseInt(String.valueOf(((Map)settingsMap.get("index")).get("format"))); + assertEquals("The watches index needs to be upgraded", UPGRADE_FIELD_EXPECTED_INDEX_FORMAT_VERSION, format); + } } // Wait for watcher to actually start.... 
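[Editor's note, not part of the change set: the hunk above replaces the old migration-assistance call with a direct check of the .watches index format. For reference, a minimal standalone sketch of how the nested index.format value is pulled out of the GET /.watches/_settings/index.format response, which comes back as a map keyed by the concrete index name with setting values rendered as strings; the class and method names here are illustrative only.]

import java.util.Map;

class WatchesIndexFormatSketch {
    // Walks a filtered settings response shaped like
    // {".watches": {"settings": {"index": {"format": "6"}}}}
    // and returns the numeric format so it can be compared to the expected version.
    @SuppressWarnings("unchecked")
    static int readIndexFormat(Map<String, Object> settingsResponse) {
        String concreteIndex = settingsResponse.keySet().iterator().next();
        Map<String, Object> indexSettings = (Map<String, Object>) settingsResponse.get(concreteIndex);
        Map<String, Object> settings = (Map<String, Object>) indexSettings.get("settings");
        Map<String, Object> index = (Map<String, Object>) settings.get("index");
        // settings values arrive as strings, so parse before comparing
        return Integer.parseInt(String.valueOf(index.get("format")));
    }
}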
@@ -246,7 +230,11 @@ public void testRollupAfterRestart() throws Exception { // index documents for the rollup job final StringBuilder bulk = new StringBuilder(); for (int i = 0; i < numDocs; i++) { - bulk.append("{\"index\":{\"_index\":\"rollup-docs\",\"_type\":\"doc\"}}\n"); + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0)) { + bulk.append("{\"index\":{\"_index\":\"rollup-docs\"}}\n"); + } else { + bulk.append("{\"index\":{\"_index\":\"rollup-docs\",\"_type\":\"doc\"}}\n"); + } String date = String.format(Locale.ROOT, "%04d-01-01T00:%02d:00Z", year, i); bulk.append("{\"timestamp\":\"").append(date).append("\",\"value\":").append(i).append("}\n"); } @@ -257,7 +245,8 @@ public void testRollupAfterRestart() throws Exception { client().performRequest(bulkRequest); // create the rollup job - final Request createRollupJobRequest = new Request("PUT", "/_xpack/rollup/job/rollup-job-test"); + final Request createRollupJobRequest = new Request("PUT", getRollupEndpoint() + "/job/rollup-job-test"); + createRollupJobRequest.setJsonEntity("{" + "\"index_pattern\":\"rollup-*\"," + "\"rollup_index\":\"results-rollup\"," @@ -278,7 +267,7 @@ public void testRollupAfterRestart() throws Exception { assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE)); // start the rollup job - final Request startRollupJobRequest = new Request("POST", "/_xpack/rollup/job/rollup-job-test/_start"); + final Request startRollupJobRequest = new Request("POST", getRollupEndpoint() + "/job/rollup-job-test/_start"); Map startRollupJobResponse = entityAsMap(client().performRequest(startRollupJobRequest)); assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE)); @@ -304,12 +293,12 @@ public void testRollupIDSchemeAfterRestart() throws Exception { assumeTrue("Rollup ID scheme changed in 6.4", getOldClusterVersion().before(Version.V_6_4_0)); if (isRunningAgainstOldCluster()) { - final Request indexRequest = new Request("POST", "/id-test-rollup/doc/1"); + final Request indexRequest = new Request("POST", "/id-test-rollup/" + type + "/1"); indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-01T00:00:01\",\"value\":123}"); client().performRequest(indexRequest); // create the rollup job - final Request createRollupJobRequest = new Request("PUT", "/_xpack/rollup/job/rollup-id-test"); + final Request createRollupJobRequest = new Request("PUT", getRollupEndpoint() + "/job/rollup-id-test"); createRollupJobRequest.setJsonEntity("{" + "\"index_pattern\":\"id-test-rollup\"," + "\"rollup_index\":\"id-test-results-rollup\"," @@ -337,7 +326,7 @@ public void testRollupIDSchemeAfterRestart() throws Exception { assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE)); // start the rollup job - final Request startRollupJobRequest = new Request("POST", "/_xpack/rollup/job/rollup-id-test/_start"); + final Request startRollupJobRequest = new Request("POST", getRollupEndpoint() + "/job/rollup-id-test/_start"); Map startRollupJobResponse = entityAsMap(client().performRequest(startRollupJobRequest)); assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE)); @@ -365,9 +354,11 @@ public void testRollupIDSchemeAfterRestart() throws Exception { } else { - final Request indexRequest = new Request("POST", "/id-test-rollup/doc/2"); + final Request indexRequest = new Request("POST", "/id-test-rollup/" + type + "/2"); indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-02T00:00:01\",\"value\":345}"); - 
indexRequest.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + if (getOldClusterVersion().before(Version.V_6_7_0)) { + indexRequest.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + } client().performRequest(indexRequest); assertRollUpJob("rollup-id-test"); @@ -431,12 +422,8 @@ public void testSqlFailsOnIndexWithTwoTypes() throws IOException { client().performRequest(doc2); return; } - final Request sqlRequest; - if (isRunningAgainstOldCluster()) { - sqlRequest = new Request("POST", "/_xpack/sql"); - } else { - sqlRequest = new Request("POST", "/_sql"); - } + final Request sqlRequest = new Request("POST", getSQLEndpoint()); + sqlRequest.setJsonEntity("{\"query\":\"SELECT * FROM testsqlfailsonindexwithtwotypes\"}"); ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(sqlRequest)); assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); @@ -458,8 +445,21 @@ private void assertOldTemplatesAreDeleted() throws IOException { private void assertWatchIndexContentsWork() throws Exception { // Fetch a basic watch Request getRequest = new Request("GET", "_watcher/watch/bwc_watch"); - getRequest.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE, - WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE)); + if (getOldClusterVersion().before(Version.V_7_0_0)) { + getRequest.setOptions( + expectWarnings( + INDEX_ACTION_TYPES_DEPRECATION_MESSAGE, + SEARCH_INPUT_TYPES_DEPRECATION_MESSAGE + ) + ); + } else { + getRequest.setOptions( + expectWarnings( + INDEX_ACTION_TYPES_DEPRECATION_MESSAGE + ) + ); + } + Map bwcWatch = entityAsMap(client().performRequest(getRequest)); logger.error("-----> {}", bwcWatch); @@ -475,8 +475,20 @@ private void assertWatchIndexContentsWork() throws Exception { // Fetch a watch with "fun" throttle periods getRequest = new Request("GET", "_watcher/watch/bwc_throttle_period"); - getRequest.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE, - WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE)); + if (getOldClusterVersion().before(Version.V_7_0_0)) { + getRequest.setOptions( + expectWarnings( + INDEX_ACTION_TYPES_DEPRECATION_MESSAGE, + SEARCH_INPUT_TYPES_DEPRECATION_MESSAGE + ) + ); + } else { + getRequest.setOptions( + expectWarnings( + INDEX_ACTION_TYPES_DEPRECATION_MESSAGE + ) + ); + } bwcWatch = entityAsMap(client().performRequest(getRequest)); assertThat(bwcWatch.get("found"), equalTo(true)); source = (Map) bwcWatch.get("watch"); @@ -515,10 +527,9 @@ private void assertWatchIndexContentsWork() throws Exception { private void assertBasicWatchInteractions() throws Exception { - String watch = new WatchSourceBuilder() - .condition(InternalAlwaysCondition.INSTANCE) - .trigger(ScheduleTrigger.builder(new IntervalSchedule(IntervalSchedule.Interval.seconds(1)))) - .addAction("awesome", LoggingAction.builder(new TextTemplate("test"))).buildAsBytes(XContentType.JSON).utf8ToString(); + String watch = "{\"trigger\":{\"schedule\":{\"interval\":\"1s\"}},\"input\":{\"none\":{}}," + + "\"condition\":{\"always\":{}}," + + "\"actions\":{\"awesome\":{\"logging\":{\"level\":\"info\",\"text\":\"test\"}}}}"; Request createWatchRequest = new Request("PUT", "_watcher/watch/new_watch"); createWatchRequest.setJsonEntity(watch); Map createWatch = entityAsMap(client().performRequest(createWatchRequest)); @@ -560,7 +571,13 @@ private void waitForHits(String indexName, int expectedHits) throws Exception { try { Map response = entityAsMap(client().performRequest(request)); Map hits 
= (Map) response.get("hits"); - int total = (int) hits.get("total"); + logger.info("Hits are: {}", hits); + int total; + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) { + total = (int) ((Map) hits.get("total")).get("value"); + } else { + total = (int) hits.get("total"); + } assertThat(total, greaterThanOrEqualTo(expectedHits)); } catch (IOException ioe) { if (ioe instanceof ResponseException) { @@ -580,12 +597,7 @@ static String toStr(Response response) throws IOException { private void createUser(final boolean oldCluster) throws Exception { final String id = oldCluster ? "preupgrade_user" : "postupgrade_user"; - Request request; - if (oldCluster) { - request = new Request("PUT", "/_xpack/security/user/" + id); - } else { - request = new Request("PUT", "/_security/user/" + id); - } + Request request = new Request("PUT", getSecurityEndpoint() + "/user/" + id); request.setJsonEntity( "{\n" + " \"password\" : \"j@rV1s\",\n" + @@ -599,12 +611,7 @@ private void createUser(final boolean oldCluster) throws Exception { private void createRole(final boolean oldCluster) throws Exception { final String id = oldCluster ? "preupgrade_role" : "postupgrade_role"; - Request request; - if (oldCluster) { - request = new Request("PUT", "/_xpack/security/role/" + id); - } else { - request = new Request("PUT", "/_security/role/" + id); - } + Request request = new Request("PUT", getSecurityEndpoint() + "/role/" + id); request.setJsonEntity( "{\n" + " \"run_as\": [ \"abc\" ],\n" + @@ -625,20 +632,59 @@ private void createRole(final boolean oldCluster) throws Exception { private void assertUserInfo(final boolean oldCluster) throws Exception { final String user = oldCluster ? "preupgrade_user" : "postupgrade_user"; - Map response = oldCluster ? 
- entityAsMap(client().performRequest(new Request("GET", "/_xpack/security/user/" + user))) : - entityAsMap(client().performRequest(new Request("GET", "/_security/user/" + user))); + Request request = new Request("GET", getSecurityEndpoint() + "/user/" + user); + Map response = entityAsMap(client().performRequest(request)); @SuppressWarnings("unchecked") Map userInfo = (Map) response.get(user); assertEquals(user + "@example.com", userInfo.get("email")); assertNotNull(userInfo.get("full_name")); assertNotNull(userInfo.get("roles")); } + private String getSecurityEndpoint() { + String securityEndpoint; + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) { + securityEndpoint = "/_security"; + } else { + securityEndpoint = "/_xpack/security"; + } + return securityEndpoint; + } + + private String getSQLEndpoint() { + String sqlEndpoint; + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) { + sqlEndpoint = "/_sql"; + } else { + sqlEndpoint = "/_xpack/sql"; + } + return sqlEndpoint; + } + + private String getRollupEndpoint() { + String rollupEndpoint; + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) { + rollupEndpoint = "/_rollup"; + } else { + rollupEndpoint = "/_xpack/rollup"; + } + return rollupEndpoint; + } + + private String getWatcherEndpoint() { + String watcherEndpoint; + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) { + watcherEndpoint = "/_watcher"; + } else { + watcherEndpoint = "/_xpack/watcher"; + } + return watcherEndpoint; + } + private void assertRoleInfo(final boolean oldCluster) throws Exception { final String role = oldCluster ? "preupgrade_role" : "postupgrade_role"; - @SuppressWarnings("unchecked") Map response = oldCluster ?
- (Map) entityAsMap(client().performRequest(new Request("GET", "/_xpack/security/role/" + role))).get(role) : - (Map) entityAsMap(client().performRequest(new Request("GET", "/_security/role/" + role))).get(role); + @SuppressWarnings("unchecked") Map response = (Map) entityAsMap( + client().performRequest(new Request("GET", getSecurityEndpoint() + "/role/" + role)) + ).get(role); assertNotNull(response.get("run_as")); assertNotNull(response.get("cluster")); assertNotNull(response.get("indices")); @@ -650,12 +696,7 @@ private void assertRollUpJob(final String rollupJob) throws Exception { waitForRollUpJob(rollupJob, expectedStates); // check that the rollup job is started using the RollUp API - final Request getRollupJobRequest; - if (isRunningAgainstOldCluster()) { - getRollupJobRequest = new Request("GET", "/_xpack/rollup/job/" + rollupJob); - } else { - getRollupJobRequest = new Request("GET", "/_rollup/job/" + rollupJob); - } + final Request getRollupJobRequest = new Request("GET", getRollupEndpoint() + "/job/" + rollupJob); Map getRollupJobResponse = entityAsMap(client().performRequest(getRollupJobRequest)); Map job = getJob(getRollupJobResponse, rollupJob); assertNotNull(job); @@ -700,12 +741,8 @@ private void assertRollUpJob(final String rollupJob) throws Exception { private void waitForRollUpJob(final String rollupJob, final Matcher expectedStates) throws Exception { assertBusy(() -> { - final Request getRollupJobRequest; - if (isRunningAgainstOldCluster()) { - getRollupJobRequest = new Request("GET", "/_xpack/rollup/job/" + rollupJob); - } else { - getRollupJobRequest = new Request("GET", "/_rollup/job/" + rollupJob); - } + final Request getRollupJobRequest = new Request("GET", getRollupEndpoint() + "/job/" + rollupJob); + Response getRollupJobResponse = client().performRequest(getRollupJobRequest); assertThat(getRollupJobResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java index 36673fa4984c9..b881af65420aa 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java @@ -7,14 +7,16 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; -import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -72,9 +74,7 @@ private void createTestIndex() throws IOException { "\"airline\": {\"type\": 
\"keyword\"}," + "\"responsetime\": {\"type\": \"float\"}" + "}}}}"); - RequestOptions.Builder options = createTestIndex.getOptions().toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - createTestIndex.setOptions(options); + createTestIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createTestIndex); } @@ -129,6 +129,7 @@ private void oldClusterTests() throws IOException { dfBuilder.setDelayedDataCheckConfig(null); } dfBuilder.setIndices(Collections.singletonList("airline-data")); + addAggregations(dfBuilder); Request putDatafeed = new Request("PUT", "_xpack/ml/datafeeds/" + OLD_CLUSTER_STARTED_DATAFEED_ID); putDatafeed.setJsonEntity(Strings.toString(dfBuilder.build())); @@ -262,4 +263,11 @@ private void assertJobNotPresent(String jobId, List> jobs) { .filter(id -> id.equals(jobId)).findFirst(); assertFalse(config.isPresent()); } + + private void addAggregations(DatafeedConfig.Builder dfBuilder) { + TermsAggregationBuilder airline = AggregationBuilders.terms("airline"); + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time").subAggregation(airline); + dfBuilder.setParsedAggregations(AggregatorFactories.builder().addAggregator( + AggregationBuilders.histogram("time").interval(300000).subAggregation(maxTime).field("time"))); + } } diff --git a/x-pack/qa/full-cluster-restart/with-system-key/build.gradle b/x-pack/qa/full-cluster-restart/with-system-key/build.gradle deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/x-pack/qa/full-cluster-restart/without-system-key/build.gradle b/x-pack/qa/full-cluster-restart/without-system-key/build.gradle deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/x-pack/qa/kerberos-tests/build.gradle b/x-pack/qa/kerberos-tests/build.gradle index a59becbfe6b54..88248f89b72c5 100644 --- a/x-pack/qa/kerberos-tests/build.gradle +++ b/x-pack/qa/kerberos-tests/build.gradle @@ -2,9 +2,14 @@ import java.nio.file.Path import java.nio.file.Paths import java.nio.file.Files -apply plugin: 'elasticsearch.vagrantsupport' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' +apply plugin: 'elasticsearch.test.fixtures' + +testFixtures.useFixture ":test:fixtures:krb5kdc-fixture" + +// https://github.com/elastic/elasticsearch/issues/40624 +integTest.enabled = false dependencies { testCompile "org.elasticsearch.plugin:x-pack-core:${version}" @@ -12,75 +17,6 @@ dependencies { testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') } -// MIT Kerberos Vagrant Testing Fixture -String box = "krb5kdc" -Map vagrantEnvVars = [ - 'VAGRANT_CWD' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}", - 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', - 'VAGRANT_PROJECT_DIR' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}" -] - -task krb5kdcUpdate(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'box' - subcommand 'update' - boxName box - environmentVars vagrantEnvVars - dependsOn "vagrantCheckVersion", "virtualboxCheckVersion" -} - -task krb5kdcFixture(type: org.elasticsearch.gradle.test.VagrantFixture) { - command 'up' - args '--provision', '--provider', 'virtualbox' - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcUpdate -} - -// lazily resolve to avoid any slowdowns from DNS lookups prior to when we need this value -Object httpPrincipal = new Object() { - @Override - String toString() { - InetAddress resolvedAddress = InetAddress.getByName('127.0.0.1') - return "HTTP/" + 
resolvedAddress.getCanonicalHostName() - } -} - -String realm = "BUILD.ELASTIC.CO" - -task 'addPrincipal#peppa'(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh peppa " - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcFixture -} - -task 'addPrincipal#george'(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh george dino" - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcFixture -} - -task 'addPrincipal#HTTP'(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $httpPrincipal" - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcFixture -} - -task krb5AddPrincipals { dependsOn krb5kdcFixture, 'addPrincipal#peppa', 'addPrincipal#george', 'addPrincipal#HTTP' } - -def generatedResources = "$buildDir/generated-resources/keytabs" -task copyKeytabToGeneratedResources(type: Copy) { - Path peppaKeytab = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("peppa.keytab").toAbsolutePath() - from peppaKeytab; - into generatedResources - dependsOn krb5AddPrincipals -} - integTestCluster { // force localhost IPv4 otherwise it is a chicken and egg problem where we need the keytab for the hostname when starting the cluster // but do not know the exact address that is first in the http ports file @@ -96,12 +32,10 @@ integTestCluster { setting 'xpack.security.authc.realms.kerberos.kerberos.krb.debug', 'true' setting 'xpack.security.authc.realms.kerberos.kerberos.remove_realm_name', 'false' - Path krb5conf = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf").toAbsolutePath() - String jvmArgsStr = " -Djava.security.krb5.conf=${krb5conf}" + " -Dsun.security.krb5.debug=true" - jvmArgs jvmArgsStr - Path esKeytab = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs") - .resolve("$httpPrincipal".replace('/', '_') + ".keytab").toAbsolutePath() - extraConfigFile("es.keytab", "${esKeytab}") + jvmArgs += " -Djava.security.krb5.conf=${project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("peppa")}" + jvmArgs += " -Dsun.security.krb5.debug=true" + + extraConfigFile("es.keytab", project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("peppa", "HTTP_localhost.keytab")) setupCommand 'setupTestAdmin', 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" @@ -119,6 +53,7 @@ integTestCluster { } +String realm = "BUILD.ELASTIC.CO" integTestRunner { Path peppaKeytab = Paths.get("${project.buildDir}", "generated-resources", "keytabs", "peppa.keytab") systemProperty 'test.userkt', "peppa@${realm}" @@ -126,16 +61,17 @@ integTestRunner { systemProperty 'test.userpwd', "george@${realm}" systemProperty 'test.userpwd.password', "dino" systemProperty 'tests.security.manager', 'true' - Path krb5conf = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf").toAbsolutePath() - List jvmargs = ["-Djava.security.krb5.conf=${krb5conf}","-Dsun.security.krb5.debug=true"] - jvmArgs jvmargs + jvmArgs([ + "-Djava.security.krb5.conf=${project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("peppa")}", + "-Dsun.security.krb5.debug=true" + ]) } -if (project.rootProject.vagrantSupported == false) { 
- integTest.enabled = false - testingConventions.enabled = false -} else { - project.sourceSets.test.output.dir(generatedResources) - integTestCluster.dependsOn krb5AddPrincipals, krb5kdcFixture, copyKeytabToGeneratedResources - integTest.finalizedBy project(':test:fixtures:krb5kdc-fixture').halt +def generatedResources = "$buildDir/generated-resources/keytabs" +task copyKeytabToGeneratedResources(type: Copy) { + from project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("peppa", "peppa.keytab") + into generatedResources + dependsOn project(':test:fixtures:krb5kdc-fixture').postProcessFixture } +project.sourceSets.test.output.dir(generatedResources, builtBy:copyKeytabToGeneratedResources) + diff --git a/x-pack/qa/multi-cluster-search-security/build.gradle b/x-pack/qa/multi-cluster-search-security/build.gradle index 63265b6949f1e..c31b2c0ad1d5e 100644 --- a/x-pack/qa/multi-cluster-search-security/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/build.gradle @@ -75,5 +75,5 @@ task integTest { dependsOn = [mixedClusterTest] } -unitTest.enabled = false // no unit tests for multi-cluster-search, only the rest integration test +test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test check.dependsOn(integTest) diff --git a/x-pack/qa/openldap-tests/build.gradle b/x-pack/qa/openldap-tests/build.gradle index 6e2c91dff75ab..5305699b9a0c7 100644 --- a/x-pack/qa/openldap-tests/build.gradle +++ b/x-pack/qa/openldap-tests/build.gradle @@ -1,8 +1,5 @@ -Project idpFixtureProject = xpackProject("test:idp-fixture") -evaluationDependsOn(idpFixtureProject.path) - apply plugin: 'elasticsearch.standalone-test' -apply plugin: 'elasticsearch.vagrantsupport' +apply plugin: 'elasticsearch.test.fixtures' dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here @@ -11,21 +8,13 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } -task openLdapFixture { - dependsOn "vagrantCheckVersion", "virtualboxCheckVersion", idpFixtureProject.up -} +testFixtures.useFixture ":x-pack:test:idp-fixture" +Project idpFixtureProject = xpackProject("test:idp-fixture") String outputDir = "${project.buildDir}/generated-resources/${project.name}" task copyIdpTrust(type: Copy) { - from idpFixtureProject.file('src/main/resources/certs/idptrust.jks'); - from idpFixtureProject.file('src/main/resources/certs/ca.crt'); + from idpFixtureProject.file('openldap/certs/ca.jks'); + from idpFixtureProject.file('openldap/certs/ca_server.pem'); into outputDir } -if (project.rootProject.vagrantSupported) { - project.sourceSets.test.output.dir(outputDir, builtBy: copyIdpTrust) - unitTest.dependsOn openLdapFixture - unitTest.finalizedBy idpFixtureProject.halt -} else { - unitTest.enabled = false - testingConventions.enabled = false -} +project.sourceSets.test.output.dir(outputDir, builtBy: copyIdpTrust) diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java index bb88103048a40..61854a5396bba 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java @@ -52,12 +52,12 @@ public class OpenLdapTests extends ESTestCase { - public static final String OPEN_LDAP_DNS_URL = "ldaps://localhost:60636"; - public static final String OPEN_LDAP_IP_URL = 
"ldaps://127.0.0.1:60636"; + public static final String OPEN_LDAP_DNS_URL = "ldaps://localhost:" + getFromProperty("636"); + public static final String OPEN_LDAP_IP_URL = "ldaps://127.0.0.1:" + getFromProperty("636"); public static final String PASSWORD = "NickFuryHeartsES"; private static final String HAWKEYE_DN = "uid=hawkeye,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; - public static final String LDAPTRUST_PATH = "/idptrust.jks"; + public static final String LDAPTRUST_PATH = "/ca.jks"; private static final SecureString PASSWORD_SECURE_STRING = new SecureString(PASSWORD.toCharArray()); public static final String REALM_NAME = "oldap-test"; @@ -85,7 +85,7 @@ public void initializeSslSocketFactory() throws Exception { Path truststore = getDataPath(LDAPTRUST_PATH); /* * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. - * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * If we re-use an SSLContext, previously connected sessions can get re-established which breaks hostname * verification tests since a re-established connection does not perform hostname verification. */ MockSecureSettings mockSecureSettings = new MockSecureSettings(); @@ -307,4 +307,11 @@ private Map resolve(LDAPConnection connection, LdapMetaDataResol resolver.resolve(connection, HAWKEYE_DN, TimeValue.timeValueSeconds(1), logger, null, future); return future.get(); } + + private static String getFromProperty(String port) { + String key = "test.fixtures.openldap.tcp." + port; + final String value = System.getProperty(key); + assertNotNull("Expected the actual value for port " + port + " to be in system property " + key, value); + return value; + } } diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java index a4351b696784a..9ac936d043892 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java @@ -48,14 +48,14 @@ public class OpenLdapUserSearchSessionFactoryTests extends ESTestCase { private Settings globalSettings; private ThreadPool threadPool; - private static final String LDAPCACERT_PATH = "/ca.crt"; + private static final String LDAPCACERT_PATH = "/ca_server.pem"; @Before public void init() { Path caPath = getDataPath(LDAPCACERT_PATH); /* * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. - * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * If we re-use an SSLContext, previously connected sessions can get re-established which breaks hostname * verification tests since a re-established connection does not perform hostname verification. 
*/ globalSettings = Settings.builder() diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java index 98794dd4f705c..036cf8ad0db33 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java @@ -181,6 +181,6 @@ protected String bindPassword() { @Override protected String trustPath() { - return "/idptrust.jks"; + return "/ca.jks"; } } diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle index 3d415e0e2922a..64e1c61b60717 100644 --- a/x-pack/qa/reindex-tests-with-security/build.gradle +++ b/x-pack/qa/reindex-tests-with-security/build.gradle @@ -1,10 +1,4 @@ -import javax.net.ssl.HttpsURLConnection -import javax.net.ssl.KeyManager -import javax.net.ssl.SSLContext -import javax.net.ssl.TrustManagerFactory -import java.nio.charset.StandardCharsets -import java.security.KeyStore -import java.security.SecureRandom +import org.elasticsearch.gradle.http.WaitForHttpResource apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -57,48 +51,11 @@ integTestCluster { 'bin/elasticsearch-users', 'useradd', user, '-p', 'x-pack-test-password', '-r', role } waitCondition = { node, ant -> - // Load the CA PKCS#12 file as a truststore - KeyStore ks = KeyStore.getInstance("PKCS12"); - ks.load(caFile.newInputStream(), 'password'.toCharArray()); - TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(ks); - - // Configre a SSL context for TLS1.2 using our CA trust manager - SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); - sslContext.init(new KeyManager[0], tmf.getTrustManagers(), new SecureRandom()); - - // Check whether the cluster has started - URL url = new URL("https://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}&wait_for_status=yellow"); - for (int i = 20; i >= 0; i--) { - // we use custom wait logic here for HTTPS - HttpsURLConnection httpURLConnection = null; - try { - logger.info("Trying ${url}"); - httpURLConnection = (HttpsURLConnection) url.openConnection(); - httpURLConnection.setSSLSocketFactory(sslContext.getSocketFactory()); - httpURLConnection.setRequestProperty("Authorization", - "Basic " + Base64.getEncoder().encodeToString("test_admin:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); - httpURLConnection.setRequestMethod("GET"); - httpURLConnection.connect(); - if (httpURLConnection.getResponseCode() == 200) { - logger.info("Cluster has started"); - return true; - } else { - logger.debug("HTTP response was [{}]", httpURLConnection.getResponseCode()); - } - } catch (IOException e) { - if (i == 0) { - logger.error("Failed to call cluster health - " + e) - } - logger.debug("Call to [{}] threw an exception", url, e) - } finally { - if (httpURLConnection != null) { - httpURLConnection.disconnect(); - } - } - // did not start, so wait a bit before trying again - Thread.sleep(750L); - } - return false; + WaitForHttpResource http = new WaitForHttpResource("https", node.httpUri(), numNodes) + http.setTrustStoreFile(caFile) + http.setTrustStorePassword("password") + http.setUsername("test_admin") + http.setPassword("x-pack-test-password") + 
return http.wait(5000) } } diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle index 7ede2424f0780..90ca5f600b198 100644 --- a/x-pack/qa/rolling-upgrade-basic/build.gradle +++ b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -122,17 +122,17 @@ for (Version version : bwcVersions.wireCompatible) { } } -unitTest.enabled = false // no unit tests for rolling upgrades, only the rest integration test +test.enabled = false // no unit tests for rolling upgrades, only the rest integration test // basic integ tests includes testing bwc against the most recent version -task integTest { +task bwcTestSnapshots { if (project.bwc_tests_enabled) { for (final def version : bwcVersions.unreleasedWireCompatible) { dependsOn "v${version}#bwcTest" } } } -check.dependsOn(integTest) +check.dependsOn(bwcTestSnapshots) compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" @@ -154,9 +154,11 @@ project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) repositories { maven { + name "elastic" url "https://artifacts.elastic.co/maven" } maven { + name "elastic-snapshots" url "https://snapshots.elastic.co/maven" } } diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle new file mode 100644 index 0000000000000..2715b4e7024e7 --- /dev/null +++ b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle @@ -0,0 +1,270 @@ +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + testCompile project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit +} + +// This is a top level task which we will add dependencies to below. +// It is a single task that can be used to backcompat tests against all versions. +task bwcTest { + description = 'Runs backwards compatibility tests.' 
+ group = 'verification' +} + +for (Version version : bwcVersions.wireCompatible) { + String taskPrefix = "v${version}" + + // ============================================================================================ + // Create leader cluster + // ============================================================================================ + + RestIntegTestTask leaderClusterTest = tasks.create(name: "${taskPrefix}#leader#clusterTest", type: RestIntegTestTask) { + mustRunAfter(precommit) + } + + configure(extensions.findByName("${taskPrefix}#leader#clusterTestCluster")) { + bwcVersion = version + numBwcNodes = 3 + numNodes = 3 + clusterName = 'leader' + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + } + + Task leaderClusterTestRunner = tasks.getByName("${taskPrefix}#leader#clusterTestRunner") + leaderClusterTestRunner.configure { + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') + systemProperty 'tests.rest.upgrade_state', 'none' + systemProperty 'tests.rest.cluster_name', 'leader' + + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" + } + + // ============================================================================================ + // Create follower cluster + // ============================================================================================ + + RestIntegTestTask followerClusterTest = tasks.create(name: "${taskPrefix}#follower#clusterTest", type: RestIntegTestTask) { + mustRunAfter(precommit) + } + + configure(extensions.findByName("${taskPrefix}#follower#clusterTestCluster")) { + dependsOn leaderClusterTestRunner + bwcVersion = version + numBwcNodes = 3 + numNodes = 3 + clusterName = 'follower' + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + } + + Task followerClusterTestRunner = tasks.getByName("${taskPrefix}#follower#clusterTestRunner") + followerClusterTestRunner.configure { + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') + systemProperty 'tests.rest.upgrade_state', 'none' + systemProperty 'tests.rest.cluster_name', 'follower' + + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" + + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerClusterTest.nodes.get(0).transportUri()}" + } + + // ============================================================================================ + // Upgrade follower cluster + // ============================================================================================ + + Closure configureUpgradeCluster = {String prefix, String cluster, String name, Task lastRunner, int stopNode, + RestIntegTestTask clusterTest, Closure getOtherUnicastHostAddresses -> + 
configure(extensions.findByName("${prefix}#${cluster}#${name}")) { + dependsOn lastRunner, "${prefix}#${cluster}#clusterTestCluster#node${stopNode}.stop" + clusterName = cluster + otherUnicastHostAddresses = { getOtherUnicastHostAddresses() } + minimumMasterNodes = { 2 } + autoSetInitialMasterNodes = false + /* Override the data directory so the new node always gets the node we + * just stopped's data directory. */ + dataDir = { nodeNumber -> clusterTest.nodes[stopNode].dataDir } + setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + setting 'node.name', "upgraded-node-${cluster}-${stopNode}" + setting 'node.attr.upgraded', 'true' + } + } + + Task followerOneThirdUpgradedTest = tasks.create(name: "${taskPrefix}#follower#oneThirdUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster(taskPrefix, 'follower', 'oneThirdUpgradedTestCluster', followerClusterTestRunner, 0, followerClusterTest, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { [followerClusterTest.nodes.get(1).transportUri(), followerClusterTest.nodes.get(2).transportUri()] }) + + Task followerOneThirdUpgradedTestRunner = tasks.getByName("${taskPrefix}#follower#oneThirdUpgradedTestRunner") + followerOneThirdUpgradedTestRunner.configure { + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') + systemProperty 'tests.rest.upgrade_state', 'one_third' + systemProperty 'tests.rest.cluster_name', 'follower' + + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerClusterTest.nodes.get(1).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerClusterTest.nodes.get(1).transportUri()}" + + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" + + finalizedBy "${taskPrefix}#follower#clusterTestCluster#node1.stop" + } + + Task followerTwoThirdsUpgradedTest = tasks.create(name: "${taskPrefix}#follower#twoThirdsUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster(taskPrefix, 'follower', 'twoThirdsUpgradedTestCluster', followerOneThirdUpgradedTestRunner, 1, followerClusterTest, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { [followerClusterTest.nodes.get(2).transportUri(), followerOneThirdUpgradedTest.nodes.get(0).transportUri()] }) + + Task followerTwoThirdsUpgradedTestRunner = tasks.getByName("${taskPrefix}#follower#twoThirdsUpgradedTestRunner") + followerTwoThirdsUpgradedTestRunner.configure { + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') + systemProperty 'tests.rest.upgrade_state', 'two_third' + systemProperty 'tests.rest.cluster_name', 'follower' + + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerClusterTest.nodes.get(2).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerClusterTest.nodes.get(2).transportUri()}" + + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> 
leaderClusterTest.nodes.get(0).transportUri()}" + + finalizedBy "${taskPrefix}#follower#clusterTestCluster#node2.stop" + } + + Task followerUpgradedClusterTest = tasks.create(name: "${taskPrefix}#follower#upgradedClusterTest", type: RestIntegTestTask) + + configureUpgradeCluster(taskPrefix, 'follower', 'upgradedClusterTestCluster', followerTwoThirdsUpgradedTestRunner, 2, followerClusterTest, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { [followerOneThirdUpgradedTest.nodes.get(0).transportUri(), followerTwoThirdsUpgradedTest.nodes.get(0).transportUri()] }) + + Task followerUpgradedClusterTestRunner = tasks.getByName("${taskPrefix}#follower#upgradedClusterTestRunner") + followerUpgradedClusterTestRunner.configure { + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') + systemProperty 'tests.rest.upgrade_state', 'all' + systemProperty 'tests.rest.cluster_name', 'follower' + + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerOneThirdUpgradedTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerOneThirdUpgradedTest.nodes.get(0).transportUri()}" + + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(0).transportUri()}" + + // This is needed, otherwise leader node 0 will stop after the leaderClusterTestRunner task has run. + // Here it is ok to stop, because in the next task, the leader node 0 gets upgraded. + finalizedBy "v${version}#leader#clusterTestCluster#node0.stop" + } + + // ============================================================================================ + // Upgrade leader cluster + // ============================================================================================ + + Task leaderOneThirdUpgradedTest = tasks.create(name: "${taskPrefix}#leader#oneThirdUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster(taskPrefix, 'leader', 'oneThirdUpgradedTestCluster', followerUpgradedClusterTestRunner, 0, leaderClusterTest, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { [leaderClusterTest.nodes.get(1).transportUri(), leaderClusterTest.nodes.get(2).transportUri()] }) + + Task leaderOneThirdUpgradedTestRunner = tasks.getByName("${taskPrefix}#leader#oneThirdUpgradedTestRunner") + leaderOneThirdUpgradedTestRunner.configure { + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') + systemProperty 'tests.rest.upgrade_state', 'one_third' + systemProperty 'tests.rest.cluster_name', 'leader' + + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerUpgradedClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerUpgradedClusterTest.nodes.get(0).transportUri()}" + + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(2).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderClusterTest.nodes.get(2).transportUri()}" + + finalizedBy "${taskPrefix}#leader#clusterTestCluster#node1.stop" + } + + Task leaderTwoThirdsUpgradedTest = tasks.create(name: "${taskPrefix}#leader#twoThirdsUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster(taskPrefix, 'leader', 'twoThirdsUpgradedTestCluster', 
leaderOneThirdUpgradedTestRunner, 1, leaderClusterTest, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { [leaderClusterTest.nodes.get(2).transportUri(), leaderOneThirdUpgradedTest.nodes.get(0).transportUri()] }) + + Task leaderTwoThirdsUpgradedTestRunner = tasks.getByName("${taskPrefix}#leader#twoThirdsUpgradedTestRunner") + leaderTwoThirdsUpgradedTestRunner.configure { + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') + systemProperty 'tests.rest.upgrade_state', 'two_third' + systemProperty 'tests.rest.cluster_name', 'leader' + + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerUpgradedClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerUpgradedClusterTest.nodes.get(0).transportUri()}" + + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderOneThirdUpgradedTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderOneThirdUpgradedTest.nodes.get(0).transportUri()}" + + finalizedBy "${taskPrefix}#leader#clusterTestCluster#node2.stop" + } + + Task leaderUpgradedClusterTest = tasks.create(name: "${taskPrefix}#leader#upgradedClusterTest", type: RestIntegTestTask) + + configureUpgradeCluster(taskPrefix, 'leader', "upgradedClusterTestCluster", leaderTwoThirdsUpgradedTestRunner, 2, leaderClusterTest, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { [leaderOneThirdUpgradedTest.nodes.get(0).transportUri(), leaderTwoThirdsUpgradedTest.nodes.get(0).transportUri()] }) + + Task leaderUpgradedClusterTestRunner = tasks.getByName("${taskPrefix}#leader#upgradedClusterTestRunner") + leaderUpgradedClusterTestRunner.configure { + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') + systemProperty 'tests.rest.upgrade_state', 'all' + systemProperty 'tests.rest.cluster_name', 'leader' + + nonInputProperties.systemProperty 'tests.follower_host', "${-> followerUpgradedClusterTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.follower_remote_cluster_seed', "${-> followerUpgradedClusterTest.nodes.get(0).transportUri()}" + + nonInputProperties.systemProperty 'tests.leader_host', "${-> leaderTwoThirdsUpgradedTest.nodes.get(0).httpUri()}" + nonInputProperties.systemProperty 'tests.leader_remote_cluster_seed', "${-> leaderTwoThirdsUpgradedTest.nodes.get(0).transportUri()}" + + /* + * Force stopping all the upgraded nodes after the test runner + * so they are alive during the test. 
+ */ + finalizedBy "${taskPrefix}#follower#oneThirdUpgradedTestCluster#stop" + finalizedBy "${taskPrefix}#follower#twoThirdsUpgradedTestCluster#stop" + finalizedBy "${taskPrefix}#follower#upgradedClusterTestCluster#stop" + finalizedBy "${taskPrefix}#leader#oneThirdUpgradedTestCluster#stop" + finalizedBy "${taskPrefix}#leader#twoThirdsUpgradedTestCluster#stop" + } + + if (project.bwc_tests_enabled) { + Task versionBwcTest = tasks.create(name: "${taskPrefix}#bwcTest") { + dependsOn = [leaderUpgradedClusterTest] + } + bwcTest.dependsOn(versionBwcTest) + } +} + +test.enabled = false // no unit tests for rolling upgrades, only the rest integration test + +// basic integ tests includes testing bwc against the most recent version +task integTest { + if (project.bwc_tests_enabled) { + for (final def version : bwcVersions.unreleasedWireCompatible) { + dependsOn "v${version}#bwcTest" + } + } +} +check.dependsOn(integTest) \ No newline at end of file diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/AbstractMultiClusterUpgradeTestCase.java b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/AbstractMultiClusterUpgradeTestCase.java new file mode 100644 index 0000000000000..3221e899eac36 --- /dev/null +++ b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/AbstractMultiClusterUpgradeTestCase.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.upgrades; + +import org.apache.http.HttpHost; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.AfterClass; +import org.junit.Before; + +import java.io.IOException; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public abstract class AbstractMultiClusterUpgradeTestCase extends ESRestTestCase { + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + enum UpgradeState { + NONE, + ONE_THIRD, + TWO_THIRD, + ALL; + + public static UpgradeState parse(String value) { + switch (value) { + case "none": + return NONE; + case "one_third": + return ONE_THIRD; + case "two_third": + return TWO_THIRD; + case "all": + return ALL; + default: + throw new AssertionError("unknown cluster type: " + value); + } + } + } + + protected final UpgradeState upgradeState = UpgradeState.parse(System.getProperty("tests.rest.upgrade_state")); + + enum ClusterName { + LEADER, + FOLLOWER; + + public static ClusterName parse(String value) { + switch (value) { + case "leader": + return LEADER; + case "follower": + return FOLLOWER; + default: + throw new AssertionError("unknown cluster type: " + value); + } + } + } + + protected final ClusterName clusterName = ClusterName.parse(System.getProperty("tests.rest.cluster_name")); + + protected static final Version UPGRADE_FROM_VERSION = + Version.fromString(System.getProperty("tests.upgrade_from_version")); + + private static RestClient leaderClient; + private 
static RestClient followerClient; + private static boolean initialized = false; + + @Before + public void initClientsAndConfigureClusters() throws IOException { + String leaderHost = System.getProperty("tests.leader_host"); + if (leaderHost == null) { + throw new AssertionError("leader host is missing"); + } + + if (initialized) { + return; + } + + String followerHost = System.getProperty("tests.follower_host"); + if (clusterName == ClusterName.LEADER) { + leaderClient = buildClient(leaderHost); + if (followerHost != null) { + followerClient = buildClient(followerHost); + } + } else if (clusterName == ClusterName.FOLLOWER) { + if (followerHost == null) { + throw new AssertionError("follower host is missing"); + } + + leaderClient = buildClient(leaderHost); + followerClient = buildClient(followerHost); + } else { + throw new AssertionError("unknown cluster name: " + clusterName); + } + + configureLeaderRemoteClusters(); + configureFollowerRemoteClusters(); + initialized = true; + } + + private void configureLeaderRemoteClusters() throws IOException { + String leaderRemoteClusterSeed = System.getProperty("tests.leader_remote_cluster_seed"); + if (leaderRemoteClusterSeed != null) { + logger.info("Configuring leader remote cluster [{}]", leaderRemoteClusterSeed); + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity("{\"persistent\": {\"cluster.remote.leader.seeds\": \"" + leaderRemoteClusterSeed + "\"}}"); + assertThat(leaderClient.performRequest(request).getStatusLine().getStatusCode(), equalTo(200)); + if (followerClient != null) { + assertThat(followerClient.performRequest(request).getStatusLine().getStatusCode(), equalTo(200)); + } + } else { + logger.info("No leader remote cluster seed found."); + } + } + + private void configureFollowerRemoteClusters() throws IOException { + String followerRemoteClusterSeed = System.getProperty("tests.follower_remote_cluster_seed"); + if (followerRemoteClusterSeed != null) { + logger.info("Configuring follower remote cluster [{}]", followerRemoteClusterSeed); + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity("{\"persistent\": {\"cluster.remote.follower.seeds\": \"" + followerRemoteClusterSeed + "\"}}"); + assertThat(leaderClient.performRequest(request).getStatusLine().getStatusCode(), equalTo(200)); + assertThat(followerClient.performRequest(request).getStatusLine().getStatusCode(), equalTo(200)); + } else { + logger.info("No follower remote cluster seed found."); + } + } + + @AfterClass + public static void destroyClients() throws IOException { + try { + IOUtils.close(leaderClient, followerClient); + } finally { + leaderClient = null; + followerClient = null; + } + } + + protected static RestClient leaderClient() { + return leaderClient; + } + + protected static RestClient followerClient() { + return followerClient; + } + + private RestClient buildClient(final String url) throws IOException { + int portSeparator = url.lastIndexOf(':'); + HttpHost httpHost = new HttpHost(url.substring(0, portSeparator), + Integer.parseInt(url.substring(portSeparator + 1)), getProtocol()); + return buildClient(restAdminSettings(), new HttpHost[]{httpHost}); + } + + protected static Map toMap(Response response) throws IOException { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); + } + +} diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java 
b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java new file mode 100644 index 0000000000000..371b5667c7d80 --- /dev/null +++ b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java @@ -0,0 +1,391 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.support.XContentMapValues; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class CcrRollingUpgradeIT extends AbstractMultiClusterUpgradeTestCase { + + public void testUniDirectionalIndexFollowing() throws Exception { + logger.info("clusterName={}, upgradeState={}", clusterName, upgradeState); + + if (clusterName == ClusterName.LEADER) { + switch (upgradeState) { + case NONE: + createLeaderIndex(leaderClient(), "leader_index1"); + index(leaderClient(), "leader_index1", 64); + createLeaderIndex(leaderClient(), "leader_index2"); + index(leaderClient(), "leader_index2", 64); + break; + case ONE_THIRD: + break; + case TWO_THIRD: + break; + case ALL: + createLeaderIndex(leaderClient(), "leader_index4"); + followIndex(followerClient(), "leader", "leader_index4", "follower_index4"); + index(leaderClient(), "leader_index4", 64); + assertTotalHitCount("follower_index4", 64, followerClient()); + break; + default: + throw new AssertionError("unexpected upgrade_state [" + upgradeState + "]"); + } + } else if (clusterName == ClusterName.FOLLOWER) { + switch (upgradeState) { + case NONE: + followIndex(followerClient(), "leader", "leader_index1", "follower_index1"); + assertTotalHitCount("follower_index1", 64, followerClient()); + break; + case ONE_THIRD: + index(leaderClient(), "leader_index1", 64); + assertTotalHitCount("follower_index1", 128, followerClient()); + + followIndex(followerClient(), "leader", "leader_index2", "follower_index2"); + assertTotalHitCount("follower_index2", 64, followerClient()); + break; + case TWO_THIRD: + index(leaderClient(), "leader_index1", 64); + assertTotalHitCount("follower_index1", 192, followerClient()); + + index(leaderClient(), "leader_index2", 64); + assertTotalHitCount("follower_index2", 128, followerClient()); + + createLeaderIndex(leaderClient(), "leader_index3"); + index(leaderClient(), "leader_index3", 64); + followIndex(followerClient(), "leader", "leader_index3", "follower_index3"); + assertTotalHitCount("follower_index3", 64, followerClient()); + break; + case ALL: + index(leaderClient(), "leader_index1", 64); + assertTotalHitCount("follower_index1", 256, followerClient()); + + index(leaderClient(), "leader_index2", 64); + assertTotalHitCount("follower_index2", 192, followerClient()); + + index(leaderClient(), "leader_index3", 64); + assertTotalHitCount("follower_index3", 128, followerClient()); + break; + 
default: + throw new AssertionError("unexpected upgrade_state [" + upgradeState + "]"); + } + } else { + throw new AssertionError("unexpected cluster_name [" + clusterName + "]"); + } + } + + public void testAutoFollowing() throws Exception { + String leaderIndex1 = "logs-20200101"; + String leaderIndex2 = "logs-20200102"; + String leaderIndex3 = "logs-20200103"; + + if (clusterName == ClusterName.LEADER) { + switch (upgradeState) { + case NONE: + case ONE_THIRD: + case TWO_THIRD: + break; + case ALL: + index(leaderClient(), leaderIndex1, 64); + assertBusy(() -> { + String followerIndex = "copy-" + leaderIndex1; + assertTotalHitCount(followerIndex, 320, followerClient()); + }); + index(leaderClient(), leaderIndex2, 64); + assertBusy(() -> { + String followerIndex = "copy-" + leaderIndex2; + assertTotalHitCount(followerIndex, 256, followerClient()); + }); + index(leaderClient(), leaderIndex3, 64); + assertBusy(() -> { + String followerIndex = "copy-" + leaderIndex3; + assertTotalHitCount(followerIndex, 192, followerClient()); + }); + + deleteAutoFollowPattern(followerClient(), "test_pattern"); + stopIndexFollowing(followerClient(), "copy-" + leaderIndex1); + stopIndexFollowing(followerClient(), "copy-" + leaderIndex2); + stopIndexFollowing(followerClient(), "copy-" + leaderIndex3); + break; + default: + throw new AssertionError("unexpected upgrade_state [" + upgradeState + "]"); + } + } else if (clusterName == ClusterName.FOLLOWER) { + switch (upgradeState) { + case NONE: + putAutoFollowPattern(followerClient(), "test_pattern", "leader", "logs-*"); + createLeaderIndex(leaderClient(), leaderIndex1); + index(leaderClient(), leaderIndex1, 64); + assertBusy(() -> { + String followerIndex = "copy-" + leaderIndex1; + assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(1)); + assertTotalHitCount(followerIndex, 64, followerClient()); + }); + break; + case ONE_THIRD: + index(leaderClient(), leaderIndex1, 64); + assertBusy(() -> { + String followerIndex = "copy-" + leaderIndex1; + assertTotalHitCount(followerIndex, 128, followerClient()); + }); + // Auto follow stats are kept in-memory on master elected node + // and if this node get updated then auto follow stats are reset + { + int previousNumberOfSuccessfulFollowedIndices = getNumberOfSuccessfulFollowedIndices(); + createLeaderIndex(leaderClient(), leaderIndex2); + index(leaderClient(), leaderIndex2, 64); + assertBusy(() -> { + String followerIndex = "copy-" + leaderIndex2; + assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(previousNumberOfSuccessfulFollowedIndices + 1)); + assertTotalHitCount(followerIndex, 64, followerClient()); + }); + } + break; + case TWO_THIRD: + index(leaderClient(), leaderIndex1, 64); + assertBusy(() -> { + String followerIndex = "copy-" + leaderIndex1; + assertTotalHitCount(followerIndex, 192, followerClient()); + }); + index(leaderClient(), leaderIndex2, 64); + assertBusy(() -> { + String followerIndex = "copy-" + leaderIndex2; + assertTotalHitCount(followerIndex, 128, followerClient()); + }); + + // Auto follow stats are kept in-memory on master elected node + // and if this node get updated then auto follow stats are reset + { + int previousNumberOfSuccessfulFollowedIndices = getNumberOfSuccessfulFollowedIndices(); + createLeaderIndex(leaderClient(), leaderIndex3); + index(leaderClient(), leaderIndex3, 64); + assertBusy(() -> { + String followerIndex = "copy-" + leaderIndex3; + assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(previousNumberOfSuccessfulFollowedIndices + 1)); + 
assertTotalHitCount(followerIndex, 64, followerClient()); + }); + } + break; + case ALL: + index(leaderClient(), leaderIndex1, 64); + assertBusy(() -> { + String followerIndex = "copy-" + leaderIndex1; + assertTotalHitCount(followerIndex, 256, followerClient()); + }); + index(leaderClient(), leaderIndex2, 64); + assertBusy(() -> { + String followerIndex = "copy-" + leaderIndex2; + assertTotalHitCount(followerIndex, 192, followerClient()); + }); + index(leaderClient(), leaderIndex3, 64); + assertBusy(() -> { + String followerIndex = "copy-" + leaderIndex3; + assertTotalHitCount(followerIndex, 128, followerClient()); + }); + break; + default: + throw new UnsupportedOperationException("unexpected upgrade state [" + upgradeState + "]"); + } + } else { + throw new AssertionError("unexpected cluster_name [" + clusterName + "]"); + } + } + + public void testCannotFollowLeaderInUpgradedCluster() throws Exception { + if (upgradeState != UpgradeState.ALL) { + return; + } + + if (clusterName == ClusterName.FOLLOWER) { + // At this point the leader cluster has not been upgraded, but the follower cluster has been upgraded. + // Create a leader index in the follower cluster and try to follow it in the leader cluster. + // This should fail, because the leader cluster at this point in time can't do file based recovery from follower. + createLeaderIndex(followerClient(), "not_supported"); + index(followerClient(), "not_supported", 64); + + ResponseException e = expectThrows(ResponseException.class, + () -> followIndex(leaderClient(), "follower", "not_supported", "not_supported")); + assertThat(e.getMessage(), containsString("the snapshot was created with Elasticsearch version [")); + assertThat(e.getMessage(), containsString("] which is higher than the version of this node [")); + } else if (clusterName == ClusterName.LEADER) { + // At this point all nodes in both clusters have been updated and + // the leader cluster can now follow not_supported index in the follower cluster: + ensureGreen(followerClient(), "not_supported"); + followIndex(leaderClient(), "follower", "not_supported", "not_supported"); + assertTotalHitCount("not_supported", 64, leaderClient()); + } else { + throw new AssertionError("unexpected cluster_name [" + clusterName + "]"); + } + } + + public void testBiDirectionalIndexFollowing() throws Exception { + logger.info("clusterName={}, upgradeState={}", clusterName, upgradeState); + + if (clusterName == ClusterName.FOLLOWER) { + switch (upgradeState) { + case NONE: + createLeaderIndex(leaderClient(), "leader_index5"); + index(leaderClient(), "leader_index5", 128); + + followIndex(followerClient(), "leader", "leader_index5", "follower_index5"); + followIndex(leaderClient(), "follower", "follower_index5", "follower_index6"); + assertTotalHitCount("follower_index5", 128, followerClient()); + assertTotalHitCount("follower_index6", 128, leaderClient()); + + index(leaderClient(), "leader_index5", 128); + pauseIndexFollowing(followerClient(), "follower_index5"); + pauseIndexFollowing(leaderClient(), "follower_index6"); + break; + case ONE_THIRD: + index(leaderClient(), "leader_index5", 128); + break; + case TWO_THIRD: + index(leaderClient(), "leader_index5", 128); + break; + case ALL: + index(leaderClient(), "leader_index5", 128); + break; + default: + throw new AssertionError("unexpected upgrade_state [" + upgradeState + "]"); + } + } else if (clusterName == ClusterName.LEADER) { + switch (upgradeState) { + case NONE: + break; + case ONE_THIRD: + index(leaderClient(), "leader_index5", 128); + break; +
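In testBiDirectionalIndexFollowing the follower pauses both follow relationships before the rolling restarts begin and resumes them only once every node has been upgraded, which is why the final assertion expects 896 hits: leader_index5 receives 128 documents in seven separate stages (two in the follower's NONE phase, one in each of its remaining three phases, and one in each of the leader's ONE_THIRD and TWO_THIRD phases), and 7 x 128 = 896. The pauseIndexFollowing/resumeIndexFollowing helpers further down issue plain pause_follow/resume_follow calls; a minimal sketch, assuming the same test-class context and an illustrative index name:

    assertOK(followerClient().performRequest(new Request("POST", "/follower_index5/_ccr/pause_follow")));
    // ... nodes are restarted on the new version while following is paused ...
    assertOK(followerClient().performRequest(new Request("POST", "/follower_index5/_ccr/resume_follow")));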
case TWO_THIRD: + index(leaderClient(), "leader_index5", 128); + break; + case ALL: + ensureGreen(followerClient(), "follower_index5"); + resumeIndexFollowing(followerClient(), "follower_index5"); + ensureGreen(leaderClient(), "follower_index6"); + resumeIndexFollowing(leaderClient(), "follower_index6"); + + assertTotalHitCount("follower_index5", 896, followerClient()); + assertTotalHitCount("follower_index6", 896, leaderClient()); + break; + default: + throw new AssertionError("unexpected upgrade_state [" + upgradeState + "]"); + } + } else { + throw new AssertionError("unexpected cluster_name [" + clusterName + "]"); + } + } + + private static void createLeaderIndex(RestClient client, String indexName) throws IOException { + Settings.Builder indexSettings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0); + if (UPGRADE_FROM_VERSION.before(Version.V_7_0_0) || randomBoolean()) { + indexSettings.put("index.soft_deletes.enabled", true); + } + createIndex(client, indexName, indexSettings.build()); + } + + private static void createIndex(RestClient client, String name, Settings settings) throws IOException { + Request request = new Request("PUT", "/" + name); + request.setJsonEntity("{\n \"settings\": " + Strings.toString(settings) + "}"); + client.performRequest(request); + } + + private static void followIndex(RestClient client, String leaderCluster, String leaderIndex, String followIndex) throws IOException { + final Request request = new Request("PUT", "/" + followIndex + "/_ccr/follow?wait_for_active_shards=1"); + request.setJsonEntity("{\"remote_cluster\": \"" + leaderCluster + "\", \"leader_index\": \"" + leaderIndex + + "\", \"read_poll_timeout\": \"10ms\"}"); + assertOK(client.performRequest(request)); + } + + private static void putAutoFollowPattern(RestClient client, String name, String remoteCluster, String pattern) throws IOException { + Request request = new Request("PUT", "/_ccr/auto_follow/" + name); + request.setJsonEntity("{\"leader_index_patterns\": [\"" + pattern + "\"], \"remote_cluster\": \"" + remoteCluster + "\"," + + "\"follow_index_pattern\": \"copy-{{leader_index}}\", \"read_poll_timeout\": \"10ms\"}"); + assertOK(client.performRequest(request)); + } + + private static void deleteAutoFollowPattern(RestClient client, String patternName) throws IOException { + Request request = new Request("DELETE", "/_ccr/auto_follow/" + patternName); + assertOK(client.performRequest(request)); + } + + private int getNumberOfSuccessfulFollowedIndices() throws IOException { + Request statsRequest = new Request("GET", "/_ccr/stats"); + Map response = toMap(client().performRequest(statsRequest)); + Integer actualSuccessfulFollowedIndices = ObjectPath.eval("auto_follow_stats.number_of_successful_follow_indices", response); + if (actualSuccessfulFollowedIndices != null) { + return actualSuccessfulFollowedIndices; + } else { + return -1; + } + } + + private static void index(RestClient client, String index, int numDocs) throws IOException { + for (int i = 0; i < numDocs; i++) { + final Request request = new Request("POST", "/" + index + "/_doc/"); + request.setJsonEntity("{}"); + assertOK(client.performRequest(request)); + if (randomIntBetween(0, 5) == 3) { + assertOK(client.performRequest(new Request("POST", "/" + index + "/_refresh"))); + } + } + } + + private static void assertTotalHitCount(final String index, + final int expectedTotalHits, + final RestClient client) throws Exception { + assertOK(client.performRequest(new Request("POST", "/" + 
index + "/_refresh"))); + assertBusy(() -> verifyTotalHitCount(index, expectedTotalHits, client)); + } + + private static void verifyTotalHitCount(final String index, + final int expectedTotalHits, + final RestClient client) throws IOException { + final Request request = new Request("GET", "/" + index + "/_search"); + request.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); + Map response = toMap(client.performRequest(request)); + final int totalHits = (int) XContentMapValues.extractValue("hits.total", response); + assertThat(totalHits, equalTo(expectedTotalHits)); + } + + private static void stopIndexFollowing(RestClient client, String followerIndex) throws IOException { + pauseIndexFollowing(client, followerIndex); + assertOK(client.performRequest(new Request("POST", "/" + followerIndex + "/_close"))); + assertOK(client.performRequest(new Request("POST", "/" + followerIndex + "/_ccr/unfollow"))); + } + + private static void pauseIndexFollowing(RestClient client, String followerIndex) throws IOException { + assertOK(client.performRequest(new Request("POST", "/" + followerIndex + "/_ccr/pause_follow"))); + } + + private static void resumeIndexFollowing(RestClient client, String followerIndex) throws IOException { + assertOK(client.performRequest(new Request("POST", "/" + followerIndex + "/_ccr/resume_follow"))); + } + + private static void ensureGreen(RestClient client, String index) throws IOException { + Request request = new Request("GET", "/_cluster/health/" + index); + request.addParameter("wait_for_status", "green"); + request.addParameter("wait_for_no_relocating_shards", "true"); + request.addParameter("timeout", "70s"); + request.addParameter("level", "shards"); + client.performRequest(request); + } + +} diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 04c4367a11305..69e1b53499dc9 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -3,11 +3,11 @@ import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.Version import java.nio.charset.StandardCharsets -import java.util.regex.Matcher // Apply the java plugin to this project so the sources can be edited in an IDE -apply plugin: 'elasticsearch.build' -unitTest.enabled = false +apply plugin: 'elasticsearch.standalone-test' + +test.enabled = false dependencies { // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here @@ -68,159 +68,50 @@ Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> return tmpFile.exists() } -Project mainProject = project - compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" forbiddenPatterns { exclude '**/system_key' } -// Tests are pushed down to subprojects -testingConventions.enabled = false - -/** - * Subdirectories of this project are test rolling upgrades with various - * configuration options based on their name. 
- */ -subprojects { - Matcher m = project.name =~ /with(out)?-system-key/ - if (false == m.matches()) { - throw new InvalidUserDataException("Invalid project name [${project.name}]") - } - boolean withSystemKey = m.group(1) == null - - apply plugin: 'elasticsearch.standalone-test' - - // Use resources from the rolling-upgrade project in subdirectories - sourceSets { - test { - java { - srcDirs = ["${mainProject.projectDir}/src/test/java"] - } - resources { - srcDirs = ["${mainProject.projectDir}/src/test/resources"] - } - } - } - - forbiddenPatterns { - exclude '**/system_key' - } +String outputDir = "${buildDir}/generated-resources/${project.name}" - String outputDir = "${buildDir}/generated-resources/${project.name}" - - // This is a top level task which we will add dependencies to below. - // It is a single task that can be used to backcompat tests against all versions. - task bwcTest { +// This is a top level task which we will add dependencies to below. +// It is a single task that can be used to backcompat tests against all versions. +task bwcTest { description = 'Runs backwards compatibility tests.' group = 'verification' - } +} - String output = "${buildDir}/generated-resources/${project.name}" - task copyTestNodeKeystore(type: Copy) { - from project(':x-pack:plugin:core') - .file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') +task copyTestNodeKeyMaterial(type: Copy) { + from project(':x-pack:plugin:core').files('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt', + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') into outputDir - } +} - for (Version version : bwcVersions.wireCompatible) { +for (Version version : bwcVersions.wireCompatible) { String baseName = "v${version}" Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { - mustRunAfter(precommit) + mustRunAfter(precommit) } configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { - dependsOn copyTestNodeKeystore - if (version.before('6.3.0')) { - String depVersion = version; - if (project.bwcVersions.unreleased.contains(version)) { - depVersion += "-SNAPSHOT" - } - mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" - } - String usersCli = version.before('6.3.0') ? 
'bin/x-pack/users' : 'bin/elasticsearch-users' - setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - bwcVersion = version - numBwcNodes = 3 - numNodes = 3 - clusterName = 'rolling-upgrade' - waitCondition = waitWithAuth - setting 'xpack.monitoring.exporters._http.type', 'http' - setting 'xpack.monitoring.exporters._http.enabled', 'false' - setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' - setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' - dependsOn copyTestNodeKeystore - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - if (version.onOrAfter('7.0.0')) { - setting 'xpack.security.authc.realms.file.file1.order', '0' - setting 'xpack.security.authc.realms.native.native1.order', '1' - } else { - setting 'xpack.security.authc.realms.file1.type', 'file' - setting 'xpack.security.authc.realms.file1.order', '0' - setting 'xpack.security.authc.realms.native1.type', 'native' - setting 'xpack.security.authc.realms.native1.order', '1' - } - - if (withSystemKey) { - if (version.onOrAfter('5.1.0') && version.before('6.0.0')) { - // The setting didn't exist until 5.1.0 - setting 'xpack.security.system_key.required', 'true' - } - if (version.onOrAfter('6.0.0')) { - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" - } else { - String systemKeyFile = version.before('6.3.0') ? 'x-pack/system_key' : 'system_key' - extraConfigFile systemKeyFile, "${mainProject.projectDir}/src/test/resources/system_key" - keystoreSetting 'xpack.security.authc.token.passphrase', 'token passphrase' + dependsOn copyTestNodeKeyMaterial + if (version.before('6.3.0')) { + String depVersion = version; + if (project.bwcVersions.unreleased.contains(version)) { + depVersion += "-SNAPSHOT" + } + mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" } - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - } - - if (version.onOrAfter('6.6.0')) { - setting 'ccr.auto_follow.wait_for_metadata_timeout', '1s' - } - - // Old versions of the code contain an invalid assertion that trips - // during tests. Versions 5.6.9 and 6.2.4 have been fixed by removing - // the assertion, but this is impossible for released versions. - // However, released versions run without assertions, so end users won't - // be suffering the effects. This argument effectively removes the - // incorrect assertion from the older versions used in the BWC tests. 
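The same explanation and jvmArgs workaround reappear in the new single-project configuration further down. For readers unfamiliar with the flag, '-da:<class>' is standard JVM behaviour: it disables assert statements in exactly one named class while a blanket '-ea' elsewhere stays in effect. A tiny standalone illustration (class name and values are made up, not part of this build):

    public class AssertionFlagDemo {
        static int positive(int value) {
            // With '-ea' this throws AssertionError for value <= 0;
            // with '-ea -da:AssertionFlagDemo' the assert is skipped for this class only.
            assert value > 0 : "value must be positive";
            return value;
        }

        public static void main(String[] args) {
            System.out.println(positive(-1));
        }
    }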
- if (version.before('5.6.9') || (version.onOrAfter('6.0.0') && version.before('6.2.4'))) { - jvmArgs '-da:org.elasticsearch.xpack.monitoring.exporter.http.HttpExportBulk' - } - - systemProperty 'tests.rest.blacklist', [ - 'old_cluster/30_ml_jobs_crud/*', - 'old_cluster/40_ml_datafeed_crud/*', - ].join(',') - } - - Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") - oldClusterTestRunner.configure { - systemProperty 'tests.rest.suite', 'old_cluster' - } - - Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure getOtherUnicastHostAddresses -> - configure(extensions.findByName("${baseName}#${name}")) { - dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" - setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users' + setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + bwcVersion = version + numBwcNodes = 3 + numNodes = 3 clusterName = 'rolling-upgrade' - otherUnicastHostAddresses = { getOtherUnicastHostAddresses() } - minimumMasterNodes = { 2 } - autoSetInitialMasterNodes = false - /* Override the data directory so the new node always gets the node we - * just stopped's data directory. */ - dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } waitCondition = waitWithAuth setting 'xpack.monitoring.exporters._http.type', 'http' setting 'xpack.monitoring.exporters._http.enabled', 'false' @@ -229,152 +120,188 @@ subprojects { setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - keystoreSetting 'xpack.security.transport.ssl.keystore.secure_password', 'testnode' - setting 'node.attr.upgraded', 'true' setting 'xpack.security.authc.token.enabled', 'true' setting 'xpack.security.audit.enabled', 'true' - setting 'node.name', "upgraded-node-${stopNode}" - dependsOn copyTestNodeKeystore + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } + dependsOn copyTestNodeKeyMaterial extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - if (withSystemKey) { - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') + extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') + if (version.onOrAfter('7.0.0')) { + setting 'xpack.security.authc.realms.file.file1.order', '0' + setting 'xpack.security.authc.realms.native.native1.order', '1' + } else { + setting 'xpack.security.authc.realms.file1.type', 'file' + setting 'xpack.security.authc.realms.file1.order', '0' + setting 'xpack.security.authc.realms.native1.type', 'native' + setting 'xpack.security.authc.realms.native1.order', '1' } - if (version.before('6.0.0')) { - keystoreSetting 'xpack.security.authc.token.passphrase', 'token 
passphrase' + + keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + + if (version.onOrAfter('6.6.0')) { + setting 'ccr.auto_follow.wait_for_metadata_timeout', '1s' } - systemProperty 'tests.rest.blacklist', [ - 'mixed_cluster/30_ml_jobs_crud/*', - 'mixed_cluster/40_ml_datafeed_crud/*', - ].join(',') - } + // Old versions of the code contain an invalid assertion that trips + // during tests. Versions 5.6.9 and 6.2.4 have been fixed by removing + // the assertion, but this is impossible for released versions. + // However, released versions run without assertions, so end users won't + // be suffering the effects. This argument effectively removes the + // incorrect assertion from the older versions used in the BWC tests. + if (version.before('5.6.9') || (version.onOrAfter('6.0.0') && version.before('6.2.4'))) { + jvmArgs '-da:org.elasticsearch.xpack.monitoring.exporter.http.HttpExportBulk' + } + } + + Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") + oldClusterTestRunner.configure { + systemProperty 'tests.rest.suite', 'old_cluster' + } + + Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure getOtherUnicastHostAddresses -> + configure(extensions.findByName("${baseName}#${name}")) { + dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" + setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + clusterName = 'rolling-upgrade' + otherUnicastHostAddresses = { getOtherUnicastHostAddresses() } + minimumMasterNodes = { 2 } + autoSetInitialMasterNodes = false + /* Override the data directory so the new node always gets the node we + * just stopped's data directory. 
*/ + dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } + waitCondition = waitWithAuth + setting 'xpack.monitoring.exporters._http.type', 'http' + setting 'xpack.monitoring.exporters._http.enabled', 'false' + setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' + setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } + setting 'node.attr.upgraded', 'true' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.audit.enabled', 'true' + setting 'node.name', "upgraded-node-${stopNode}" + dependsOn copyTestNodeKeyMaterial + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') + extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') + setting 'xpack.security.authc.realms.file.file1.order', '0' + setting 'xpack.security.authc.realms.native.native1.order', '1' + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" + } } Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask) configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, 0, - // Use all running nodes as seed nodes so there is no race between pinging and the tests - { [oldClusterTest.nodes.get(1).transportUri(), oldClusterTest.nodes.get(2).transportUri()] }) + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { [oldClusterTest.nodes.get(1).transportUri(), oldClusterTest.nodes.get(2).transportUri()] }) Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") oneThirdUpgradedTestRunner.configure { - systemProperty 'tests.rest.suite', 'mixed_cluster' - systemProperty 'tests.first_round', 'true' - // We only need to run these tests once so we may as well do it when we're two thirds upgraded - systemProperty 'tests.rest.blacklist', [ - 'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade', - 'mixed_cluster/30_ml_jobs_crud/*', - 'mixed_cluster/40_ml_datafeed_crud/*', + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.first_round', 'true' + // We only need to run these tests once so we may as well do it when we're two thirds upgraded + systemProperty 'tests.rest.blacklist', [ + 'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade', + 'mixed_cluster/30_ml_jobs_crud/Create a job in the mixed cluster and write some data', + 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed without aggs in mixed cluster', + 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed with aggs in mixed cluster' ].join(',') - finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" + finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" } Task 
twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask) configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, 1, - // Use all running nodes as seed nodes so there is no race between pinging and the tests - { [oldClusterTest.nodes.get(2).transportUri(), oneThirdUpgradedTest.nodes.get(0).transportUri()] }) + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { [oldClusterTest.nodes.get(2).transportUri(), oneThirdUpgradedTest.nodes.get(0).transportUri()] }) Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner") twoThirdsUpgradedTestRunner.configure { - systemProperty 'tests.rest.suite', 'mixed_cluster' - systemProperty 'tests.first_round', 'false' - finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" - - systemProperty 'tests.rest.blacklist', [ - 'mixed_cluster/30_ml_jobs_crud/*', - 'mixed_cluster/40_ml_datafeed_crud/*', - ].join(',') + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.first_round', 'false' + finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, 2, - // Use all running nodes as seed nodes so there is no race between pinging and the tests - { [oneThirdUpgradedTest.nodes.get(0).transportUri(), twoThirdsUpgradedTest.nodes.get(0).transportUri()] }) + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { [oneThirdUpgradedTest.nodes.get(0).transportUri(), twoThirdsUpgradedTest.nodes.get(0).transportUri()] }) Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { - systemProperty 'tests.rest.suite', 'upgraded_cluster' - /* - * Force stopping all the upgraded nodes after the test runner - * so they are alive during the test. - */ - finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop" - finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop" - - // migration tests should only run when the original/old cluster nodes where versions < 5.2.0. - // this stinks but we do the check here since our rest tests do not support conditionals - // otherwise we could check the index created version - String versionStr = project.extensions.findByName("${baseName}#oldClusterTestCluster").properties.get('bwcVersion') - String[] versionParts = versionStr.split('\\.') - if (versionParts[0].equals("5")) { - Integer minor = Integer.parseInt(versionParts[1]) - if (minor >= 2) { - systemProperty 'tests.rest.blacklist', '/20_security/Verify default password migration results in upgraded cluster' - } - } - - systemProperty 'tests.rest.blacklist', [ - 'upgraded_cluster/30_ml_jobs_crud/*', - 'upgraded_cluster/40_ml_datafeed_crud/*', - ].join(',') + systemProperty 'tests.rest.suite', 'upgraded_cluster' + /* + * Force stopping all the upgraded nodes after the test runner + * so they are alive during the test. + */ + finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop" + finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop" + + // migration tests should only run when the original/old cluster nodes where versions < 5.2.0. 
+ // this stinks but we do the check here since our rest tests do not support conditionals + // otherwise we could check the index created version + String versionStr = project.extensions.findByName("${baseName}#oldClusterTestCluster").properties.get('bwcVersion') + String[] versionParts = versionStr.split('\\.') + if (versionParts[0].equals("5")) { + Integer minor = Integer.parseInt(versionParts[1]) + if (minor >= 2) { + systemProperty 'tests.rest.blacklist', '/20_security/Verify default password migration results in upgraded cluster' + } + } } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { - dependsOn = [upgradedClusterTest] + dependsOn = [upgradedClusterTest] } if (project.bwc_tests_enabled) { - bwcTest.dependsOn(versionBwcTest) + bwcTest.dependsOn(versionBwcTest) } - } - - unitTest.enabled = false // no unit tests for rolling upgrades, only the rest integration test +} - // basic integ tests includes testing bwc against the most recent version - task integTest { +// basic integ tests includes testing bwc against the most recent version +task bwcTestSnapshots { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.unreleasedWireCompatible) { - dependsOn "v${version}#bwcTest" - } + for (final def version : bwcVersions.unreleasedWireCompatible) { + dependsOn "v${version}#bwcTest" + } } - } - check.dependsOn(integTest) - - dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here - testCompile project(path: xpackModule('core'), configuration: 'default') - testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') - testCompile project(path: xpackModule('watcher')) - } - - compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" +} +check.dependsOn(bwcTestSnapshots) - // copy x-pack plugin info so it is on the classpath and security manager has the right permissions - task copyXPackRestSpec(type: Copy) { +// copy x-pack plugin info so it is on the classpath and security manager has the right permissions +task copyXPackRestSpec(type: Copy) { dependsOn(project.configurations.restSpec, 'processTestResources') from project(xpackProject('plugin').path).sourceSets.test.resources include 'rest-api-spec/api/**' into project.sourceSets.test.output.resourcesDir - } +} - task copyXPackPluginProps(type: Copy) { +task copyXPackPluginProps(type: Copy) { dependsOn(copyXPackRestSpec) from project(xpackModule('core')).file('src/main/plugin-metadata') from project(xpackModule('core')).tasks.pluginProperties into outputDir - } - project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) - - repositories { - maven { - url "https://artifacts.elastic.co/maven" - } - maven { - url "https://snapshots.elastic.co/maven" - } - } } +project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java index d077ecedbeb22..0d0f05bcf9c6d 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -87,7 +87,7 @@ public void setupForTests() throws Exception { for (String template : templatesToWaitFor()) { try { final Request headRequest = new Request("HEAD", "_template/" + 
template); - headRequest.setOptions(allowTypeRemovalWarnings()); + headRequest.setOptions(allowTypesRemovalWarnings()); final boolean exists = adminClient() .performRequest(headRequest) .getStatusLine().getStatusCode() == 200; diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/CCRIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/CCRIT.java deleted file mode 100644 index 9fa34568a1e14..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/CCRIT.java +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.upgrades; - -import org.apache.http.util.EntityUtils; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ObjectPath; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.json.JsonXContent; - -import java.io.IOException; -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; - -public class CCRIT extends AbstractUpgradeTestCase { - - private static final Logger LOGGER = LogManager.getLogger(CCRIT.class); - - private static final Version UPGRADE_FROM_VERSION = - Version.fromString(System.getProperty("tests.upgrade_from_version")); - - private static final boolean SECOND_ROUND = "false".equals(System.getProperty("tests.first_round")); - - @Override - protected boolean preserveClusterSettings() { - return true; - } - - public void testIndexFollowing() throws Exception { - assumeTrue("CCR became available in 6.5, but test relies on a fix that was shipped with 6.6.0", - UPGRADE_FROM_VERSION.onOrAfter(Version.V_6_6_0)); - setupRemoteCluster(); - - final String leaderIndex = "my-leader-index"; - final String followerIndex = "my-follower-index"; - - switch (CLUSTER_TYPE) { - case OLD: - Settings indexSettings = Settings.builder() - .put("index.soft_deletes.enabled", true) - .put("index.number_of_shards", 1) - .build(); - createIndex(leaderIndex, indexSettings); - followIndex(leaderIndex, followerIndex); - index(leaderIndex, "1"); - assertDocumentExists(leaderIndex, "1"); - assertBusy(() -> { - assertFollowerGlobalCheckpoint(followerIndex, 0); - assertDocumentExists(followerIndex, "1"); - }); - break; - case MIXED: - if (SECOND_ROUND == false) { - index(leaderIndex, "2"); - assertDocumentExists(leaderIndex, "1", "2"); - assertBusy(() -> { - assertFollowerGlobalCheckpoint(followerIndex, 1); - assertDocumentExists(followerIndex, "1", "2"); - }); - } else { - index(leaderIndex, "3"); - assertDocumentExists(leaderIndex, "1", "2", "3"); - assertBusy(() -> { - assertFollowerGlobalCheckpoint(followerIndex, 2); - assertDocumentExists(followerIndex, "1", "2", "3"); - }); - } - break; - case UPGRADED: - index(leaderIndex, "4"); - assertDocumentExists(leaderIndex, "1", "2", "3", "4"); - assertBusy(() -> { - assertFollowerGlobalCheckpoint(followerIndex, 3); - assertDocumentExists(followerIndex, "1", "2", "3", "4"); - }); - stopIndexFollowing(followerIndex); - break; - default: - throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); - } - } 
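The auto-follow test deleted below and its multi-cluster replacement in CcrRollingUpgradeIT above both drive the same auto-follow API; registering the pattern boils down to a single request. A minimal sketch mirroring the putAutoFollowPattern helpers in both files (the remote-cluster alias and index pattern here are illustrative):

    Request pattern = new Request("PUT", "/_ccr/auto_follow/test_pattern");
    pattern.setJsonEntity("{\"leader_index_patterns\": [\"logs-*\"], \"remote_cluster\": \"leader\","
        + "\"follow_index_pattern\": \"copy-{{leader_index}}\", \"read_poll_timeout\": \"10ms\"}");
    assertOK(followerClient().performRequest(pattern));
    // any new leader index matching logs-* is then followed automatically as copy-<leader_index>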
- - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37231") - public void testAutoFollowing() throws Exception { - assumeTrue("CCR became available in 6.5, but test relies on a fix that was shipped with 6.6.0", - UPGRADE_FROM_VERSION.onOrAfter(Version.V_6_6_0)); - setupRemoteCluster(); - - final Settings indexSettings = Settings.builder() - .put("index.soft_deletes.enabled", true) - .put("index.number_of_shards", 1) - .build(); - - String leaderIndex1 = "logs-20200101"; - String leaderIndex2 = "logs-20200102"; - String leaderIndex3 = "logs-20200103"; - - switch (CLUSTER_TYPE) { - case OLD: - putAutoFollowPattern("test_pattern", "logs-*"); - createIndex(leaderIndex1, indexSettings); - index(leaderIndex1, "1"); - assertBusy(() -> { - String followerIndex = "copy-" + leaderIndex1; - assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(1)); - assertFollowerGlobalCheckpoint(followerIndex, 0); - assertDocumentExists(followerIndex, "1"); - }); - break; - case MIXED: - if (SECOND_ROUND == false) { - index(leaderIndex1, "2"); - assertBusy(() -> { - String followerIndex = "copy-" + leaderIndex1; - assertFollowerGlobalCheckpoint(followerIndex, 1); - assertDocumentExists(followerIndex, "2"); - }); - // Auto follow stats are kept in-memory on master elected node - // and if this node get updated then auto follow stats are reset - int previousNumberOfSuccessfulFollowedIndices = getNumberOfSuccessfulFollowedIndices(); - createIndex(leaderIndex2, indexSettings); - index(leaderIndex2, "1"); - assertBusy(() -> { - String followerIndex = "copy-" + leaderIndex2; - assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(previousNumberOfSuccessfulFollowedIndices + 1)); - assertFollowerGlobalCheckpoint(followerIndex, 0); - assertDocumentExists(followerIndex, "1"); - }); - } else { - index(leaderIndex1, "3"); - assertBusy(() -> { - String followerIndex = "copy-" + leaderIndex1; - assertFollowerGlobalCheckpoint(followerIndex, 2); - assertDocumentExists(followerIndex, "3"); - }); - index(leaderIndex2, "2"); - assertBusy(() -> { - String followerIndex = "copy-" + leaderIndex2; - assertFollowerGlobalCheckpoint(followerIndex, 1); - assertDocumentExists(followerIndex, "2"); - }); - - // Auto follow stats are kept in-memory on master elected node - // and if this node get updated then auto follow stats are reset - int previousNumberOfSuccessfulFollowedIndices = getNumberOfSuccessfulFollowedIndices(); - createIndex(leaderIndex3, indexSettings); - index(leaderIndex3, "1"); - assertBusy(() -> { - String followerIndex = "copy-" + leaderIndex3; - assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(previousNumberOfSuccessfulFollowedIndices + 1)); - assertFollowerGlobalCheckpoint(followerIndex, 0); - assertDocumentExists(followerIndex, "1"); - }); - } - break; - case UPGRADED: - index(leaderIndex1, "4"); - assertBusy(() -> { - String followerIndex = "copy-" + leaderIndex1; - assertFollowerGlobalCheckpoint(followerIndex, 3); - assertDocumentExists(followerIndex, "4"); - }); - index(leaderIndex2, "3"); - assertBusy(() -> { - String followerIndex = "copy-" + leaderIndex2; - assertFollowerGlobalCheckpoint(followerIndex, 2); - assertDocumentExists(followerIndex, "3"); - }); - index(leaderIndex3, "2"); - assertBusy(() -> { - String followerIndex = "copy-" + leaderIndex3; - assertFollowerGlobalCheckpoint(followerIndex, 1); - assertDocumentExists(followerIndex, "2"); - }); - - deleteAutoFollowPattern("test_pattern"); - - stopIndexFollowing("copy-" + leaderIndex1); - stopIndexFollowing("copy-" + 
leaderIndex2); - stopIndexFollowing("copy-" + leaderIndex3); - break; - default: - throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); - } - } - - private static void stopIndexFollowing(String followerIndex) throws IOException { - pauseFollow(followerIndex); - closeIndex(followerIndex); - unfollow(followerIndex); - } - - private static void followIndex(String leaderIndex, String followIndex) throws IOException { - final Request request = new Request("PUT", "/" + followIndex + "/_ccr/follow"); - request.setJsonEntity("{\"remote_cluster\": \"local\", \"leader_index\": \"" + leaderIndex + - "\", \"read_poll_timeout\": \"10ms\"}"); - assertOK(client().performRequest(request)); - } - - private static void pauseFollow(String followIndex) throws IOException { - assertOK(client().performRequest(new Request("POST", "/" + followIndex + "/_ccr/pause_follow"))); - } - - private static void unfollow(String followIndex) throws IOException { - assertOK(client().performRequest(new Request("POST", "/" + followIndex + "/_ccr/unfollow"))); - } - - private static void putAutoFollowPattern(String name, String pattern) throws IOException { - Request request = new Request("PUT", "/_ccr/auto_follow/" + name); - request.setJsonEntity("{\"leader_index_patterns\": [\"" + pattern + "\"], \"remote_cluster\": \"local\"," + - "\"follow_index_pattern\": \"copy-{{leader_index}}\", \"read_poll_timeout\": \"10ms\"}"); - assertOK(client().performRequest(request)); - } - - private static void deleteAutoFollowPattern(String patternName) throws IOException { - Request request = new Request("DELETE", "/_ccr/auto_follow/" + patternName); - assertOK(client().performRequest(request)); - } - - private static void index(String index, String id) throws IOException { - Request request = new Request("POST", "/" + index + "/_doc/" + id); - request.setJsonEntity("{}"); - assertOK(client().performRequest(request)); - } - - private static void assertDocumentExists(String index, String... 
ids) throws IOException { - for (String id : ids) { - Request request = new Request("HEAD", "/" + index + "/_doc/" + id); - Response response = client().performRequest(request); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - } - } - - private static void setupRemoteCluster() throws IOException { - Request request = new Request("GET", "/_nodes"); - Map nodesResponse = (Map) toMap(client().performRequest(request)).get("nodes"); - // Select node info of first node (we don't know the node id): - nodesResponse = (Map) nodesResponse.get(nodesResponse.keySet().iterator().next()); - String transportAddress = (String) nodesResponse.get("transport_address"); - - LOGGER.info("Configuring local remote cluster [{}]", transportAddress); - request = new Request("PUT", "/_cluster/settings"); - request.setJsonEntity("{\"persistent\": {\"cluster.remote.local.seeds\": \"" + transportAddress + "\"}}"); - assertThat(client().performRequest(request).getStatusLine().getStatusCode(), equalTo(200)); - } - - private int getNumberOfSuccessfulFollowedIndices() throws IOException { - Request statsRequest = new Request("GET", "/_ccr/stats"); - Map response = toMap(client().performRequest(statsRequest)); - Integer actualSuccessfulFollowedIndices = ObjectPath.eval("auto_follow_stats.number_of_successful_follow_indices", response); - if (actualSuccessfulFollowedIndices != null) { - return actualSuccessfulFollowedIndices; - } else { - return -1; - } - } - - private void assertFollowerGlobalCheckpoint(String followerIndex, int expectedFollowerCheckpoint) throws IOException { - Request statsRequest = new Request("GET", "/" + followerIndex + "/_stats"); - statsRequest.addParameter("level", "shards"); - // Just docs metric is sufficient here: - statsRequest.addParameter("metric", "docs"); - Map response = toMap(client().performRequest(statsRequest)); - LOGGER.info("INDEX STATS={}", response); - assertThat(((Map) response.get("indices")).size(), equalTo(1)); - Integer actualFollowerCheckpoint = ObjectPath.eval("indices." 
+ followerIndex + ".shards.0.0.seq_no.global_checkpoint", response); - assertThat(actualFollowerCheckpoint, equalTo(expectedFollowerCheckpoint)); - } - - private static Map toMap(Response response) throws IOException { - return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); - } - -} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java index 1f0b8cffe20b0..d788b87802cc9 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.action.document.RestGetAction; import org.elasticsearch.test.rest.yaml.ObjectPath; import java.io.IOException; @@ -64,7 +63,7 @@ public void testGeneratingTokenInOldCluster() throws Exception { assertNotNull(token); assertTokenWorks(token); - Request indexRequest1 = new Request("PUT", "token_backwards_compatibility_it/doc/old_cluster_token1"); + Request indexRequest1 = new Request("PUT", "token_backwards_compatibility_it/_doc/old_cluster_token1"); indexRequest1.setJsonEntity( "{\n" + " \"token\": \"" + token + "\"\n" + @@ -78,7 +77,7 @@ public void testGeneratingTokenInOldCluster() throws Exception { token = (String) responseMap.get("access_token"); assertNotNull(token); assertTokenWorks(token); - Request indexRequest2 = new Request("PUT", "token_backwards_compatibility_it/doc/old_cluster_token2"); + Request indexRequest2 = new Request("PUT", "token_backwards_compatibility_it/_doc/old_cluster_token2"); indexRequest2.setJsonEntity( "{\n" + " \"token\": \"" + token + "\"\n" + @@ -89,8 +88,7 @@ public void testGeneratingTokenInOldCluster() throws Exception { public void testTokenWorksInMixedOrUpgradedCluster() throws Exception { assumeTrue("this test should only run against the mixed or upgraded cluster", CLUSTER_TYPE == ClusterType.MIXED || CLUSTER_TYPE == ClusterType.UPGRADED); - Request getRequest = new Request("GET", "token_backwards_compatibility_it/doc/old_cluster_token1"); - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + Request getRequest = new Request("GET", "token_backwards_compatibility_it/_doc/old_cluster_token1"); Response getResponse = client().performRequest(getRequest); assertOK(getResponse); Map source = (Map) entityAsMap(getResponse).get("_source"); @@ -100,8 +98,7 @@ public void testTokenWorksInMixedOrUpgradedCluster() throws Exception { public void testMixedCluster() throws Exception { assumeTrue("this test should only run against the mixed cluster", CLUSTER_TYPE == ClusterType.MIXED); assumeTrue("the master must be on the latest version before we can write", isMasterOnLatestVersion()); - Request getRequest = new Request("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + Request getRequest = new Request("GET", "token_backwards_compatibility_it/_doc/old_cluster_token2"); Response getResponse = client().performRequest(getRequest); Map source = (Map) entityAsMap(getResponse).get("_source"); @@ -152,8 +149,7 @@ public void testMixedCluster() throws 
Exception { public void testUpgradedCluster() throws Exception { assumeTrue("this test should only run against the mixed cluster", CLUSTER_TYPE == ClusterType.UPGRADED); - Request getRequest = new Request("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + Request getRequest = new Request("GET", "token_backwards_compatibility_it/_doc/old_cluster_token2"); Response getResponse = client().performRequest(getRequest); assertOK(getResponse); @@ -168,8 +164,7 @@ public void testUpgradedCluster() throws Exception { assertOK(invalidationResponse); assertTokenDoesNotWork(token); - getRequest = new Request("GET", "token_backwards_compatibility_it/doc/old_cluster_token1"); - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + getRequest = new Request("GET", "token_backwards_compatibility_it/_doc/old_cluster_token1"); getResponse = client().performRequest(getRequest); source = (Map) entityAsMap(getResponse).get("_source"); diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index b9ae06499d112..18e9f66603a0e 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -1,24 +1,44 @@ --- -"Test old cluster datafeed": +"Test old cluster datafeed without aggs": - do: ml.get_datafeeds: - datafeed_id: old-cluster-datafeed - - match: { datafeeds.0.datafeed_id: "old-cluster-datafeed"} + datafeed_id: old-cluster-datafeed-without-aggs + - match: { datafeeds.0.datafeed_id: "old-cluster-datafeed-without-aggs"} - length: { datafeeds.0.indices: 1 } - gte: { datafeeds.0.scroll_size: 2000 } + - match: { datafeeds.0.script_fields.double_responsetime.script.lang: painless } + - is_false: datafeeds.0.aggregations - do: ml.get_datafeed_stats: - datafeed_id: old-cluster-datafeed + datafeed_id: old-cluster-datafeed-without-aggs - match: { datafeeds.0.state: "stopped"} - is_false: datafeeds.0.node --- -"Put job and datafeed in mixed cluster": +"Test old cluster datafeed with aggs": + - do: + ml.get_datafeeds: + datafeed_id: old-cluster-datafeed-with-aggs + - match: { datafeeds.0.datafeed_id: "old-cluster-datafeed-with-aggs"} + - length: { datafeeds.0.indices: 1 } + - gte: { datafeeds.0.scroll_size: 2000 } + - is_false: datafeeds.0.script_fields + - match: { datafeeds.0.aggregations.buckets.date_histogram.field: time } + - match: { datafeeds.0.aggregations.buckets.aggregations.time.max.field: time } + + - do: + ml.get_datafeed_stats: + datafeed_id: old-cluster-datafeed-with-aggs + - match: { datafeeds.0.state: "stopped"} + - is_false: datafeeds.0.node + +--- +"Put job and datafeed without aggs in mixed cluster": - do: ml.put_job: - job_id: mixed-cluster-datafeed-job + job_id: mixed-cluster-datafeed-job-without-aggs body: > { "description":"Cluster upgrade", @@ -37,16 +57,90 @@ - do: ml.put_datafeed: - datafeed_id: mixed-cluster-datafeed + datafeed_id: mixed-cluster-datafeed-without-aggs body: > { - "job_id":"mixed-cluster-datafeed-job", + "job_id":"mixed-cluster-datafeed-job-without-aggs", "indices":["airline-data"], - "scroll_size": 2000 + "scroll_size": 2000, + "script_fields": { + "double_responsetime": { + "script": { + "lang": "painless", + "source": 
"doc['responsetime'].value * 2" + } + } + } + } + + - do: + ml.get_datafeed_stats: + datafeed_id: mixed-cluster-datafeed-without-aggs + - match: { datafeeds.0.state: stopped} + - is_false: datafeeds.0.node + +--- +"Put job and datafeed with aggs in mixed cluster": + + - do: + ml.put_job: + job_id: mixed-cluster-datafeed-job-with-aggs + body: > + { + "description":"Cluster upgrade", + "analysis_config" : { + "bucket_span": "60s", + "summary_count_field_name": "doc_count", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "50mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + + - do: + ml.put_datafeed: + datafeed_id: mixed-cluster-datafeed-with-aggs + body: > + { + "job_id":"mixed-cluster-datafeed-job-with-aggs", + "indices":["airline-data"], + "scroll_size": 2000, + "aggregations": { + "buckets": { + "date_histogram": { + "field": "time", + "interval": "30s", + "time_zone": "UTC" + }, + "aggregations": { + "time": { + "max": {"field": "time"} + }, + "airline": { + "terms": { + "field": "airline", + "size": 100 + }, + "aggregations": { + "responsetime": { + "avg": { + "field": "responsetime" + } + } + } + } + } + } + } } - do: ml.get_datafeed_stats: - datafeed_id: mixed-cluster-datafeed + datafeed_id: mixed-cluster-datafeed-with-aggs - match: { datafeeds.0.state: stopped} - is_false: datafeeds.0.node diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml index 2952e649c76c8..2a1dd4397dc56 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml @@ -1,9 +1,13 @@ --- "CRUD watch APIs": + - skip: + reason: https://github.com/elastic/elasticsearch/issues/33185 + version: "6.7.0 - " + # no need to put watch, exists already - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -11,7 +15,7 @@ # execute watch - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -41,12 +45,12 @@ # deactivate watch, check with GET API as well - do: - xpack.watcher.deactivate_watch: + watcher.deactivate_watch: watch_id: "my_watch" - match: { status.state.active : false } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -55,12 +59,12 @@ # activate watch again, check with GET API as well - do: - xpack.watcher.activate_watch: + watcher.activate_watch: watch_id: "my_watch" - match: { status.state.active : true } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -69,7 +73,12 @@ --- "Test watcher stats output": + + - skip: + reason: https://github.com/elastic/elasticsearch/issues/33185 + version: "6.7.0 - " + - do: - xpack.watcher.stats: {} + watcher.stats: {} - match: { "manually_stopped": false } - match: { "stats.0.watcher_state": "started" } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index bbe26eb69bd20..597540d36c4ec 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml 
+++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -1,9 +1,9 @@ --- -"Put job and datafeed in old cluster": +"Put job and datafeed without aggs in old cluster": - do: ml.put_job: - job_id: old-cluster-datafeed-job + job_id: old-cluster-datafeed-job-without-aggs body: > { "description":"Cluster upgrade", @@ -19,21 +19,95 @@ "time_field":"time" } } - - match: { job_id: old-cluster-datafeed-job } + - match: { job_id: old-cluster-datafeed-job-without-aggs } - do: ml.put_datafeed: - datafeed_id: old-cluster-datafeed + datafeed_id: old-cluster-datafeed-without-aggs body: > { - "job_id":"old-cluster-datafeed-job", + "job_id":"old-cluster-datafeed-job-without-aggs", "indices":["airline-data"], - "types":["response"], - "scroll_size": 2000 + "scroll_size": 2000, + "script_fields": { + "double_responsetime": { + "script": { + "lang": "painless", + "source": "doc['responsetime'].value * 2" + } + } + } + } + + - do: + ml.get_datafeed_stats: + datafeed_id: old-cluster-datafeed-without-aggs + - match: { datafeeds.0.state: stopped} + - is_false: datafeeds.0.node + +--- +"Put job and datafeed with aggs in old cluster": + + - do: + ml.put_job: + job_id: old-cluster-datafeed-job-with-aggs + body: > + { + "description":"Cluster upgrade", + "analysis_config" : { + "bucket_span": "60s", + "summary_count_field_name": "doc_count", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "50mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + - match: { job_id: old-cluster-datafeed-job-with-aggs } + + - do: + ml.put_datafeed: + datafeed_id: old-cluster-datafeed-with-aggs + body: > + { + "job_id":"old-cluster-datafeed-job-with-aggs", + "indices":["airline-data"], + "scroll_size": 2000, + "aggregations": { + "buckets": { + "date_histogram": { + "field": "time", + "interval": "30s", + "time_zone": "UTC" + }, + "aggregations": { + "time": { + "max": {"field": "time"} + }, + "airline": { + "terms": { + "field": "airline", + "size": 100 + }, + "aggregations": { + "responsetime": { + "avg": { + "field": "responsetime" + } + } + } + } + } + } + } } - do: ml.get_datafeed_stats: - datafeed_id: old-cluster-datafeed + datafeed_id: old-cluster-datafeed-with-aggs - match: { datafeeds.0.state: stopped} - is_false: datafeeds.0.node diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml index 810307bbb2846..aafb7ddf239bb 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml @@ -1,8 +1,11 @@ --- "CRUD watch APIs": + - skip: + reason: https://github.com/elastic/elasticsearch/issues/33185 + version: "6.7.0 - " - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -23,7 +26,7 @@ - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -31,7 +34,7 @@ # execute watch - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -61,12 +64,12 @@ # deactivate watch, check with GET API as well - do: - xpack.watcher.deactivate_watch: + watcher.deactivate_watch: watch_id: "my_watch" - match: { status.state.active : false } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: 
"my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -75,12 +78,12 @@ # activate watch again, check with GET API as well - do: - xpack.watcher.activate_watch: + watcher.activate_watch: watch_id: "my_watch" - match: { status.state.active : true } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -89,7 +92,12 @@ --- "Test watcher stats output": + + - skip: + reason: https://github.com/elastic/elasticsearch/issues/33185 + version: "6.7.0 - " + - do: - xpack.watcher.stats: {} + watcher.stats: {} - match: { "manually_stopped": false } - match: { "stats.0.watcher_state": "started" } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml index e962c20a7e9eb..5d8775dfb2c11 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml @@ -77,27 +77,6 @@ setup: name: ".ml-state-write" - is_true: '' ---- -"Test job with no model memory limit has established model memory after reopening": - - do: - ml.open_job: - job_id: no-model-memory-limit-job - - - do: - ml.get_jobs: - job_id: no-model-memory-limit-job - - is_true: jobs.0.established_model_memory - - lt: { jobs.0.established_model_memory: 100000 } - - - do: - ml.close_job: - job_id: no-model-memory-limit-job - - - do: - ml.delete_job: - job_id: no-model-memory-limit-job - - match: { acknowledged: true } - --- "Test job with pre 6.4 rules": diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index 928fb3a066c28..f2dbb2e80dc8e 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -6,6 +6,8 @@ setup: # wait for long enough that we give delayed unassigned shards to stop being delayed timeout: 70s +--- +"Test old and mixed cluster datafeeds without aggs": - do: indices.create: index: airline-data @@ -15,82 +17,187 @@ setup: time: type: date + - do: + ml.get_datafeeds: + datafeed_id: old-cluster-datafeed-without-aggs + - match: { datafeeds.0.datafeed_id: "old-cluster-datafeed-without-aggs"} + - length: { datafeeds.0.indices: 1 } + - gte: { datafeeds.0.scroll_size: 2000 } + - match: { datafeeds.0.script_fields.double_responsetime.script.lang: painless } + - is_false: datafeeds.0.aggregations + + - do: + ml.get_datafeed_stats: + datafeed_id: old-cluster-datafeed-without-aggs + - match: { datafeeds.0.state: "stopped"} + - is_false: datafeeds.0.node + + - do: + ml.get_datafeeds: + datafeed_id: mixed-cluster-datafeed-without-aggs + - match: { datafeeds.0.datafeed_id: "mixed-cluster-datafeed-without-aggs"} + - length: { datafeeds.0.indices: 1 } + - gte: { datafeeds.0.scroll_size: 2000 } + - match: { datafeeds.0.script_fields.double_responsetime.script.lang: painless } + - is_false: datafeeds.0.aggregations + + - do: + ml.get_datafeed_stats: + datafeed_id: mixed-cluster-datafeed-without-aggs + - match: { datafeeds.0.state: "stopped"} + - is_false: datafeeds.0.node + + - do: + ml.open_job: + job_id: 
old-cluster-datafeed-job-without-aggs + + - do: + ml.start_datafeed: + datafeed_id: old-cluster-datafeed-without-aggs + start: 0 + + - do: + ml.stop_datafeed: + datafeed_id: old-cluster-datafeed-without-aggs + + - do: + ml.close_job: + job_id: old-cluster-datafeed-job-without-aggs + + - do: + ml.delete_datafeed: + datafeed_id: old-cluster-datafeed-without-aggs + + - do: + ml.delete_job: + job_id: old-cluster-datafeed-job-without-aggs + - match: { acknowledged: true } + + - do: + ml.open_job: + job_id: mixed-cluster-datafeed-job-without-aggs + + - do: + ml.start_datafeed: + datafeed_id: mixed-cluster-datafeed-without-aggs + start: 0 + + - do: + ml.stop_datafeed: + datafeed_id: mixed-cluster-datafeed-without-aggs + + - do: + ml.close_job: + job_id: mixed-cluster-datafeed-job-without-aggs + + - do: + ml.delete_datafeed: + datafeed_id: mixed-cluster-datafeed-without-aggs + + - do: + ml.delete_job: + job_id: mixed-cluster-datafeed-job-without-aggs + - match: { acknowledged: true } + + - do: + indices.delete: + index: airline-data + --- -"Test old and mixed cluster datafeeds": +"Test old and mixed cluster datafeeds with aggs": + - do: + indices.create: + index: airline-data + body: + mappings: + properties: + time: + type: date + - do: ml.get_datafeeds: - datafeed_id: old-cluster-datafeed - - match: { datafeeds.0.datafeed_id: "old-cluster-datafeed"} + datafeed_id: old-cluster-datafeed-with-aggs + - match: { datafeeds.0.datafeed_id: "old-cluster-datafeed-with-aggs"} - length: { datafeeds.0.indices: 1 } - gte: { datafeeds.0.scroll_size: 2000 } + - is_false: datafeeds.0.script_fields + - match: { datafeeds.0.aggregations.buckets.date_histogram.field: time } + - match: { datafeeds.0.aggregations.buckets.aggregations.time.max.field: time } - do: ml.get_datafeed_stats: - datafeed_id: old-cluster-datafeed + datafeed_id: old-cluster-datafeed-with-aggs - match: { datafeeds.0.state: "stopped"} - is_false: datafeeds.0.node - do: ml.get_datafeeds: - datafeed_id: mixed-cluster-datafeed - - match: { datafeeds.0.datafeed_id: "mixed-cluster-datafeed"} + datafeed_id: mixed-cluster-datafeed-with-aggs + - match: { datafeeds.0.datafeed_id: "mixed-cluster-datafeed-with-aggs"} - length: { datafeeds.0.indices: 1 } - gte: { datafeeds.0.scroll_size: 2000 } + - is_false: datafeeds.0.script_fields + - match: { datafeeds.0.aggregations.buckets.date_histogram.field: time } + - match: { datafeeds.0.aggregations.buckets.aggregations.time.max.field: time } - do: ml.get_datafeed_stats: - datafeed_id: mixed-cluster-datafeed + datafeed_id: mixed-cluster-datafeed-with-aggs - match: { datafeeds.0.state: "stopped"} - is_false: datafeeds.0.node - do: ml.open_job: - job_id: old-cluster-datafeed-job + job_id: old-cluster-datafeed-job-with-aggs - do: ml.start_datafeed: - datafeed_id: old-cluster-datafeed + datafeed_id: old-cluster-datafeed-with-aggs start: 0 - do: ml.stop_datafeed: - datafeed_id: old-cluster-datafeed + datafeed_id: old-cluster-datafeed-with-aggs - do: ml.close_job: - job_id: old-cluster-datafeed-job + job_id: old-cluster-datafeed-job-with-aggs - do: ml.delete_datafeed: - datafeed_id: old-cluster-datafeed + datafeed_id: old-cluster-datafeed-with-aggs - do: ml.delete_job: - job_id: old-cluster-datafeed-job + job_id: old-cluster-datafeed-job-with-aggs - match: { acknowledged: true } - do: ml.open_job: - job_id: mixed-cluster-datafeed-job + job_id: mixed-cluster-datafeed-job-with-aggs - do: ml.start_datafeed: - datafeed_id: mixed-cluster-datafeed + datafeed_id: mixed-cluster-datafeed-with-aggs start: 0 - do: 
ml.stop_datafeed: - datafeed_id: mixed-cluster-datafeed + datafeed_id: mixed-cluster-datafeed-with-aggs - do: ml.close_job: - job_id: mixed-cluster-datafeed-job + job_id: mixed-cluster-datafeed-job-with-aggs - do: ml.delete_datafeed: - datafeed_id: mixed-cluster-datafeed + datafeed_id: mixed-cluster-datafeed-with-aggs - do: ml.delete_job: - job_id: mixed-cluster-datafeed-job + job_id: mixed-cluster-datafeed-job-with-aggs - match: { acknowledged: true } + + - do: + indices.delete: + index: airline-data diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml index 3828db6128f91..dacb437d4b449 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml @@ -1,9 +1,13 @@ --- "CRUD watch APIs": + - skip: + reason: https://github.com/elastic/elasticsearch/issues/33185 + version: "6.7.0 - " + # no need to put watch, exists already - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -11,7 +15,7 @@ # execute watch - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" body: > { @@ -41,12 +45,12 @@ # deactivate watch, check with GET API as well - do: - xpack.watcher.deactivate_watch: + watcher.deactivate_watch: watch_id: "my_watch" - match: { status.state.active : false } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -55,12 +59,12 @@ # activate watch again, check with GET API as well - do: - xpack.watcher.activate_watch: + watcher.activate_watch: watch_id: "my_watch" - match: { status.state.active : true } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { found : true} - match: { _id: "my_watch" } @@ -68,7 +72,12 @@ --- "Test watcher stats output": + + - skip: + reason: https://github.com/elastic/elasticsearch/issues/33185 + version: "6.7.0 - " + - do: - xpack.watcher.stats: {} + watcher.stats: {} - match: { "manually_stopped": false } - match: { "stats.0.watcher_state": "started" } diff --git a/x-pack/qa/rolling-upgrade/with-system-key/build.gradle b/x-pack/qa/rolling-upgrade/with-system-key/build.gradle deleted file mode 100644 index 03505e01dedd8..0000000000000 --- a/x-pack/qa/rolling-upgrade/with-system-key/build.gradle +++ /dev/null @@ -1 +0,0 @@ -group = "${group}.x-pack.qa.rolling-upgrade.with-system-key" diff --git a/x-pack/qa/rolling-upgrade/without-system-key/build.gradle b/x-pack/qa/rolling-upgrade/without-system-key/build.gradle deleted file mode 100644 index aa7ac502eb3e6..0000000000000 --- a/x-pack/qa/rolling-upgrade/without-system-key/build.gradle +++ /dev/null @@ -1 +0,0 @@ -group = "${group}.x-pack.qa.rolling-upgrade.without-system-key" diff --git a/x-pack/qa/saml-idp-tests/build.gradle b/x-pack/qa/saml-idp-tests/build.gradle index 2b90cbaf9a679..7b76321fe9d4f 100644 --- a/x-pack/qa/saml-idp-tests/build.gradle +++ b/x-pack/qa/saml-idp-tests/build.gradle @@ -1,9 +1,9 @@ Project idpFixtureProject = xpackProject("test:idp-fixture") evaluationDependsOn(idpFixtureProject.path) -apply plugin: 'elasticsearch.vagrantsupport' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' +apply plugin: 'elasticsearch.test.fixtures' dependencies { // 
"org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here @@ -12,24 +12,36 @@ dependencies { testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile 'com.google.jimfs:jimfs:1.1' } +testFixtures.useFixture ":x-pack:test:idp-fixture" -task idpFixture { - dependsOn "vagrantCheckVersion", "virtualboxCheckVersion", idpFixtureProject.up -} String outputDir = "${project.buildDir}/generated-resources/${project.name}" -task copyIdpCertificate(type: Copy) { - from idpFixtureProject.file('src/main/resources/certs/ca.crt'); +task copyIdpFiles(type: Copy) { + from idpFixtureProject.files('idp/shibboleth-idp/credentials/idp-browser.pem', 'idp/shibboleth-idp/metadata/idp-metadata.xml'); into outputDir } -if (project.rootProject.vagrantSupported) { - project.sourceSets.test.output.dir(outputDir, builtBy: copyIdpCertificate) - integTestCluster.dependsOn idpFixture, copyIdpCertificate - integTest.finalizedBy idpFixtureProject.halt -} else { - integTest.enabled = false - testingConventions.enabled = false +project.sourceSets.test.output.dir(outputDir, builtBy: copyIdpFiles) + +task setupPorts { + dependsOn copyIdpFiles, idpFixtureProject.postProcessFixture + doLast { + String portString = idpFixtureProject.postProcessFixture.ext."test.fixtures.shibboleth-idp.tcp.4443" + int ephemeralPort = Integer.valueOf(portString) + File idpMetaFile = file(outputDir + '/idp-metadata.xml') + List lines = idpMetaFile.readLines("UTF-8") + StringBuilder content = new StringBuilder() + for (String line : lines) { + content.append(line.replace("localhost:4443", "localhost:" + ephemeralPort)) + } + idpMetaFile.delete() + idpMetaFile.createNewFile() + idpMetaFile.write(content.toString(), "UTF-8") + } } +// Don't attempt to get ephemeral ports when Docker is not available +setupPorts.onlyIf { idpFixtureProject.postProcessFixture.enabled } + +integTestCluster.dependsOn setupPorts integTestCluster { setting 'xpack.license.self_generated.type', 'trial' @@ -59,8 +71,9 @@ integTestCluster { setting 'xpack.security.authc.realms.native.native.order', '3' setting 'xpack.ml.enabled', 'false' + setting 'logger.org.elasticsearch.xpack.security', 'TRACE' - extraConfigFile 'idp-metadata.xml', idpFixtureProject.file("src/main/resources/provision/generated/idp-metadata.xml") + extraConfigFile 'idp-metadata.xml', file(outputDir + "/idp-metadata.xml") setupCommand 'setupTestAdmin', 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" diff --git a/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java b/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java index b28c7926c381f..505ca458aac05 100644 --- a/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java +++ b/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java @@ -367,7 +367,7 @@ private URI goToLoginPage(CloseableHttpClient client, BasicHttpContext context) private URI submitLoginForm(CloseableHttpClient client, BasicHttpContext context, URI formUri) throws IOException { final HttpPost form = new HttpPost(formUri); List params = new ArrayList<>(); - params.add(new BasicNameValuePair("j_username", "Thor")); + params.add(new BasicNameValuePair("j_username", "thor")); params.add(new BasicNameValuePair("j_password", "NickFuryHeartsES")); 
params.add(new BasicNameValuePair("_eventId_proceed", "")); form.setEntity(new UrlEncodedFormEntity(params)); @@ -376,7 +376,6 @@ private URI submitLoginForm(CloseableHttpClient client, BasicHttpContext context assertThat(response.getStatusLine().getStatusCode(), equalTo(302)); return response.getFirstHeader("Location").getValue(); }); - assertThat(redirect, startsWith("/")); String target = execute(client, new HttpGet(formUri.resolve(redirect)), context, response -> { assertHttpOk(response.getStatusLine()); @@ -620,7 +619,7 @@ private CloseableHttpClient getHttpClient() throws Exception { } private SSLContext getClientSslContext() throws Exception { - final Path pem = getDataPath("/ca.crt"); + final Path pem = getDataPath("/idp-browser.pem"); final Certificate[] certificates = CertParsingUtils.readCertificates(Collections.singletonList(pem)); final X509ExtendedTrustManager trustManager = CertParsingUtils.trustManager(certificates); SSLContext context = SSLContext.getInstance("TLS"); @@ -638,5 +637,4 @@ private URI getWebServerUri() { throw new ElasticsearchException("Cannot construct URI for httpServer @ {}:{}", e, host, port); } } - } diff --git a/x-pack/qa/security-tools-tests/build.gradle b/x-pack/qa/security-tools-tests/build.gradle index 5df22c557db3c..135f82bb4a617 100644 --- a/x-pack/qa/security-tools-tests/build.gradle +++ b/x-pack/qa/security-tools-tests/build.gradle @@ -8,7 +8,9 @@ dependencies { } // add test resources from security, so certificate tool tests can use example certs -sourceSets.test.resources.srcDirs(project(xpackModule('security')).sourceSets.test.resources.srcDirs) +processTestResources { + from(project(xpackModule('security')).sourceSets.test.resources.srcDirs) +} // we have to repeate these patterns because the security test resources are effectively in the src of this project forbiddenPatterns { diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index 5721815f07856..e88eac3028f3d 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -1,13 +1,6 @@ import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.NodeInfo - -import javax.net.ssl.HttpsURLConnection -import javax.net.ssl.KeyManager -import javax.net.ssl.SSLContext -import javax.net.ssl.TrustManagerFactory -import java.nio.charset.StandardCharsets -import java.security.KeyStore -import java.security.SecureRandom +import org.elasticsearch.gradle.http.WaitForHttpResource apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -86,45 +79,12 @@ integTestCluster { 'bin/elasticsearch-users', 'useradd', 'monitoring_agent', '-p', 'x-pack-test-password', '-r', 'remote_monitoring_agent' waitCondition = { NodeInfo node, AntBuilder ant -> - File tmpFile = new File(node.cwd, 'wait.success') - KeyStore keyStore = KeyStore.getInstance("JKS"); - keyStore.load(clientKeyStore.newInputStream(), 'testclient'.toCharArray()); - TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(keyStore); - // We don't need a KeyManager as there won't be client auth required so pass an empty array - SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); - sslContext.init(new KeyManager[0], tmf.getTrustManagers(), new SecureRandom()); - for (int i = 0; i < 10; i++) { - // we use custom wait logic here for HTTPS - HttpsURLConnection httpURLConnection = null; - try { - httpURLConnection = 
(HttpsURLConnection) new URL("https://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}&wait_for_status=yellow").openConnection(); - httpURLConnection.setSSLSocketFactory(sslContext.getSocketFactory()); - httpURLConnection.setRequestProperty("Authorization", "Basic " + - Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); - httpURLConnection.setRequestMethod("GET"); - httpURLConnection.connect(); - if (httpURLConnection.getResponseCode() == 200) { - tmpFile.withWriter StandardCharsets.UTF_8.name(), { - it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) - } - } - } catch (IOException e) { - if (i == 9) { - logger.error("final attempt of calling cluster health failed", e) - } else { - logger.debug("failed to call cluster health", e) - } - } finally { - if (httpURLConnection != null) { - httpURLConnection.disconnect(); - } - } - - // did not start, so wait a bit before trying again - Thread.sleep(500L); - } - return tmpFile.exists() + WaitForHttpResource http = new WaitForHttpResource("https", node.httpUri(), numNodes) + http.setTrustStoreFile(clientKeyStore) + http.setTrustStorePassword("testclient") + http.setUsername("test_user") + http.setPassword("x-pack-test-password") + return http.wait(5000) } } diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java index eb92bd29cd78d..879be233fa180 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java @@ -53,13 +53,13 @@ public void startWatcher() throws Exception { assertBusy(() -> { ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); switch (state) { case "stopped": ClientYamlTestResponse startResponse = - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); throw new AssertionError("waiting until stopped state reached started state"); @@ -76,7 +76,7 @@ public void startWatcher() throws Exception { }); assertBusy(() -> { - for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { + for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM) { ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", singletonMap("name", template), emptyList(), emptyMap()); assertThat(templateExistsResponse.getStatusCode(), is(200)); @@ -88,7 +88,7 @@ public void startWatcher() throws Exception { public void stopWatcher() throws Exception { assertBusy(() -> { ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + 
getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); switch (state) { @@ -101,7 +101,7 @@ public void stopWatcher() throws Exception { throw new AssertionError("waiting until starting state reached started state to stop"); case "started": ClientYamlTestResponse stopResponse = - getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stop", emptyMap(), emptyList(), emptyMap()); boolean isAcknowledged = (boolean) stopResponse.evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); throw new AssertionError("waiting until started state reached stopped state"); diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml index e3c512560a992..3cd7a43e69c08 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml @@ -5,10 +5,10 @@ - do: headers: { es-security-runas-user: powerless_user } catch: forbidden - xpack.watcher.stats: {} + watcher.stats: {} # there seems to be a bug in the yaml parser we use, where a single element list # has the END_LIST token skipped...so here we just rerun the same request without # the impersonation to show it works - do: - xpack.watcher.stats: {} + watcher.stats: {} - match: { stats.0.watcher_state: started } diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml index ec0be2532a6ee..b50f20afd0358 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml @@ -18,7 +18,7 @@ setup: --- teardown: - do: - xpack.watcher.delete_watch: + watcher.delete_watch: id: "my_watch" ignore: 404 @@ -27,7 +27,7 @@ teardown: - skip: features: headers - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -63,13 +63,13 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { _id: "my_watch" } - is_false: watch.status.headers - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: "my_watch" } - match: { watch_record.state: "executed" } @@ -83,7 +83,7 @@ teardown: - skip: features: headers - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -119,14 +119,14 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { _id: "my_watch" } - is_false: watch.status.headers - do: headers: { es-security-runas-user: x_pack_rest_user } - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { 
watch_record.watch_id: "my_watch" } - match: { watch_record.state: "executed" } @@ -140,7 +140,7 @@ teardown: - do: # by impersonating this request as powerless user we cannot query the my_test_index # headers: { es-security-runas-user: powerless_user } - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -176,13 +176,13 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { _id: "my_watch" } - is_false: watch.status.headers - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: "my_watch" } # because we are not allowed to read the index, there wont be any data @@ -193,7 +193,7 @@ teardown: --- "Test watch search transform is run as user who added the watch": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -227,7 +227,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: "my_watch" } @@ -242,7 +242,7 @@ teardown: --- "Test watch search transform does not work without permissions": - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -277,7 +277,7 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: "my_watch" } @@ -294,7 +294,7 @@ teardown: - skip: features: headers - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -318,13 +318,13 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { _id: "my_watch" } - is_false: watch.status.headers - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: "my_watch" } - match: { watch_record.state: "executed" } @@ -343,7 +343,7 @@ teardown: - skip: features: headers - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -367,13 +367,13 @@ teardown: - match: { _id: "my_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_watch" - match: { _id: "my_watch" } - is_false: watch.status.headers - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { watch_record.watch_id: "my_watch" } - match: { watch_record.state: "executed" } diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java index a7350fcff03d1..8f30ec417117c 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java @@ -63,7 +63,7 @@ public void startWatcher() throws Exception { }); assertBusy(() -> { - for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { + for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM) { Response templateExistsResponse = adminClient().performRequest(new Request("HEAD", "/_template/" + template)); assertThat(templateExistsResponse.getStatusLine().getStatusCode(), is(200)); } diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java index 
771302a99bbfb..19c82c8cef799 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java @@ -35,13 +35,13 @@ public static Iterable parameters() throws Exception { public void startWatcher() throws Exception { assertBusy(() -> { ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); switch (state) { case "stopped": ClientYamlTestResponse startResponse = - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); throw new AssertionError("waiting until stopped state reached started state"); @@ -58,7 +58,7 @@ public void startWatcher() throws Exception { }); assertBusy(() -> { - for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { + for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM) { ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", singletonMap("name", template), emptyList(), emptyMap()); assertThat(templateExistsResponse.getStatusCode(), is(200)); @@ -70,7 +70,7 @@ public void startWatcher() throws Exception { public void stopWatcher() throws Exception { assertBusy(() -> { ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); switch (state) { @@ -83,7 +83,7 @@ public void stopWatcher() throws Exception { throw new AssertionError("waiting until starting state reached started state to stop"); case "started": ClientYamlTestResponse stopResponse = - getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stop", emptyMap(), emptyList(), emptyMap()); boolean isAcknowledged = (boolean) stopResponse.evaluate("acknowledged"); assertThat(isAcknowledged, is(true)); throw new AssertionError("waiting until started state reached stopped state"); diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/10_webhook.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/10_webhook.yml index 50ee1f6eafdb9..5a5a4dfe46205 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/10_webhook.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/10_webhook.yml @@ -26,7 +26,7 @@ - set: { docs.0.doc._source.port: port } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: trigger: @@ -53,7 +53,7 @@ - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: test_watch body: record_execution: true diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/20_array_access.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/20_array_access.yml index 
bb52ee7f8d176..6371e743821ab 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/20_array_access.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/20_array_access.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch": { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/25_array_compare.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/25_array_compare.yml index e252932393262..9371040a0ff50 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/25_array_compare.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/25_array_compare.yml @@ -36,7 +36,7 @@ indices.refresh: {} - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/30_search_input.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/30_search_input.yml index 074b8d0fea7ca..d712ddba3a498 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/30_search_input.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/30_search_input.yml @@ -3,7 +3,7 @@ setup: - do: cluster.health: wait_for_status: yellow - - do: {xpack.watcher.stats:{}} + - do: {watcher.stats:{}} - do: index: index: idx @@ -51,7 +51,7 @@ setup: --- "Test search input mustache integration (using request body)": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { @@ -126,7 +126,7 @@ setup: - match: { acknowledged: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { @@ -172,7 +172,7 @@ setup: version: " - 6.99.99" reason: "rest_total_hits_as_int support was added in 7.0" - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { @@ -250,7 +250,7 @@ setup: - match: { acknowledged: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/40_search_transform.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/40_search_transform.yml index abad97ae944f8..08ff0fae5ba7b 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/40_search_transform.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/40_search_transform.yml @@ -3,7 +3,7 @@ setup: - do: cluster.health: wait_for_status: yellow - - do: {xpack.watcher.stats:{}} + - do: {watcher.stats:{}} - do: index: index: idx @@ -51,7 +51,7 @@ setup: --- "Test search transform mustache integration (using request body)": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { @@ -110,7 +110,7 @@ setup: --- "Test search transform mustache integration (using request template)": - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml index 713dbb65b3db6..e11809a79baa5 100644 --- 
a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml @@ -1,5 +1,8 @@ --- "Test url escaping with url mustache function": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/41172" - do: cluster.health: wait_for_status: yellow @@ -12,7 +15,7 @@ refresh: true body: { foo: bar } - - do: {xpack.watcher.stats:{}} + - do: {watcher.stats:{}} - match: { "stats.0.watcher_state": "started" } - match: { "stats.0.watch_count": 0 } @@ -38,7 +41,7 @@ - set: { docs.0.doc._source.port: port } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "test_watch" body: metadata: @@ -73,7 +76,7 @@ - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "test_watch" - do: diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml index 4ea2a8dc7ab74..7f6db2a6d6614 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml @@ -5,7 +5,7 @@ wait_for_status: green - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_exe_watch" body: > { @@ -80,7 +80,7 @@ - match: { _id: "my_exe_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_exe_watch" - match: { _id: "my_exe_watch" } @@ -88,7 +88,7 @@ - match: { watch.input.chain.inputs.1.second.transform.script.source: "return [ 'hits' : [ 'total' : ctx.payload.first.hits.total ]]" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_exe_watch" body: > { @@ -132,7 +132,7 @@ wait_for_status: green - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_exe_watch" body: > { @@ -207,7 +207,7 @@ - match: { _id: "my_exe_watch" } - do: - xpack.watcher.get_watch: + watcher.get_watch: id: "my_exe_watch" - match: { _id: "my_exe_watch" } @@ -215,7 +215,7 @@ - match: { watch.input.chain.inputs.1.second.transform.script.source: "return [ 'hits' : [ 'total' : ctx.payload.first.hits.total ]]" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_exe_watch" body: > { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml index f5f0a6cd04b0d..b95d2843aeb88 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml @@ -5,7 +5,7 @@ wait_for_status: green - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_logging_watch" body: > { @@ -34,7 +34,7 @@ - match: { _id: "my_logging_watch" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_logging_watch" - match: { "watch_record.watch_id": "my_logging_watch" } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/30_inline_watch.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/30_inline_watch.yml index f9ad2a42414bd..d4ff40b9b084a 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/30_inline_watch.yml +++ 
b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/30_inline_watch.yml @@ -5,7 +5,7 @@ wait_for_status: green - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "trigger_data" : { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/40_exception.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/40_exception.yml index b8839ea364e45..411ef8426552a 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/40_exception.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/40_exception.yml @@ -6,7 +6,7 @@ - do: catch: bad_request - xpack.watcher.put_watch: + watcher.put_watch: id: "my_exe_watch" body: > { @@ -42,7 +42,7 @@ wait_for_status: green - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { @@ -80,7 +80,7 @@ - do: catch: bad_request - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch": { diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml index 89e6602035c2a..e764505f9c058 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml @@ -21,7 +21,7 @@ } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -50,7 +50,7 @@ - match: { _id: "my_watch" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { "watch_record.watch_id": "my_watch" } @@ -69,7 +69,7 @@ } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { "watch_record.watch_id": "my_watch" } @@ -97,7 +97,7 @@ } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "my_watch" body: > { @@ -126,7 +126,7 @@ - match: { _id: "my_watch" } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { "watch_record.watch_id": "my_watch" } @@ -144,7 +144,7 @@ } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "my_watch" - match: { "watch_record.watch_id": "my_watch" } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml index 69fd7b4d575ee..d8545110c024e 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.execute_watch: + watcher.execute_watch: body: > { "watch" : { diff --git a/x-pack/qa/third-party/active-directory/build.gradle b/x-pack/qa/third-party/active-directory/build.gradle index e33c0fa6d69f8..49d5c6861fa45 100644 --- a/x-pack/qa/third-party/active-directory/build.gradle +++ b/x-pack/qa/third-party/active-directory/build.gradle @@ -9,7 +9,10 @@ dependencies { testFixtures.useFixture ":x-pack:test:smb-fixture" // add test resources from security, so tests can use example certs -sourceSets.test.resources.srcDirs(project(xpackModule('security')).sourceSets.test.resources.srcDirs) +processTestResources { + 
from(project(xpackModule('security')).sourceSets.test.resources.srcDirs) +} + compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" // we have to repeat these patterns because the security test resources are effectively in the src of this project @@ -19,7 +22,7 @@ forbiddenPatterns { exclude '**/*.der' } -unitTest { +test { /* * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each * other if we allow them to set the number of available processors as it's set-once in Netty. diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java index 85d4955cc7f87..c9306eaf8477f 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java @@ -43,7 +43,7 @@ public void init() throws Exception { Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); /* * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. - * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * If we re-use an SSLContext, previously connected sessions can get re-established which breaks hostname * verification tests since a re-established connection does not perform hostname verification. */ diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java index faf225668e198..b3e470a05fcd2 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java @@ -78,7 +78,7 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO }); /* * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. - * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * If we re-use an SSLContext, previously connected sessions can get re-established which breaks hostname * verification tests since a re-established connection does not perform hostname verification. 
*/ Settings.Builder builder = Settings.builder().put("path.home", createTempDir()); diff --git a/x-pack/qa/third-party/hipchat/build.gradle b/x-pack/qa/third-party/hipchat/build.gradle deleted file mode 100644 index f864fb62398a4..0000000000000 --- a/x-pack/qa/third-party/hipchat/build.gradle +++ /dev/null @@ -1,31 +0,0 @@ -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' - -dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" - testCompile project(path: xpackModule('watcher'), configuration: 'runtime') -} - -String integrationAccount = System.getenv('hipchat_auth_token_integration') -String userAccount = System.getenv('hipchat_auth_token_user') -String v1Account = System.getenv('hipchat_auth_token_v1') - -integTestCluster { - setting 'xpack.security.enabled', 'false' - setting 'xpack.monitoring.enabled', 'false' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.license.self_generated.type', 'trial' - setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG' - setting 'xpack.notification.hipchat.account.integration_account.profile', 'integration' - setting 'xpack.notification.hipchat.account.integration_account.room', 'test-watcher' - setting 'xpack.notification.hipchat.account.user_account.profile', 'user' - setting 'xpack.notification.hipchat.account.v1_account.profile', 'v1' - keystoreSetting 'xpack.notification.hipchat.account.integration_account.secure_auth_token', integrationAccount - keystoreSetting 'xpack.notification.hipchat.account.user_account.secure_auth_token', userAccount - keystoreSetting 'xpack.notification.hipchat.account.v1_account.secure_auth_token', v1Account -} - -if (!integrationAccount && !userAccount && !v1Account) { - integTest.enabled = false - testingConventions.enabled = false -} diff --git a/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java b/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java deleted file mode 100644 index 785b9d3a89249..0000000000000 --- a/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.smoketest; - -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; -import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; -import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; -import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; -import static org.hamcrest.Matchers.is; - -/** Runs rest tests against external cluster */ -public class WatcherHipchatYamlTestSuiteIT extends ESClientYamlSuiteTestCase { - - public WatcherHipchatYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { - super(testCandidate); - } - - @ParametersFactory - public static Iterable parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); - } - - @Before - public void startWatcher() throws Exception { - final List watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES); - assertBusy(() -> { - try { - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); - - for (String template : watcherTemplates) { - ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", - singletonMap("name", template), emptyList(), emptyMap()); - assertThat(templateExistsResponse.getStatusCode(), is(200)); - } - - ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); - String state = (String) response.evaluate("stats.0.watcher_state"); - assertThat(state, is("started")); - } catch (IOException e) { - throw new AssertionError(e); - } - }); - } - - @After - public void stopWatcher() throws Exception { - assertBusy(() -> { - try { - getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); - ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); - String state = (String) response.evaluate("stats.0.watcher_state"); - assertThat(state, is("stopped")); - } catch (IOException e) { - throw new AssertionError(e); - } - }); - } -} diff --git a/x-pack/qa/third-party/hipchat/src/test/resources/rest-api-spec/test/hipchat/10_hipchat.yml b/x-pack/qa/third-party/hipchat/src/test/resources/rest-api-spec/test/hipchat/10_hipchat.yml deleted file mode 100644 index bd4751cac4b78..0000000000000 --- a/x-pack/qa/third-party/hipchat/src/test/resources/rest-api-spec/test/hipchat/10_hipchat.yml +++ /dev/null @@ -1,279 +0,0 @@ ---- -"Test Hipchat v1 account Action": - - do: - cluster.health: - wait_for_status: yellow - - - do: - xpack.watcher.put_watch: - id: "hipchat_v1_watch" - body: > - { - "trigger": { - "schedule": { - "interval": "1d" - } - }, - "input": { - "simple": { - "foo": "something from input" - } - }, - "actions": { - "my_hipchat_action": { - "hipchat": { - "account": "v1_account", - "message": { - "from" : "watcher-tests", - "room" : ["test-watcher", "test-watcher-2", "test watcher with spaces"], - "body": "From input {{ctx.payload.foo}}, and some tests (facepalm) in the v1 account", - "format": 
"text", - "color": "red", - "notify": true - } - } - } - } - } - - - do: - xpack.watcher.execute_watch: - id: "hipchat_v1_watch" - body: > - { - "record_execution": true - } - - - match: { watch_record.trigger_event.type: "manual" } - - match: { watch_record.state: "executed" } - - match: { watch_record.result.actions.0.id: "my_hipchat_action" } - - match: { watch_record.result.actions.0.type: "hipchat" } - - match: { watch_record.result.actions.0.status: "success" } - - match: { watch_record.result.actions.0.hipchat.account: "v1_account" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.0.room: "test-watcher" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.1.room: "test-watcher-2" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.2.room: "test watcher with spaces" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.0.status: "success" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.1.status: "success" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.2.status: "success" } - - # Waits for the watcher history index to be available - - do: - cluster.health: - index: ".watcher-history-*" - wait_for_no_relocating_shards: true - timeout: 60s - - - do: - indices.refresh: {} - - - do: - search: - rest_total_hits_as_int: true - index: ".watcher-history-*" - body: > - { - "query" : { - "term" : { - "watch_id" : "hipchat_v1_watch" - } - } - } - - - match: { hits.total: 1 } - - match: { hits.hits.0._source.state: "executed" } - - match: { hits.hits.0._source.result.actions.0.id: "my_hipchat_action" } - - match: { hits.hits.0._source.result.actions.0.type: "hipchat" } - - match: { hits.hits.0._source.result.actions.0.status: "success" } - - match: { hits.hits.0._source.result.actions.0.hipchat.account: "v1_account" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.0.room: "test-watcher" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.1.room: "test-watcher-2" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.2.room: "test watcher with spaces" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.0.status: "success" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.1.status: "success" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.2.status: "success" } - ---- -"Test Hipchat integration account Action": - - do: - cluster.health: - wait_for_status: yellow - - # custom rooms, custom users and custom from are not allowed for this account type to be configured - - do: - xpack.watcher.put_watch: - id: "hipchat_integration_account_watch" - body: > - { - "trigger": { - "schedule": { - "interval": "1d" - } - }, - "input": { - "simple": { - "foo": "something from input" - } - }, - "actions": { - "my_hipchat_action": { - "hipchat": { - "account": "integration_account", - "message": { - "body": "From input {{ctx.payload.foo}}, and some tests (facepalm) in the integration account", - "format": "text", - "color": "red", - "notify": true - } - } - } - } - } - - - do: - xpack.watcher.execute_watch: - id: "hipchat_integration_account_watch" - body: > - { - "record_execution": true - } - - - match: { watch_record.trigger_event.type: "manual" } - - match: { watch_record.state: "executed" } - - match: { watch_record.result.actions.0.id: "my_hipchat_action" } - - match: { watch_record.result.actions.0.type: "hipchat" } - - match: { watch_record.result.actions.0.status: "success" 
} - - match: { watch_record.result.actions.0.hipchat.account: "integration_account" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.0.status: "success" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.0.room: "test-watcher" } - - # Waits for the watcher history index to be available - - do: - cluster.health: - index: ".watcher-history-*" - wait_for_no_relocating_shards: true - timeout: 60s - - - do: - indices.refresh: {} - - - do: - search: - rest_total_hits_as_int: true - index: ".watcher-history-*" - body: > - { - "query" : { - "term" : { - "watch_id" : "hipchat_integration_account_watch" - } - } - } - - - match: { hits.total: 1 } - - match: { hits.hits.0._source.state: "executed" } - - match: { hits.hits.0._source.result.actions.0.id: "my_hipchat_action" } - - match: { hits.hits.0._source.result.actions.0.type: "hipchat" } - - match: { hits.hits.0._source.result.actions.0.status: "success" } - - match: { hits.hits.0._source.result.actions.0.hipchat.account: "integration_account" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.0.room: "test-watcher" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.0.status: "success" } - ---- -"Test Hipchat user account Action": - - do: - cluster.health: - wait_for_status: yellow - - - do: - xpack.watcher.put_watch: - id: "hipchat_user_account_watch" - body: > - { - "trigger": { - "schedule": { - "interval": "1d" - } - }, - "input": { - "simple": { - "foo": "something from input" - } - }, - "actions": { - "my_hipchat_action": { - "hipchat": { - "account": "user_account", - "message": { - "user" : [ "watcher@elastic.co" ], - "room" : ["test-watcher", "test-watcher-2", "test watcher with spaces"], - "body": "From input {{ctx.payload.foo}}, and some tests (facepalm) in the user_account test. 
bold", - "format": "html" - } - } - } - } - } - - - do: - xpack.watcher.execute_watch: - id: "hipchat_user_account_watch" - body: > - { - "record_execution": true - } - - - match: { watch_record.trigger_event.type: "manual" } - - match: { watch_record.state: "executed" } - - match: { watch_record.result.actions.0.id: "my_hipchat_action" } - - match: { watch_record.result.actions.0.type: "hipchat" } - - match: { watch_record.result.actions.0.status: "success" } - - match: { watch_record.result.actions.0.hipchat.account: "user_account" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.0.room: "test-watcher" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.1.room: "test-watcher-2" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.2.room: "test watcher with spaces" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.3.user: "watcher@elastic.co" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.0.status: "success" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.1.status: "success" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.2.status: "success" } - - match: { watch_record.result.actions.0.hipchat.sent_messages.3.status: "success" } - - # Waits for the watcher history index to be available - - do: - cluster.health: - index: ".watcher-history-*" - wait_for_no_relocating_shards: true - timeout: 60s - - - do: - indices.refresh: {} - - - do: - search: - rest_total_hits_as_int: true - index: ".watcher-history-*" - body: > - { - "query" : { - "term" : { - "watch_id" : "hipchat_user_account_watch" - } - } - } - - - match: { hits.total: 1 } - - match: { hits.hits.0._source.state: "executed" } - - match: { hits.hits.0._source.result.actions.0.id: "my_hipchat_action" } - - match: { hits.hits.0._source.result.actions.0.type: "hipchat" } - - match: { hits.hits.0._source.result.actions.0.status: "success" } - - match: { hits.hits.0._source.result.actions.0.hipchat.account: "user_account" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.0.room: "test-watcher" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.1.room: "test-watcher-2" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.2.room: "test watcher with spaces" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.3.user: "watcher@elastic.co" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.0.status: "success" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.1.status: "success" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.2.status: "success" } - - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.1.status: "success" } - - diff --git a/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java b/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java index 8218d0e18f67b..8f8792f26971e 100644 --- a/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java +++ b/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java @@ -37,10 +37,10 @@ public static Iterable parameters() throws Exception { @Before public void startWatcher() throws Exception { - final List watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES); + final List watcherTemplates = 
Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM); assertBusy(() -> { try { - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); for (String template : watcherTemplates) { ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", @@ -49,7 +49,7 @@ public void startWatcher() throws Exception { } ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); assertThat(state, is("started")); } catch (IOException e) { @@ -62,9 +62,9 @@ public void startWatcher() throws Exception { public void stopWatcher() throws Exception { assertBusy(() -> { try { - getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stop", emptyMap(), emptyList(), emptyMap()); ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); assertThat(state, is("stopped")); } catch (IOException e) { diff --git a/x-pack/qa/third-party/jira/src/test/resources/rest-api-spec/test/jira/10_jira.yml b/x-pack/qa/third-party/jira/src/test/resources/rest-api-spec/test/jira/10_jira.yml index 55573f0c0f0c7..d914b63391146 100644 --- a/x-pack/qa/third-party/jira/src/test/resources/rest-api-spec/test/jira/10_jira.yml +++ b/x-pack/qa/third-party/jira/src/test/resources/rest-api-spec/test/jira/10_jira.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "jira_watch" body: > { @@ -43,7 +43,7 @@ - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "jira_watch" body: > { @@ -126,7 +126,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "wrong_jira_watch" body: > { @@ -161,7 +161,7 @@ - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "wrong_jira_watch" body: > { @@ -226,7 +226,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "jira_watch_with_custom_field_one" body: > { @@ -262,7 +262,7 @@ - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "jira_watch_with_custom_field_one" body: > { @@ -277,7 +277,7 @@ - match: { watch_record.state: "executed" } - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "jira_watch_with_custom_field_two" body: > { @@ -318,7 +318,7 @@ - match: { created: true } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "jira_watch_with_custom_field_two" body: > { diff --git a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java index 019609793e38c..b9a628f71f972 100644 --- a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java +++ 
b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java @@ -37,10 +37,10 @@ public static Iterable parameters() throws Exception { @Before public void startWatcher() throws Exception { - final List watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES); + final List watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM); assertBusy(() -> { try { - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); for (String template : watcherTemplates) { ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", @@ -49,7 +49,7 @@ public void startWatcher() throws Exception { } ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); assertThat(state, is("started")); } catch (IOException e) { @@ -62,9 +62,9 @@ public void startWatcher() throws Exception { public void stopWatcher() throws Exception { assertBusy(() -> { try { - getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stop", emptyMap(), emptyList(), emptyMap()); ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); assertThat(state, is("stopped")); } catch (IOException e) { diff --git a/x-pack/qa/third-party/pagerduty/src/test/resources/rest-api-spec/test/pagerduty/10_pagerduty.yml b/x-pack/qa/third-party/pagerduty/src/test/resources/rest-api-spec/test/pagerduty/10_pagerduty.yml index fa83c8e8e8ce1..82fb47e4bc14c 100644 --- a/x-pack/qa/third-party/pagerduty/src/test/resources/rest-api-spec/test/pagerduty/10_pagerduty.yml +++ b/x-pack/qa/third-party/pagerduty/src/test/resources/rest-api-spec/test/pagerduty/10_pagerduty.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "pagerduty_watch" body: > { @@ -44,7 +44,7 @@ } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "pagerduty_watch" body: > { diff --git a/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java b/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java index f6e8222ea73a0..01eeae442b2e0 100644 --- a/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java +++ b/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java @@ -37,10 +37,10 @@ public static Iterable parameters() throws Exception { @Before public void startWatcher() throws Exception { - final List watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES); + final List watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES_NO_ILM); assertBusy(() -> { try { - getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + 
getAdminExecutionContext().callApi("watcher.start", emptyMap(), emptyList(), emptyMap()); for (String template : watcherTemplates) { ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", @@ -49,7 +49,7 @@ public void startWatcher() throws Exception { } ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); assertThat(state, is("started")); } catch (IOException e) { @@ -62,9 +62,9 @@ public void startWatcher() throws Exception { public void stopWatcher() throws Exception { assertBusy(() -> { try { - getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stop", emptyMap(), emptyList(), emptyMap()); ClientYamlTestResponse response = - getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + getAdminExecutionContext().callApi("watcher.stats", emptyMap(), emptyList(), emptyMap()); String state = (String) response.evaluate("stats.0.watcher_state"); assertThat(state, is("stopped")); } catch (IOException e) { diff --git a/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml b/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml index f5719ddbfc00c..6da232fc2ffb4 100644 --- a/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml +++ b/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml @@ -5,7 +5,7 @@ wait_for_status: yellow - do: - xpack.watcher.put_watch: + watcher.put_watch: id: "slack_watch" body: > { @@ -76,7 +76,7 @@ } - do: - xpack.watcher.execute_watch: + watcher.execute_watch: id: "slack_watch" body: > { diff --git a/x-pack/test/feature-aware/build.gradle b/x-pack/test/feature-aware/build.gradle index 7e7ed3ee3a862..e3c7ae96063d0 100644 --- a/x-pack/test/feature-aware/build.gradle +++ b/x-pack/test/feature-aware/build.gradle @@ -1,7 +1,7 @@ apply plugin: 'elasticsearch.build' dependencies { - compile 'org.ow2.asm:asm:7.0' + compile 'org.ow2.asm:asm:7.1' compile "org.elasticsearch:elasticsearch:${version}" compile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile "org.elasticsearch.test:framework:${version}" diff --git a/x-pack/test/idp-fixture/README.txt b/x-pack/test/idp-fixture/README.txt index fff29f38797c1..8e42bb142e4ee 100644 --- a/x-pack/test/idp-fixture/README.txt +++ b/x-pack/test/idp-fixture/README.txt @@ -1,6 +1 @@ -Provisions OpenLDAP + shibboleth IDP 3.3.1 . -Uses ansible on the guest. 
- -Run: `vagrant up --provision` - -Any issues: albert.zaharovits@elastic.co +Provisions OpenLDAP + shibboleth IDP 3.4.2 using docker compose diff --git a/x-pack/test/idp-fixture/Vagrantfile b/x-pack/test/idp-fixture/Vagrantfile deleted file mode 100644 index c05fa33ba1a7a..0000000000000 --- a/x-pack/test/idp-fixture/Vagrantfile +++ /dev/null @@ -1,24 +0,0 @@ -Vagrant.configure("2") do |config| - - config.vm.define "test.shibboleth.elastic.local" do |config| - config.vm.box = "elastic/ubuntu-16.04-x86_64" - end - - config.vm.hostname = "localhost" - - if Vagrant.has_plugin?("vagrant-cachier") - config.cache.scope = :box - end - - config.vm.network "forwarded_port", guest: 389, host: 60389, protocol: "tcp" - config.vm.network "forwarded_port", guest: 636, host: 60636, protocol: "tcp" - config.vm.network "forwarded_port", guest: 8080, host: 60080, protocol: "tcp" - config.vm.network "forwarded_port", guest: 8443, host: 60443, protocol: "tcp" - - config.vm.provision "ansible_local" do |ansible| - ansible.verbose = "v" - ansible.playbook = "src/main/resources/provision/playbook.yml" - ansible.install_mode = "pip" - end - -end diff --git a/x-pack/test/idp-fixture/build.gradle b/x-pack/test/idp-fixture/build.gradle index e7fbfc3779ae4..c55123e08d0f1 100644 --- a/x-pack/test/idp-fixture/build.gradle +++ b/x-pack/test/idp-fixture/build.gradle @@ -1,42 +1,4 @@ apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.test.fixtures' -Map vagrantEnvVars = [ - 'VAGRANT_CWD' : "${project.projectDir.absolutePath}", - 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', - 'VAGRANT_PROJECT_DIR' : "${project.projectDir.absolutePath}" -] - -String box = "test.shibboleth.elastic.local" - -task update(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'box' - subcommand 'update' - boxName box - environmentVars vagrantEnvVars -} - -task up(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'up' - args '--provision', '--provider', 'virtualbox' - boxName box - environmentVars vagrantEnvVars - dependsOn update -} - -task halt(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'halt' - boxName box - environmentVars vagrantEnvVars -} - -task destroy(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'destroy' - args '-f' - boxName box - environmentVars vagrantEnvVars - dependsOn halt -} - -thirdPartyAudit.enabled = false -unitTest.enabled = false -jarHell.enabled = false +test.enabled = false \ No newline at end of file diff --git a/x-pack/test/idp-fixture/docker-compose.yml b/x-pack/test/idp-fixture/docker-compose.yml new file mode 100644 index 0000000000000..53fb62855164d --- /dev/null +++ b/x-pack/test/idp-fixture/docker-compose.yml @@ -0,0 +1,40 @@ +version: '3.1' +services: + openldap: + command: --copy-service --loglevel debug + image: "osixia/openldap:1.2.3" + ports: + - "389" + - "636" + environment: + LDAP_ADMIN_PASSWORD: "NickFuryHeartsES" + LDAP_DOMAIN: "oldap.test.elasticsearch.com" + LDAP_BASE_DN: "DC=oldap,DC=test,DC=elasticsearch,DC=com" + LDAP_TLS: "true" + LDAP_TLS_CRT_FILENAME: "ldap_server.pem" + LDAP_TLS_CA_CRT_FILENAME: "ca_server.pem" + LDAP_TLS_KEY_FILENAME: "ldap_server.key" + LDAP_TLS_VERIFY_CLIENT: "never" + LDAP_TLS_CIPHER_SUITE: "NORMAL" + LDAP_LOG_LEVEL: 256 + volumes: + - ./openldap/ldif/users.ldif:/container/service/slapd/assets/config/bootstrap/ldif/custom/20-bootstrap-users.ldif + - ./openldap/ldif/config.ldif:/container/service/slapd/assets/config/bootstrap/ldif/custom/10-bootstrap-config.ldif + - 
./openldap/certs:/container/service/slapd/assets/certs + + shibboleth-idp: + image: "unicon/shibboleth-idp:3.4.2" + depends_on: + - openldap + environment: + - JETTY_MAX_HEAP=64m + - JETTY_BROWSER_SSL_KEYSTORE_PASSWORD=secret + - JETTY_BACKCHANNEL_SSL_KEYSTORE_PASSWORD=secret + ports: + - "4443" + links: + - openldap:openldap + volumes: + - ./idp/shibboleth-idp/conf:/opt/shibboleth-idp/conf + - ./idp/shibboleth-idp/credentials:/opt/shibboleth-idp/credentials + - ./idp/shib-jetty-base/start.d/ssl.ini:/opt/shib-jetty-base/start.d/ssl.ini diff --git a/x-pack/test/idp-fixture/idp/shib-jetty-base/start.d/ssl.ini b/x-pack/test/idp-fixture/idp/shib-jetty-base/start.d/ssl.ini new file mode 100644 index 0000000000000..1a20bd1a612bc --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shib-jetty-base/start.d/ssl.ini @@ -0,0 +1,4 @@ +--module=ssl +jetty.ssl.port=4443 +jetty.sslContext.keyStorePath=/opt/shibboleth-idp/credentials/idp-browser.p12 +jetty.sslContext.keyStoreType=PKCS12 \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/access-control.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/access-control.xml new file mode 100644 index 0000000000000..a9184e6c72ac4 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/access-control.xml @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/admin/general-admin.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/admin/general-admin.xml new file mode 100644 index 0000000000000..2814bf629cb13 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/admin/general-admin.xml @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/admin/metrics.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/admin/metrics.xml new file mode 100644 index 0000000000000..f9b5c162f3e92 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/admin/metrics.xml @@ -0,0 +1,129 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/attribute-filter.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/attribute-filter.xml similarity index 96% rename from x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/attribute-filter.xml rename to x-pack/test/idp-fixture/idp/shibboleth-idp/conf/attribute-filter.xml index 09892d65f6edb..3dec2143d6227 100644 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/attribute-filter.xml +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/attribute-filter.xml @@ -13,7 +13,6 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:mace:shibboleth:2.0:afp http://shibboleth.net/schema/idp/shibboleth-afp.xsd"> - diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/attribute-resolver.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/attribute-resolver.xml new file mode 100644 index 0000000000000..00c24e94236ff --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/attribute-resolver.xml @@ -0,0 +1,85 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + uid mail cn + + + + + + + + cn + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/audit.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/audit.xml new file mode 100644 index 
0000000000000..43f029d9d8167 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/audit.xml @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + http://shibboleth.net/ns/profiles/status + http://shibboleth.net/ns/profiles/mdquery + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/authn-comparison.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/authn-comparison.xml new file mode 100644 index 0000000000000..f167b7a330d10 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/authn-comparison.xml @@ -0,0 +1,77 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + urn:oasis:names:tc:SAML:2.0:ac:classes:unspecified + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/authn-events-flow.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/authn-events-flow.xml new file mode 100644 index 0000000000000..8846677810804 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/authn-events-flow.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/duo-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/duo-authn-config.xml new file mode 100644 index 0000000000000..2867f484f5f35 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/duo-authn-config.xml @@ -0,0 +1,29 @@ + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/duo.properties b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/duo.properties new file mode 100644 index 0000000000000..d479fc29be851 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/duo.properties @@ -0,0 +1,30 @@ +## Duo integration settings + +## Note: If upgrading from pre-3.3 IdP versions, you will need to manually add a pointer +## to this property file to idp.properties. + +## The first set of properties support DuoWeb "iframe" integration. + +idp.duo.apiHost = hostname +idp.duo.applicationKey = key +idp.duo.integrationKey = key +idp.duo.secretKey = key + +## The second set are used for direct AuthAPI usage for ECP support. +## A seperate integration has to be created for this to work. + +#idp.duo.nonbrowser.apiHost = %{idp.duo.apiHost} +#idp.duo.nonbrowser.applicationKey = key +#idp.duo.nonbrowser.integrationKey = key +#idp.duo.nonbrowser.secretKey = key + +## Request header names for Duo non-browser credentials. +# idp.duo.nonbrowser.header.factor = X-Shibboleth-Duo-Factor +# idp.duo.nonbrowser.header.device = X-Shibboleth-Duo-Device +# idp.duo.nonbrowser.header.factor = X-Shibboleth-Duo-Passcode + +## Enables auto selection of factor/device if not specified by client. +# idp.duo.nonbrowser.auto = true + +## Enables transmission of client address to Duo during authentication. 
+# idp.duo.nonbrowser.clientAddressTrusted = true diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/external-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/external-authn-config.xml new file mode 100644 index 0000000000000..8b3a1596f9f2c --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/external-authn-config.xml @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + + + + UnknownUsername + + + + + InvalidPassword + + + + + ExpiredPassword + + + + + ExpiringPassword + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/function-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/function-authn-config.xml new file mode 100644 index 0000000000000..cf7876af69399 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/function-authn-config.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/general-authn.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/general-authn.xml new file mode 100644 index 0000000000000..56990225ec246 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/general-authn.xml @@ -0,0 +1,158 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/ipaddress-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/ipaddress-authn-config.xml new file mode 100644 index 0000000000000..a3ee096f3aafe --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/ipaddress-authn-config.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/jaas-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/jaas-authn-config.xml new file mode 100644 index 0000000000000..daef4d2b74007 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/jaas-authn-config.xml @@ -0,0 +1,27 @@ + + + + + + + + + + + ShibUserPassAuth + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/jaas.config b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/jaas.config new file mode 100644 index 0000000000000..232e93d427179 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/jaas.config @@ -0,0 +1,11 @@ +ShibUserPassAuth { + /* + com.sun.security.auth.module.Krb5LoginModule required; + */ + + org.ldaptive.jaas.LdapLoginModule required + ldapUrl="ldap://localhost:10389" + baseDn="ou=people,dc=example,dc=org" + userFilter="uid={user}"; + +}; \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/krb5-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/krb5-authn-config.xml new file mode 100644 index 0000000000000..d3590a2adceb1 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/krb5-authn-config.xml @@ -0,0 +1,31 @@ + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/ldap-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/ldap-authn-config.xml new file mode 100644 index 0000000000000..56d1bc7f8e729 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/ldap-authn-config.xml @@ -0,0 +1,135 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/mfa-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/mfa-authn-config.xml new file mode 100644 index 0000000000000..f1b3918d03363 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/mfa-authn-config.xml @@ -0,0 +1,101 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/password-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/password-authn-config.xml new file mode 100644 index 0000000000000..f27051bec5f02 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/password-authn-config.xml @@ -0,0 +1,121 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + NoCredentials + CLIENT_NOT_FOUND + Client not found + DN_RESOLUTION_FAILURE + + + + + InvalidCredentials + PREAUTH_FAILED + INVALID_CREDENTIALS + Checksum failed + + + + + AccountLocked + Clients credentials have been revoked + + + + + PASSWORD_EXPIRED + + + + + ACCOUNT_WARNING + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/remoteuser-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/remoteuser-authn-config.xml new file mode 100644 index 0000000000000..4b7e722528edb --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/remoteuser-authn-config.xml @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + NoCredentials + + + + + UnknownUsername + + + + + InvalidPassword + + + + + ExpiredPassword + + + + + ExpiringPassword + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/remoteuser-internal-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/remoteuser-internal-authn-config.xml new file mode 100644 index 0000000000000..9e68c854bdc2c --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/remoteuser-internal-authn-config.xml @@ -0,0 +1,63 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/spnego-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/spnego-authn-config.xml new file mode 100644 index 0000000000000..07563b9a9549e --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/spnego-authn-config.xml @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SPNEGONotAvailable + + + + + NTLMUnsupported + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/x509-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/x509-authn-config.xml new file mode 100644 index 0000000000000..18b015a81029f --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/x509-authn-config.xml @@ -0,0 +1,44 @@ + + + + + + + + + + + + + NoCredentials + InvalidCredentials + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/x509-internal-authn-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/x509-internal-authn-config.xml new file mode 100644 index 0000000000000..bad3029307dd0 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/authn/x509-internal-authn-config.xml @@ -0,0 +1,21 @@ + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/attribute-sourced-subject-c14n-config.xml 
b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/attribute-sourced-subject-c14n-config.xml new file mode 100644 index 0000000000000..938b30f11867e --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/attribute-sourced-subject-c14n-config.xml @@ -0,0 +1,44 @@ + + + + + + altuid + + + + + altuid + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/simple-subject-c14n-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/simple-subject-c14n-config.xml new file mode 100644 index 0000000000000..3cddfa67ac70b --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/simple-subject-c14n-config.xml @@ -0,0 +1,27 @@ + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/subject-c14n-events-flow.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/subject-c14n-events-flow.xml new file mode 100644 index 0000000000000..c4936f31401af --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/subject-c14n-events-flow.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/subject-c14n.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/subject-c14n.xml new file mode 100644 index 0000000000000..9740fdd76d1b7 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/subject-c14n.xml @@ -0,0 +1,109 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified + urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress + urn:oasis:names:tc:SAML:1.1:nameid-format:X509SubjectName + urn:oasis:names:tc:SAML:1.1:nameid-format:WindowsDomainQualifiedName + urn:oasis:names:tc:SAML:2.0:nameid-format:kerberos + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/x500-subject-c14n-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/x500-subject-c14n-config.xml new file mode 100644 index 0000000000000..1ae25e4058179 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/c14n/x500-subject-c14n-config.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + 2.5.4.3 + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/cas-protocol.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/cas-protocol.xml new file mode 100644 index 0000000000000..535cf0aa5245b --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/cas-protocol.xml @@ -0,0 +1,108 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/credentials.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/credentials.xml new file mode 100644 index 0000000000000..dde530b7f28f1 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/credentials.xml @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/errors.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/errors.xml new file mode 100644 index 0000000000000..a9f4074527436 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/errors.xml @@ -0,0 +1,122 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/global.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/global.xml new file mode 100644 index 0000000000000..457a814d00743 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/global.xml @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/idp.properties b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/idp.properties new file mode 100644 index 0000000000000..f03eaf6d6d792 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/idp.properties @@ -0,0 +1,212 @@ +# Load any additional property resources from a comma-delimited list +idp.additionalProperties=/conf/ldap.properties, /conf/saml-nameid.properties, /conf/services.properties, /conf/authn/duo.properties + +# In most cases (and unless noted in the surrounding comments) the +# commented settings in the distributed files are the default +# behavior for V3. +# +# Uncomment them and change the value to change functionality. + +# Set the entityID of the IdP +idp.entityID=https://test.shibboleth.elastic.local/ + +# Set the file path which backs the IdP's own metadata publishing endpoint at /shibboleth. +# Set to empty value to disable and return a 404. +#idp.entityID.metadataFile=%{idp.home}/metadata/idp-metadata.xml + +# Set the scope used in the attribute resolver for scoped attributes +idp.scope=test.elastic.co + +# General cookie properties (maxAge only applies to persistent cookies) +# Note the default for idp.cookie.secure, you will usually want it set. +#idp.cookie.secure = false +#idp.cookie.httpOnly = true +#idp.cookie.domain = +#idp.cookie.path = +#idp.cookie.maxAge = 31536000 + +# HSTS/CSP response headers +#idp.hsts = max-age=0 +# X-Frame-Options value, set to DENY or SAMEORIGIN to block framing +#idp.frameoptions = DENY +# Content-Security-Policy value, set to match X-Frame-Options default +#idp.csp = frame-ancestors 'none'; + +# Set the location of user-supplied web flow definitions +#idp.webflows = %{idp.home}/flows + +# Set the location of Velocity view templates +#idp.views = %{idp.home}/views + +# Settings for internal AES encryption key +#idp.sealer.storeType = JCEKS +#idp.sealer.updateInterval = PT15M +#idp.sealer.aliasBase = secret +idp.sealer.storeResource=%{idp.home}/credentials/sealer.jks +idp.sealer.versionResource=%{idp.home}/credentials/sealer.kver +idp.sealer.storePassword=secret +idp.sealer.keyPassword=secret + +# Settings for public/private signing and encryption key(s) +# During decryption key rollover, point the ".2" properties at a second +# keypair, uncomment in credentials.xml, then publish it in your metadata. +idp.signing.key=%{idp.home}/credentials/idp-signing.key +idp.signing.cert=%{idp.home}/credentials/idp-signing.crt +idp.encryption.key=%{idp.home}/credentials/idp-encryption.key +idp.encryption.cert=%{idp.home}/credentials/idp-encryption.crt +#idp.encryption.key.2 = %{idp.home}/credentials/idp-encryption-old.key +#idp.encryption.cert.2 = %{idp.home}/credentials/idp-encryption-old.crt + +# Sets the bean ID to use as a default security configuration set +#idp.security.config = shibboleth.DefaultSecurityConfiguration + +# To downgrade to SHA-1, set to shibboleth.SigningConfiguration.SHA1 +#idp.signing.config = shibboleth.SigningConfiguration.SHA256 + +# To upgrade to AES-GCM encryption, set to shibboleth.EncryptionConfiguration.GCM +# This is unlikely to work for all SPs, but this is a quick way to test them. 
+#idp.encryption.config = shibboleth.EncryptionConfiguration.CBC + +# Configures trust evaluation of keys used by services at runtime +# Defaults to supporting both explicit key and PKIX using SAML metadata. +#idp.trust.signatures = shibboleth.ChainingSignatureTrustEngine +# To pick only one set to one of: +# shibboleth.ExplicitKeySignatureTrustEngine, shibboleth.PKIXSignatureTrustEngine +#idp.trust.certificates = shibboleth.ChainingX509TrustEngine +# To pick only one set to one of: +# shibboleth.ExplicitKeyX509TrustEngine, shibboleth.PKIXX509TrustEngine + +# If true, encryption will happen whenever a key to use can be located, but +# failure to encrypt won't result in request failure. +#idp.encryption.optional = false + +# Configuration of client- and server-side storage plugins +#idp.storage.cleanupInterval = PT10M +#idp.storage.htmlLocalStorage = false + +# Set to true to expose more detailed errors in responses to SPs +#idp.errors.detailed = false +# Set to false to skip signing of SAML response messages that signal errors +#idp.errors.signed = true +# Name of bean containing a list of Java exception classes to ignore +#idp.errors.excludedExceptions = ExceptionClassListBean +# Name of bean containing a property set mapping exception names to views +#idp.errors.exceptionMappings = ExceptionToViewPropertyBean +# Set if a different default view name for events and exceptions is needed +#idp.errors.defaultView = error + +# Set to false to disable the IdP session layer +#idp.session.enabled = true + +# Set to "shibboleth.StorageService" for server-side storage of user sessions +#idp.session.StorageService = shibboleth.ClientSessionStorageService + +# Size of session IDs +#idp.session.idSize = 32 +# Bind sessions to IP addresses +#idp.session.consistentAddress = true +# Inactivity timeout +#idp.session.timeout = PT60M +# Extra time to store sessions for logout +#idp.session.slop = PT0S +# Tolerate storage-related errors +#idp.session.maskStorageFailure = false +# Track information about SPs logged into +#idp.session.trackSPSessions = false +# Support lookup by SP for SAML logout +#idp.session.secondaryServiceIndex = false +# Length of time to track SP sessions +#idp.session.defaultSPlifetime = PT2H + +# Regular expression matching login flows to enable, e.g. IPAddress|Password +idp.authn.flows=Password + +# Default lifetime and timeout of various authentication methods +#idp.authn.defaultLifetime = PT60M +#idp.authn.defaultTimeout = PT30M + +# Whether to populate relying party user interface information for display +# during authentication, consent, terms-of-use. +#idp.authn.rpui = true + +# Whether to prioritize "active" results when an SP requests more than +# one possible matching login method (V2 behavior was to favor them) +#idp.authn.favorSSO = false + +# Whether to fail requests when a user identity after authentication +# doesn't match the identity in a pre-existing session. 
+#idp.authn.identitySwitchIsError = false + +# Set to "shibboleth.StorageService" or custom bean for alternate storage of consent +#idp.consent.StorageService = shibboleth.ClientPersistentStorageService + +# Set to "shibboleth.consent.AttributeConsentStorageKey" to use an attribute +# to key user consent storage records (and set the attribute name) +#idp.consent.attribute-release.userStorageKey = shibboleth.consent.PrincipalConsentStorageKey +#idp.consent.attribute-release.userStorageKeyAttribute = uid +#idp.consent.terms-of-use.userStorageKey = shibboleth.consent.PrincipalConsentStorageKey +#idp.consent.terms-of-use.userStorageKeyAttribute = uid + +# Suffix of message property used as value of consent storage records when idp.consent.compareValues is true. +# Defaults to text displayed to the user. +#idp.consent.terms-of-use.consentValueMessageCodeSuffix = .text + +# Flags controlling how built-in attribute consent feature operates +#idp.consent.allowDoNotRemember = true +#idp.consent.allowGlobal = true +#idp.consent.allowPerAttribute = false + +# Whether attribute values and terms of use text are compared +#idp.consent.compareValues = false +# Maximum number of consent records for space-limited storage (e.g. cookies) +#idp.consent.maxStoredRecords = 10 +# Maximum number of consent records for larger/server-side storage (0 = no limit) +#idp.consent.expandedMaxStoredRecords = 0 + +# Time in milliseconds to expire consent storage records. +#idp.consent.storageRecordLifetime = P1Y + +# Whether to lookup metadata, etc. for every SP involved in a logout +# for use by user interface logic; adds overhead so off by default. +#idp.logout.elaboration = false + +# Whether to require logout requests/responses be signed/authenticated. +#idp.logout.authenticated = true + +# Message freshness and replay cache tuning +#idp.policy.messageLifetime = PT3M +#idp.policy.clockSkew = PT3M + +# Set to custom bean for alternate storage of replay cache +#idp.replayCache.StorageService = shibboleth.StorageService +#idp.replayCache.strict = true + +# Toggles whether to allow outbound messages via SAML artifact +#idp.artifact.enabled = true +# Suppresses typical signing/encryption when artifact binding used +#idp.artifact.secureChannel = true +# May differ to direct SAML 2 artifact lookups to specific server nodes +#idp.artifact.endpointIndex = 2 +# Set to custom bean for alternate storage of artifact map state +#idp.artifact.StorageService = shibboleth.StorageService + +# Comma-delimited languages to use if not match can be found with the +# browser-supported languages, defaults to an empty list. +idp.ui.fallbackLanguages=en,fr,de + +# Storage service used by CAS protocol +# Defaults to shibboleth.StorageService (in-memory) +# MUST be server-side storage (e.g. 
in-memory, memcached, database) +# NOTE that idp.session.StorageService requires server-side storage +# when CAS protocol is enabled +#idp.cas.StorageService=shibboleth.StorageService + +# CAS service registry implementation class +#idp.cas.serviceRegistryClass=net.shibboleth.idp.cas.service.PatternServiceRegistry + +# F-TICKS auditing - set a salt to include hashed username +#idp.fticks.federation=MyFederation +#idp.fticks.algorithm=SHA-256 +#idp.fticks.salt=somethingsecret +#idp.fticks.loghost=localhost +#idp.fticks.logport=514 diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/consent-intercept-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/consent-intercept-config.xml new file mode 100644 index 0000000000000..a0892117b67cd --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/consent-intercept-config.xml @@ -0,0 +1,121 @@ + + + + + + + + + + + + + + + + + + + + + transientId + persistentId + eduPersonTargetedID + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/context-check-intercept-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/context-check-intercept-config.xml new file mode 100644 index 0000000000000..aae07f0f25042 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/context-check-intercept-config.xml @@ -0,0 +1,63 @@ + + + + + + + + + + + + + + * + + + + + + + + + + + + + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/expiring-password-intercept-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/expiring-password-intercept-config.xml new file mode 100644 index 0000000000000..b3bf96d525e79 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/expiring-password-intercept-config.xml @@ -0,0 +1,31 @@ + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/impersonate-intercept-config.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/impersonate-intercept-config.xml new file mode 100644 index 0000000000000..7dfda2b9126fa --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/impersonate-intercept-config.xml @@ -0,0 +1,25 @@ + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/intercept-events-flow.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/intercept-events-flow.xml new file mode 100644 index 0000000000000..6214e8080c181 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/intercept-events-flow.xml @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/profile-intercept.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/profile-intercept.xml new file mode 100644 index 0000000000000..7b4c8aa900bce --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/intercept/profile-intercept.xml @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/ldap.properties b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/ldap.properties new file mode 100644 index 0000000000000..4b36d79942ffe --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/ldap.properties @@ -0,0 +1,24 @@ +idp.authn.LDAP.authenticator = bindSearchAuthenticator +idp.authn.LDAP.ldapURL = 
ldap://openldap:389 +idp.authn.LDAP.useStartTLS = false +idp.authn.LDAP.sslConfig = certificateTrust +idp.authn.LDAP.trustCertificates= %{idp.home}/credentials/ca_server.pem +idp.authn.LDAP.baseDN = ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +idp.authn.LDAP.subtreeSearch = true +idp.authn.LDAP.userFilter = (uid={user}) +idp.authn.LDAP.bindDN = cn=admin,DC=oldap,DC=test,DC=elasticsearch,DC=com +idp.authn.LDAP.bindDNCredential = NickFuryHeartsES +idp.authn.LDAP.dnFormat = uid=%s,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +idp.attribute.resolver.LDAP.ldapURL = ldap://openldap:389 +idp.attribute.resolver.LDAP.useStartTLS = false +idp.attribute.resolver.LDAP.sslConfig = certificateTrust +idp.attribute.resolver.LDAP.trustCertificates= %{idp.home}/credentials/ca_server.pem +idp.attribute.resolver.LDAP.baseDN = ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +idp.attribute.resolver.LDAP.subtreeSearch = true +idp.attribute.resolver.LDAP.userFilter = (uid={user}) +idp.attribute.resolver.LDAP.bindDN = cn=admin,DC=oldap,DC=test,DC=elasticsearch,DC=com +idp.attribute.resolver.LDAP.bindDNCredential = NickFuryHeartsES +idp.attribute.resolver.LDAP.dnFormat = uid=%s,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +idp.attribute.resolver.LDAP.searchFilter = (uid={user}) +idp.attribute.resolver.LDAP.connectTimeout=5000 +idp.attribute.resolver.LDAP.responseTimeout=5000 \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/logback.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/logback.xml new file mode 100644 index 0000000000000..e1f38b838308b --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/logback.xml @@ -0,0 +1,124 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + UTF-8 + %date{ISO8601} - %mdc{idp.remote_addr} - %level [%logger:%line] - %msg%n%ex{short} + + + + + + + VelocityStatusMatcher + ResourceManager : unable to find resource 'status.vm' in any resource loader. 
+ + VelocityStatusMatcher.matches(formattedMessage) + + DENY + + + + + + 0 + + + + + ${idp.fticks.loghost:-localhost} + ${idp.fticks.logport:-514} + AUTH + [%thread] %logger %msg + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/metadata-providers.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/metadata-providers.xml new file mode 100644 index 0000000000000..d7230054af64b --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/metadata-providers.xml @@ -0,0 +1,78 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/relying-party.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/relying-party.xml similarity index 100% rename from x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/relying-party.xml rename to x-pack/test/idp-fixture/idp/shibboleth-idp/conf/relying-party.xml diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/saml-nameid.properties b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/saml-nameid.properties new file mode 100644 index 0000000000000..bbc1597073149 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/saml-nameid.properties @@ -0,0 +1,37 @@ +# Properties involving SAML NameIdentifier/NameID generation/consumption + +# For the most part these settings only deal with "transient" and "persistent" +# identifiers. See saml-nameid.xml and c14n/subject-c14n.xml for advanced +# settings + +# Comment out to disable legacy NameID generation via Attribute Resolver +#idp.nameid.saml2.legacyGenerator = shibboleth.LegacySAML2NameIDGenerator +#idp.nameid.saml1.legacyGenerator = shibboleth.LegacySAML1NameIdentifierGenerator + +# Default NameID Formats to use when nothing else is called for. +# Don't change these just to change the Format used for a single SP! +#idp.nameid.saml2.default = urn:oasis:names:tc:SAML:2.0:nameid-format:transient +#idp.nameid.saml1.default = urn:mace:shibboleth:1.0:nameIdentifier + +# Set to shibboleth.StoredTransientIdGenerator for server-side transient ID storage +#idp.transientId.generator = shibboleth.CryptoTransientIdGenerator + +# Persistent IDs can be computed on the fly with a hash, or managed in a database + +# For computed IDs, set a source attribute and a secret salt: +#idp.persistentId.sourceAttribute = changethistosomethingreal +#idp.persistentId.useUnfilteredAttributes = true +# Do *NOT* share the salt with other people, it's like divulging your private key. +#idp.persistentId.algorithm = SHA +#idp.persistentId.salt = changethistosomethingrandom +# BASE64 will match V2 values, we recommend BASE32 encoding for new installs. 
+idp.persistentId.encoding = BASE32 + +# To use a database, use shibboleth.StoredPersistentIdGenerator +#idp.persistentId.generator = shibboleth.ComputedPersistentIdGenerator +# For basic use, set this to a JDBC DataSource bean name: +#idp.persistentId.dataSource = PersistentIdDataSource +# For advanced use, set to a bean inherited from shibboleth.JDBCPersistentIdStore +#idp.persistentId.store = MyPersistentIdStore +# Set to an empty property to skip hash-based generation of first stored ID +#idp.persistentId.computed = shibboleth.ComputedPersistentIdGenerator diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/saml-nameid.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/saml-nameid.xml new file mode 100644 index 0000000000000..7d82cf52048c2 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/saml-nameid.xml @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/services.properties b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/services.properties new file mode 100644 index 0000000000000..0aa900731a42b --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/services.properties @@ -0,0 +1,65 @@ +# Configure the resources to load for various services, +# and the settings for failure handling and auto-reload. + +# failFast=true prevents IdP startup if a configuration is bad +# checkInterval = PT0S means never reload (this is the default) + +# Global default for fail-fast behavior of most subsystems +# with individual override possible below. +#idp.service.failFast = false + +#idp.service.logging.resource = %{idp.home}/conf/logback.xml +#idp.service.logging.failFast = true +idp.service.logging.checkInterval = PT5M + +# Set to shibboleth.LegacyRelyingPartyResolverResources with legacy V2 relying-party.xml +#idp.service.relyingparty.resources = shibboleth.RelyingPartyResolverResources +#idp.service.relyingparty.failFast = false +idp.service.relyingparty.checkInterval = PT15M + +#idp.service.metadata.resources = shibboleth.MetadataResolverResources +#idp.service.metadata.failFast = false +#idp.service.metadata.checkInterval = PT0S + +#idp.service.attribute.resolver.resources = shibboleth.AttributeResolverResources +#idp.service.attribute.resolver.failFast = false +idp.service.attribute.resolver.checkInterval = PT15M +#idp.service.attribute.resolver.maskFailures = true +#idp.service.attribute.resolver.stripNulls = false + +#idp.service.attribute.filter.resources = shibboleth.AttributeFilterResources +# NOTE: Failing the filter fast leaves no filters enabled. +#idp.service.attribute.filter.failFast = false +idp.service.attribute.filter.checkInterval = PT15M +#idp.service.attribute.filter.maskFailures = true + +#idp.service.nameidGeneration.resources = shibboleth.NameIdentifierGenerationResources +#idp.service.nameidGeneration.failFast = false +idp.service.nameidGeneration.checkInterval = PT15M + +#idp.service.access.resources = shibboleth.AccessControlResources +#idp.service.access.failFast = true +idp.service.access.checkInterval = PT5M + +#idp.service.cas.registry.resources = shibboleth.CASServiceRegistryResources +#idp.service.cas.registry.failFast = false +idp.service.cas.registry.checkInterval = PT15M + +#idp.message.resources = shibboleth.MessageSourceResources +#idp.message.cacheSeconds = 300 + +# Parameters for pre-defined HttpClient instances which perform in-memory and filesystem caching. 
+# These are used with components such as remote configuration resources that are explicitly wired +# with these client instances, *not* by default with HTTP metadata resolvers. +#idp.httpclient.useSecurityEnhancedTLSSocketFactory = false +#idp.httpclient.connectionDisregardTLSCertificate = false +#idp.httpclient.connectionRequestTimeout = PT1M +#idp.httpclient.connectionTimeout = PT1M +#idp.httpclient.socketTimeout = PT1M +#idp.httpclient.maxConnectionsTotal = 100 +#idp.httpclient.maxConnectionsPerRoute = 100 +#idp.httpclient.memorycaching.maxCacheEntries = 50 +#idp.httpclient.memorycaching.maxCacheEntrySize = 1048576 +#idp.httpclient.filecaching.maxCacheEntries = 100 +#idp.httpclient.filecaching.maxCacheEntrySize = 10485760 +idp.httpclient.filecaching.cacheDirectory = %{idp.home}/tmp/httpClientCache \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/services.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/services.xml new file mode 100644 index 0000000000000..e04ac8f0afceb --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/services.xml @@ -0,0 +1,100 @@ + + + + + + + + + %{idp.home}/conf/relying-party.xml + %{idp.home}/conf/credentials.xml + %{idp.home}/system/conf/relying-party-system.xml + + + + + %{idp.home}/conf/relying-party.xml + %{idp.home}/system/conf/legacy-relying-party-defaults.xml + + + + %{idp.home}/conf/metadata-providers.xml + %{idp.home}/system/conf/metadata-providers-system.xml + + + + %{idp.home}/conf/attribute-resolver.xml + + + + %{idp.home}/conf/attribute-filter.xml + + + + %{idp.home}/conf/saml-nameid.xml + %{idp.home}/system/conf/saml-nameid-system.xml + + + + %{idp.home}/conf/access-control.xml + %{idp.home}/system/conf/access-control-system.xml + + + + %{idp.home}/conf/cas-protocol.xml + + + + + %{idp.home}/messages/messages + %{idp.home}/system/messages/messages + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/session-manager.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/session-manager.xml new file mode 100644 index 0000000000000..7372029719c54 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/conf/session-manager.xml @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/README b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/README new file mode 100644 index 0000000000000..a4e6f92c18cfe --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/README @@ -0,0 +1,2 @@ +openssl req -x509 -newkey rsa:4096 -nodes -keyout idp-browser.key -out idp-browser.pem -days 10000 -subj "/CN=localhost" +openssl pkcs12 -in idp-browser.pem -inkey idp-browser.key -export -out idp-browser.p12 diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/ca_server.pem b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/ca_server.pem new file mode 100644 index 0000000000000..d090cbf6d3abc --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/ca_server.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEAzCCAmugAwIBAgIUGJBYO12hG0Uo/jCDsHqz9KNR25cwDQYJKoZIhvcNAQEL +BQAwGTEXMBUGA1UEAxMOTERBUCBTZXJ2ZXIgQ0EwHhcNMTkwMzA4MTY1MzI2WhcN +MjAwMzA3MTY1MzI2WjAZMRcwFQYDVQQDEw5MREFQIFNlcnZlciBDQTCCAaIwDQYJ +KoZIhvcNAQEBBQADggGPADCCAYoCggGBALtPoq9PwkRZyV2ZKU98RoVxERWOfVw3 +W5QmU+jBDD7vz0xRvjD2Xz+AtAqLyYCE90d8xnaxY42rZ7IXjd9K56rwVuvjgBfV +6iVV1Vo5q5yxsZTsiR+Zrjd3F/AvhTXRsn2GLo2rS2Q64CjodJqzhPvLiZSoUCXs +sdv+sGCZ8H4Wjomygu4HzoS+y+b6wEfcynd7c0thEsYcqacGcvsxXOSU2bnyx8Z4 
+T3VeYb2i+DWXBtFACzC/bzVzft+Kui+buktgkdCTkJe2Q/TWjqlhGH/o3A98xZRm ++UDnyJG3Et4MDLNmhpizSsFw8gB4DsK7OrcepcPzsAYvG0VoI5flQnraMNTlTZ5X +bwgtzpRolDEx4qKKPfcYauLEuP5YsteVbPcZwUBNxF+YWi800XDN2D8abUrQ1NyK +fKAoOf7xAHGzZm78lvuhWxKL7/L0TFMCtxdYxzNXQsT4ZgvbtKT3moc8Rw0Jyc3p +/C45CpEwlqDdYcEx2bM/gaFR/KkUbEGyawIDAQABo0MwQTAPBgNVHRMBAf8EBTAD +AQH/MA8GA1UdDwEB/wQFAwMHBAAwHQYDVR0OBBYEFP+p8954Rx6o/i4G/3U6qVKu +lsDJMA0GCSqGSIb3DQEBCwUAA4IBgQCdoxcQxfKd209+TsXjymbrMSWBbfDHLQml +ocKF2EF92hOBJnAm5E8Z2+ZHAZ0hbsl8r1dSIhZuqb7GGE7KfN+mFTP0YlteciPg +0NDM0GsaryftkWe9Lwkt9M72vHP8sCjorskpmYWhHBWgkTfd5Uzso0w3wYNJpeuX +qGOjrjcfpRrDk/fyWUgVPkTBmmEEFWCJHSZF8N+BMHrTiw9UsJXBwbHa983Sm9Lq +pK61y2LTo9OHBHCFl0DmzHiH7AEg9RmJlUqMomn8b/gquIoplUchUtS/h7BVPgqz +w6vS3fE8FgHODthGv55xqhaPV23ZFv6QUlc80yzY3BhYyfu21O/gzo3wmKC6fq9L +pWK2CYgdtDAV0vjTchkYM12iJDLJBdNoLzVZrXqxFMWjVZN2N7GVyc3PGzyOAquf +LuuvSAhErt84mh8MwAbVie09iDTbqPgFjn+D1AK70ocfMVGM2lwOmk0vvCLHtxE7 +fh/9Tf0GS1BHkmgzWwXqTkCK71mJQvs= +-----END CERTIFICATE----- diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-backchannel.crt b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-backchannel.crt new file mode 100644 index 0000000000000..d9f2e9b24c785 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-backchannel.crt @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEUjCCArqgAwIBAgIVALjpmEEY4k5+2/JXwk5y+MoDnWiAMA0GCSqGSIb3DQEB +CwUAMCgxJjAkBgNVBAMMHXRlc3Quc2hpYmJvbGV0aC5lbGFzdGljLmxvY2FsMB4X +DTE5MDMwODEzMTg1OVoXDTM5MDMwODEzMTg1OVowKDEmMCQGA1UEAwwddGVzdC5z +aGliYm9sZXRoLmVsYXN0aWMubG9jYWwwggGiMA0GCSqGSIb3DQEBAQUAA4IBjwAw +ggGKAoIBgQC98CVCuq4MBwBGia77i7tdUMFql+KbY5mY/eENspY59aPZbI6/oMSU +v4Uu3iJux8gGlBmkzxoefhuIA93lE+RS3k1iezxtS0fQW8drYCtJRT9j8xuEdzo9 +hHV3wa4lhnC8Mfn6K79jzj0Y6PkDCSS3wn80YYAQpI0sV6baxGuRByL7BMjHITFw +EbFYsph6zQ/3SZu1OWocVdgZkF+n3zraUDuj87M+Tn6RRhWF0Jq/vFAUNCTWr7Iw +GTUib9LWx3v5IqXttu9NBMSd5Q5mIoPWW+/RA3mpcsN51rVRa2od6HABpiEv7I7B +yEtBVNPkYfOPF2TIxefeyIl3UCD5sF/2nDAZfe/ENnJFQwqnPAXwXrlPlLCoWsOl +OOHfyE7tbORkVXNuWYq9TcTNLEIuB081Utxsoa+iiWGyn0/uYDg2NGxJ2V2/PISf +LhUilCJSmNbMUFirVSUJD8mntn8VRidn1/Wx7RCphPGmMmJtAN080/+zCOJVE4/G +6JnRMFpNZO0CAwEAAaNzMHEwHQYDVR0OBBYEFH9wTxThSEj7nswt0sIjbpw86itt +MFAGA1UdEQRJMEeCHXRlc3Quc2hpYmJvbGV0aC5lbGFzdGljLmxvY2FshiZodHRw +czovL3Rlc3Quc2hpYmJvbGV0aC5lbGFzdGljLmxvY2FsLzANBgkqhkiG9w0BAQsF +AAOCAYEApBpPTKvKUwcZ8Tfm3TZPINmSXI05OWU1Xu8gdFB0EoUDFE9/uHTJgfmS +5ZjUT8YGhHH883XKNq9JhmyCb5V5ZSjONzMGplcsTAkk0guU0DqKvabyVsQuyne6 +nCkbAr/srtJFutiFJDGmfAmhTSFTVmpI4stpyC+y5Y0PhcUQa6Ytz8wGTMJvftv5 +2S5zhX9IDaWo4C97WWnSXaFepi5n1i4wlCj5HgDkEhoeEt5byJMxzDdN9whEI5Ju +VPMUcRcJNajnSKA1SVAznUpJbHSu0WAfM/MQ9ucX3o3A5J/bA9xlknsEWtugtvRK +KCwuibrOddrv/cQQbpPjDhWk/VF7tOA1kuSHHdYICB+WjWU463bv0rLm6QXqkDU4 +f3XuG2zJTmVQEzBdmv9ATlqQ+sADV216Vyy5I/delW0WnmePGTuxBj+Xm7VrbYNi +WOveqK14343Bpbo8H/V2UvTPGF+oICnmw0fgG7SChtcDY9won6mRVCcy+01TeOua +TSYuiKRN +-----END CERTIFICATE----- diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-backchannel.p12 b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-backchannel.p12 new file mode 100644 index 0000000000000..4c19a5c22cb83 Binary files /dev/null and b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-backchannel.p12 differ diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-browser.key b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-browser.key new file mode 100644 index 0000000000000..fb9aae8983d30 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-browser.key @@ -0,0 +1,54 @@ 
+-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-CBC,448E229091A1814C + +2GcUr2daklgGwVg37U/wo1qdWQhr1PYL6q77UWyWTnjgMC00hHwPF7ChkLmD8jXa +eSxk3Myih85SudgOFydnDAecj3WZ5s1T1LuH1jXN5yg2WAjQKwzoxKvAELfjbX4A +gM5cNMI62DfpFJHnj4WVJioFvXuNB58TM30D1yFZbqUm5I2nThxzoNVKUuOwZ+Ct +Xr/rpX0AaKLJJ7iIws/iByBLvf75/mmharHWTqJV/dh/mqvpstvMXtlr/YpL0jq7 +I1nF9jutGGCkdUmCnXEjE5eGOdgOd8hPuHIHgDAWILqbaGNSmMs3KaJWrOxSp0i2 +ilQPmjqSSI7KKGDay9UeEF9FPuLQvSk1S9/e4IC2lG30wqZpJ8kY7DJmhXXvG7eg +6BGUOpZ/haXqltI9gNRa/PlME7zk26S0KPSqorh5ajDeMKcX7g8aQEh4kM7Rp5Ne +94dW2mwUj1mqHrkgPJewExlRq6TE23DUA4fbm45VnducaIhz6nw6deQNzArnl61+ +yLXjd6R+usRJubh2NK5yKWAJczI7bzyTU1B+wYFK9m9HTGi3JQFFG4hGwktjSy39 +2HHO5swES2di18Yq0unzAagK9+ngAkWECIRjswqaFSfFW9FZdsACiS62BTcXygHv +AP6dm1ofL1kO8/QYF+fTKmLP+mTtVYs0TE219u5gAXODHIMDIdpM4O/weAcoAjP+ +WVDefvFC/A986ZogG/xePyh01fkQFRWjSUq73IWOxflWQOZC6ur65YDhFhcNRfq7 ++pEv5oourj4613ei1THhA/OZKedn3oqTxCZcp4BeTksI3lwJKWxlO799UHb2hiKR +ICnNskE7jvYEQLJS7oesqJjtme3ERaevRVZJXf9h+/1Ohv4PZOtNHS4EKVzECPJn +OLtr221QRkyqME4PeI5u6pTkTQj8y2sC6jZ17nlNXDtP6bCiLUsB64HDAhu1+7gm +ahsFcwe66iNo8UHptKSlVl+3kjSHLO6MvDvFSxuk5ZbA/6leXEnl/9sm0vjE/dgj +n+Q+WbE0+plfP91AR3KeH/NNPF6R6n6aFAIo6ZK4inUKTSe7QEK9qgJo5I3nIhoa +hwKkf3rvcNYsPrB0n98ncoR7CSjjfjzou6m5oXgtsdiC8IrnjsUXY8VsfHqrBbJD +usqa0E0R1n2atfiFs7Lmm2tJLIfYnTyfxCoU9U6IbPbwgoD11vy6wZV138BXL4Vq +E/7JZGP1E7/M3TziFgg8tQRVw9Y7tKI1AFXH5FPbTLubqqp8oiH5QSSUC2lsDTRV +I6DA43W2Et2gn16moW3qFekO6qkQh3XYLiyH76zLQ44ebsB3wdwONxOcCpDIvikw +UWKnJ4XXAHbVs8B4myShjUN3yiDMHQ5g0dkJDgd0oJv5lYZJD2LH/xn+FJfVN+Xr +csD6Dgv7mAsvaOTtv3s4zBNtWEdnZpOy4agsFr+dCH9V/Wqq3Cp8y6nOAGHW1Yw7 +HS7s7cZqssEhSAoQUgtqLr4MsfysDM9LhPSc/wHGt9lOfTyJkp1B8rWuIkMy/A2C +YU92pHLXtd1VW0gJd410GUXKNRB8gw4MakRvKCEQgaARXMFVYUJGfTPP43ZcDgeK +FBpeYT+7lJxm0GN9B8hzcTI3BiD+efZRXVw+qltOq/I0m7896lMJjoigF1i8y/ou +537CyaCmasK6V4b2omF78omRYa8xG/6UUTV5n3RA7+KTEvk8RyCl1+Hqu1Yk9Tnk +bjR2xMnF6PG7MUWzOaDFjlL+LWb5OF9gsmkBOz+Gf8wc0ZKFvkoqgv65Q/miklKO +A/jPY8KzWFMAXHo3JP+PiFlLAmJHVpnMumUCycd62P3Omp8HWgyz3dG68ubwlQlU +OwYaJapke1SazUBNdGIxFw4PPJbd/AuoTb0hKznVZrAGW/QmH6vtQLIL38t3i97b +ZW3SmhF1CmBngcVe4jmWtv+ElWNfB83XpLdF8Az7JxKruwvQpsIkn8lZKpEaibP8 +RyqbDIdIvgetcHgSZTZrHROzk+a2XFBxvX0FG48I15yY5KG2/uZrg5KWztXF/wh8 +GB2m6OVko2+6jb9ffs3ur+NTYBbWHgt0VaZW/TJMduIar6z7KRPahCdPJE+ZJL9B +Y1UzzmQhyp5bgTtrn9/NwXotM62OCiTNm7+asdUOkj5Yg8NG//CbRcjPmIO9zLkt +qZnyvf187oFZoaoqAcxrBCBHVUC5iqi3gJ3IVtjKM/qN+uqazuayzEl5XqCXCIIY +EQycYGOXzGZJp9ZpUlbnsazUAwxqo+eoxpzRXw66dLAZRcriXGhvcittVKuVAbxr +rRs8rYXvGYyUlNH15qiGzU7zRiKIzjV43m6KSFxGPX0vQdKif9kNc66YJ5C3FmwD +RZT3u/YED+QUg9QGNIhIJX+boV6yBjiZp+TUGiEfymPPbf7MXiUdGo1TWuSYjC+r +GOI80Bt6np5gXeQxW8vIEVspiPkiFqag9xrG3XfWZjYnLg1ue8sufrSu7B9oRwXJ +1E4l02nSCrb+PHlrEDC0BfK+3cGyahu58ayX1LfgMMGoYn1Z5lirIRlZr+lFNq7V +F001QMJLCAwoUv2pNheJTD2GuvJDY4F+BepQe0GiMhRKTxiUEyHpuVPUenBn+yKj +S1Jo12oVxNZvPx1z4dbqbeQBu1uLwLpGj39HW+FvuQYA1pi+sAyBGV5mTnOaAyyK +qRVevAYOVqToXmIrmwqFirl4q/UHX+8KKXZE7+t5l6jx1ik9fbwPBk9xYTguRUDR +JMLlwq6z9wz405z4yL4wcr988kqEp8rflUEBOP7uQXHAqHPVRV8eidkuW/Q7XmbP +s3lxo3CW22txdZvrhkIRTAPuHAoVf06F/DSKufpwTYDEd6vwRgupC1K8Cj8irVjV +PzZmwQDzgIhp3PvuiIxqiQRRGbu2uZa4YnryFG5ROY32S8u4CJ/3YrdnK9x5ZhwI +L2KVja++t1EAcq7lvAelV5+5k4CMGYkiHVRZc8oM6H6DQFeanD3wo/wJExOwp07M +y3Lg8snXFFKbwpIPoMFvo8tc+AvxpNR3ByycxcuEdjKPCLZaAZ7WpjWDm5INJRmc +QpYqhhwVWGLITtrW0z8vJEIffDT7dx/Z1pPkmBhqXgfQgr6650sravzrvWooYmmL +-----END RSA PRIVATE KEY----- diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-browser.p12 b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-browser.p12 new file mode 100644 index 
0000000000000..30d0583b37895 Binary files /dev/null and b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-browser.p12 differ diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-browser.pem b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-browser.pem new file mode 100644 index 0000000000000..51324a9c4d75a --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-browser.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- +MIIFCTCCAvGgAwIBAgIUXgnF25GwdlFAe0byzIYbN3AT2WIwDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTE5MDMwODIxMTAzMFoXDTQ2MDcy +NDIxMTAzMFowFDESMBAGA1UEAwwJbG9jYWxob3N0MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEAth/Euk/QuccVqvRBivjqn6T/1KRRxnAeSVnbfaww1XS2 +aIj7CjAmuOblA3MPZcnpO9l3xTwcLGCzoel/BopjF6rbQM92U/XcZO3i5p9SrXZ3 +tLn0RWbv3+0yN3J8tbsBNsoIoOCdBurBmwRMCtQn+WaX04BIVDOeRRPGrtnQaKGk +vtKigxf5GOLsVlvmb4749MUdtR+PrmtNWpN47MLpMCLEWK0OS1CyAgUyu+ktJCMV +aBX29aHK3y7oIURgFrXtsMWavCtMhGHZYvuKOhfoBszWM6zEC73NyuKX8CewIoRO +HnV+r9SpHuTqVS0Pcrts8TtBcLWCbGUlHl2IYc36tiWw5H0/85im0h62o+b2OXoj +1PV2xSPBQf970YvjJhPu7OIxde3uIGcITWfcMTYINdpuguII0WJnVXbzNnluUoNg +c1QudVfFbNU0M9WexL7JRp3++IZKynuCzJ8g/LAUArt5OetYz8CsGkR9ijd4S42P +9t9DrMb71XWwV8aKxjTcfh2GEYbG/ZRZdZnXyuNrt04uoCrNp8q5yv+xtJeKG2So +PfgpzckpZhk9P4WqSz0JXLn1WweCjEOdJ31NVbGqWjQAPYmGNjb6k0Jf/wdopmtV +UehZUnUg1zeI39JSd189Z9NR/Up7yi/Mn/TVCC8TOouWs4QYcuWal/B1JF2PZlEC +AwEAAaNTMFEwHQYDVR0OBBYEFKSTsih4bvNakB2hY87W4PZj5fK2MB8GA1UdIwQY +MBaAFKSTsih4bvNakB2hY87W4PZj5fK2MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggIBACfJSHzEx2fTGFrHDh5DCacixQmK5NDNh8kpQyuTLFyF2LZ+ ++iYqEQjd8Mqd3i6NDzv199P9RrrpnKG+SfrIKRff98sHAssxwLXzHTZJePJMWauP +JfCX2wxojPHWa/3cXBcxPQa3T/HAvN4U9GMIziQllmr/7R8LBqtkWBwei7/3lQAH +RYStfdC1ROG4rbrLgVhEKf0SQ8cFcHkrxxTsomVI+8aS9cCLPdKo0N4jaV4hVocv +nD6QpDIFzqpLYYFTRSNK3niJ30pxHbwlzluj9oYQcdX9zS+CkiX6N+ELPcR7J3/h +0Pt7HkII5XU43yf3ApRVodIW2bdEL1i4ccH86gzvoijaJJdGazXXNjnCsL6zuJPT +tLfQEbMHPzGOUnkfVC6eO/4POqXva9cZd+bk+SpbDJCJowOHTpl7U3SEFgttiJXo +N4+BZhHVBhUxjLGXqiZmtaffXeXXuWzVbYj/ve3ymL++hCkh3niqajpdu2cKvRaN +VDNi5tO7nwruMaAmsUi6KJVui8tFB9CtDbK/UTeNfi2xUrFrPUJICyUW7wnb1ayL +w+FPAjV5+OBZ1HOsnP3ktC4e7WWfo/8pENMRPMr62oJS244Docdzg6HCdu2SBSK+ +ulX+uylH2rmBDIQ6IAj8cSBYVKyQZFbUCERCoF1KxmChL+49h9cSB0SJTiFa +-----END CERTIFICATE----- diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-encryption.crt b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-encryption.crt new file mode 100644 index 0000000000000..514fe401b78b1 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-encryption.crt @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEUTCCArmgAwIBAgIUcpV0WDMWve6/1FPqzjYJnMLDLRIwDQYJKoZIhvcNAQEL +BQAwKDEmMCQGA1UEAwwddGVzdC5zaGliYm9sZXRoLmVsYXN0aWMubG9jYWwwHhcN +MTkwMzA4MTMxODU5WhcNMzkwMzA4MTMxODU5WjAoMSYwJAYDVQQDDB10ZXN0LnNo +aWJib2xldGguZWxhc3RpYy5sb2NhbDCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCC +AYoCggGBAI4haOiDT0q1eBqwKWbOUfxTVQ2d3nC4wpPekB8YixcePBHu1GEKnCmO +E1/EHyhc5EA0p+IV4GRfwEC4bwWOFt5xY2XJREeXg21G1pQ0R2aHIb9cmJWuHHBt +4pIaEiOw1se4zfiTNgcO2WOc6ixmx/cJYjd+FvMPP3Y96JFycVt7f4DTUZ/IGUTO +jOjwRet9vpBHBzIIN+MEnarwd3mPt3FLhsMU09Q/C3Jfw9x+HcSrCQn7d/3ucvZL +iRdYfGxr6YVapwij5eRkVDx6pRi4jTWgNy7BGieWJLElDoPTapmPZxhTIkxnv9/A +HqZmOc2hErU/aPKe/H5doC9Sfj9jEhXURVYeK7Rsmq8IVW6WnVxODjO62/mS58Uu +slkLiNBd0GDx+PRm7YG1k3khJFihtL1xfKIE8PUEqemYIvoHY3/xTujplUDTdHlC +VRaf5/QhT+vgcn7JEXnWtgW1SzSg//lmN1htfOlPb4pXa/Y/Gg2JrsMXXgu098X8 +u++qOorK2QIDAQABo3MwcTAdBgNVHQ4EFgQUUxIigNlOHt4GZvE4XwmwPq0H2xsw 
+UAYDVR0RBEkwR4IddGVzdC5zaGliYm9sZXRoLmVsYXN0aWMubG9jYWyGJmh0dHBz +Oi8vdGVzdC5zaGliYm9sZXRoLmVsYXN0aWMubG9jYWwvMA0GCSqGSIb3DQEBCwUA +A4IBgQB3V3QLkSEnix65cZVH1x4ayZfkyYUeXqQEpjsnz4wGPmWXQp/vJ313mWsV +dtA6uCEHAhAi1It8QpOw0DkQQ39GYLANkra1skjGqOgqjsqCsEX9haaIVvRrG1YP +GA5C6nq88C4KgAYzhfzmOTkDvCLeM/adSV9q8y4juPsrOod81zBjp5GhLfraVUXq +6gwMjRqE+eEgX1RGiYDNl3oar4WB6+cCiTsbkJwHsmGc2LviHxlkPRSIPkJtTUAW +bTCN38FxQda9Ao6ZavSn2Da/HhQm1yxpOdEfTeBSFS5e6NSqj+X1Mc8y0L5cLUyZ +kqQ+q2xhMap2WyDxUm5db9OR5VkrpejR7+f+CYVD7CTt7bqoHQZgaOrGJLje0ZQX +cAeMgEFja4nK0TY8+eD+ijEZ3y/LRMsIl5Hetxaw1atgoVKociuhoF5wZSK0OsDQ +Qx/AEkhqDaSUfSZvSJlD645k/E20NGiK/a8nsquJ3Tkl8rQDEk8r/IKJVmo+vkuX +NgQpDdo= +-----END CERTIFICATE----- diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-encryption.key b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-encryption.key new file mode 100644 index 0000000000000..6d7535ba8e1cb --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-encryption.key @@ -0,0 +1,39 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIG4wIBAAKCAYEAjiFo6INPSrV4GrApZs5R/FNVDZ3ecLjCk96QHxiLFx48Ee7U +YQqcKY4TX8QfKFzkQDSn4hXgZF/AQLhvBY4W3nFjZclER5eDbUbWlDRHZochv1yY +la4ccG3ikhoSI7DWx7jN+JM2Bw7ZY5zqLGbH9wliN34W8w8/dj3okXJxW3t/gNNR +n8gZRM6M6PBF632+kEcHMgg34wSdqvB3eY+3cUuGwxTT1D8Lcl/D3H4dxKsJCft3 +/e5y9kuJF1h8bGvphVqnCKPl5GRUPHqlGLiNNaA3LsEaJ5YksSUOg9NqmY9nGFMi +TGe/38AepmY5zaEStT9o8p78fl2gL1J+P2MSFdRFVh4rtGyarwhVbpadXE4OM7rb ++ZLnxS6yWQuI0F3QYPH49GbtgbWTeSEkWKG0vXF8ogTw9QSp6Zgi+gdjf/FO6OmV +QNN0eUJVFp/n9CFP6+ByfskReda2BbVLNKD/+WY3WG186U9vildr9j8aDYmuwxde +C7T3xfy776o6isrZAgMBAAECggGAOUyk06AkdlUEj5XXixfAm74ouu2bQMxrvVtr +tO2bHX3V79DeFCVH4cTicKhLe2U/q9bOm+n/5ddoVW9FavLmq1UBIobqnQYrOQHm +/ia5Zk8yPsigxy9DTdwvDIGwMG1ZmXkaqI0Iqf3FjFA1ZCMnmKNS0lZmACV0i0HY +IdMGekAmnhQTgxwTe9bdngdH2PF/wwCvmO8RQNeajNCXCg3zgZxWPw2FfLrR7ZFI +D/tUGvZtFXYo4NmhxUOciCnnN3qlNq4toWZdejQvy0nKUzjRyy/fRpA0CZY7eiIh +/jlOdEM4ZKnDi9i0PU9eqWNBvwM4I5qd5EOYUjZZ4rySGcSW9eoKAYIEcjhu93hl +o1YUjL/R74ySxluX/dC6WBR2GFJsQdUSm3vIoXUl64Fbhro0dgbUhXQI/fO15yeQ +lTYzQygFrDs3z9xvl0/ajvGQC9v63SfADkUvE3pUExqTVkYLj7vNBMQRcXYGZnyO +Ltp2DfkZ+P4/Y8mYOM8xG6VebEf1AoHBANQsx3Z7c4WE3yHcKcqvqKzzTmWZBwEd +NzGA2ARzVuYJhJxtpHSiY21qrV84xqqtEmYVrfXmB0f0iy+rARg8MbYUHXrrn3Ro +FC4HRX6c7uof66nDRjSIWIVYuU93k/H+5n9EvJ22keSANm5yCHnNViYDiJmrwS2B +vE9t3iGoJQzz+tPJ6Z9i71giZRRByePwQBEOsrbn5W57lStTDDAINlAwqMxqJcSg +4boNJURVAEFba2iGUvcDzuh6Zgep+KAx2wKBwQCrfOBmCvapiqW4WEvgMiPGkGEw +pJXwH2hzqNzM9E5xFj0ZXWToDmDpyWneO+KtPRX54+KqdzNrYoV9eX7a3w12hTd4 +U/dwqLBS3rtuJFWstZ93/RXcxsAvlHjKzZijfmhkS2Er/Aoifv+ItaYYvDZcBZW7 +5Qv8wb66E7cBmhoDm9Y09DkuJ39T9cLtVMH47+n84X39kMHr0EGoHoiIJ2fUpIVJ +xsLYEHcdzf7EgvgakzTUIre2zaBoUECoxiGE1lsCgcBwv6L3GT8YMrWH/sDOjm3q +Chkc8CbCts2/pqIkK06AS4qGvdTojeqcGFYtBbS8ud/PE7Ivu58e45qbKBoa0NHx +2dqYmWFQsIYYt5B8adSj4XaP7c7+132CztUan4ak0DmYY+R/Qqdgb6l9I2fRogYn +iTUrxBORpSF2bAHgjMOzkGs35FIbeq2Jfi2MiDFKyT6iUdcyTySig1WPgkA1vgdu +sYzho36ScJPDFH07RJgwIu5+yvKHhSSMcY6KgA44woUCgcAQeB+87RSfV7duqtwu +neIdscQDverAcuO/J9p8vIHEDpSzmOqV9ruL+FH4i9vLM3Cm3SHpqSUBDScw07hB +IObUGRsorTe4VBsK+mUZwNeRjKfSCDZPS68tYp7vKyubmApCh5K/fKckhPnZkUhw +W7MEx2yH7Qlnrtu4VyheOQSCkDbUQxgyvIxX7eRYkqlGVRQtuyI0dfCJyyhjEvud +x9sK6vn6CeKwiXRHIn9zM/jL5JNQRD1Mixb6rapoCw8xCL0CgcEAsYHLgf5lZYaB +ZON5w3+OJUEBypASaR8J0w8Na2uqs3KBTtclG6v7OiaWm8o9D/08W6VqA0X+lzCR +93gJuC2r+j+GTkfY77I7JV5AJzUaGSIPqbm7XWzGVsy/jKYksASMh9qgUaYdWjv9 +7Agqr4L/Jv0iwauDjd5y8EIkJ0vDHKceeupwzXO3aMPf9gi9n9VcFg7lcWc/jjCW +T4fbuKUAPUb/ttjBWKUhBiTBMg9aLbbGNbPO8WM/2Scrkepr7De0 +-----END RSA PRIVATE KEY----- diff --git 
a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-signing.crt b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-signing.crt new file mode 100644 index 0000000000000..db092d8f81825 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-signing.crt @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEUjCCArqgAwIBAgIVAMo/bS4nTf4TlWhmWu7auvJhceTMMA0GCSqGSIb3DQEB +CwUAMCgxJjAkBgNVBAMMHXRlc3Quc2hpYmJvbGV0aC5lbGFzdGljLmxvY2FsMB4X +DTE5MDMwODEzMTg1OVoXDTM5MDMwODEzMTg1OVowKDEmMCQGA1UEAwwddGVzdC5z +aGliYm9sZXRoLmVsYXN0aWMubG9jYWwwggGiMA0GCSqGSIb3DQEBAQUAA4IBjwAw +ggGKAoIBgQCGXWD/YD5VD52S6vmy1iRzYI7ZxAsyiDSOP3qQgRtcHssgNluTae6r +GIoj5m3tfQayHmh7ctI3zLhwReD9GlLZuV2JxaIfn/4e1N2dsLFvgYMmao9g41DE +/RG2Zjl/K9Ux3v0xUx5uoK0p68GR3ykYjf/Ea0AIJCx+SfRt6uOQ0nlAC19/gffJ +G1pzcbWxZMxx5jJ8bULtxbAgC7tW21V9kq6fBKwTpJS7XVC4xwou8Vi5NnoPwjbv +f9Agg1TuLI+Loa0+gK3exh0jGlHfvBZMobX8fZI7yDkt60hFe5g97R2yJuVekHR3 +VUyAG1xYWcS2l6tJ7XVQKRrwlwUumG0bsnBfa918p6V7pOsS5quKTCQpWkxpXO37 +BSbd3TrlpSde0ncFHg3bAxIPtlxbJe87CA4y9JuyrjqCqw8WC5clNCvlGBx5aKgu +986/TWK8neeJZTQRpo+Nwf+Q8tuNb21tYA7B04h2MMumdRcVJ5IXDuVuVZJecNZp +LZODW3bHiGECAwEAAaNzMHEwHQYDVR0OBBYEFGtVRTYHCuBUetx2T2KF8MkvAcwQ +MFAGA1UdEQRJMEeCHXRlc3Quc2hpYmJvbGV0aC5lbGFzdGljLmxvY2FshiZodHRw +czovL3Rlc3Quc2hpYmJvbGV0aC5lbGFzdGljLmxvY2FsLzANBgkqhkiG9w0BAQsF +AAOCAYEAe5fL8bstZsj6p1lZ3fIMjeRcz60BmDyIobQFTQu9PkRa3hcLZdCRtuNL +BfQbNr4Kymo8GP9c/RkmjIxl32cyCrt7kLWLurUptfYO8IgXpvnQ8EfdSO9PBDag +x5WVg8SHVJ+IeL2k+D/EnyQUisQPau0JmJCNQsAuzGrg8FmHa7IBDvWtAmO4BRRd +pZgyC/mfEwF8mbGoRJt7oHs30a+5RVHUhRDLhQD0+L3efgEvq+9lsDKIQHzG2ITg +Z5qqLkYP6nlNlMHU+xS9by4DT4u9IOOsQJ2DS/PNrYFRLNUwDKkdBFrhy3uM0UYl +e8Gvcow0paET4e1i50Urk7Yrnadcr7B9nNqHCrqW6QyuZmurhxyhAIiwYlc+fCVy +BUM9IlZ9yuQNRmTLr9irWbajfKSTtfzJDwid9ZKmKCDzDMci3oobVc8xTOGckoyB +QEyPGpCHu0JACjMUt/qqt/qD0xJTIibRrwPEOBKrxrCFwMfzdKnlmVUGNhI4cA88 +TvpYlz3p +-----END CERTIFICATE----- diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-signing.key b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-signing.key new file mode 100644 index 0000000000000..9c45533265bfc --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/idp-signing.key @@ -0,0 +1,39 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIG5AIBAAKCAYEAhl1g/2A+VQ+dkur5stYkc2CO2cQLMog0jj96kIEbXB7LIDZb +k2nuqxiKI+Zt7X0Gsh5oe3LSN8y4cEXg/RpS2bldicWiH5/+HtTdnbCxb4GDJmqP +YONQxP0RtmY5fyvVMd79MVMebqCtKevBkd8pGI3/xGtACCQsfkn0berjkNJ5QAtf +f4H3yRtac3G1sWTMceYyfG1C7cWwIAu7VttVfZKunwSsE6SUu11QuMcKLvFYuTZ6 +D8I273/QIINU7iyPi6GtPoCt3sYdIxpR37wWTKG1/H2SO8g5LetIRXuYPe0dsibl +XpB0d1VMgBtcWFnEtperSe11UCka8JcFLphtG7JwX2vdfKele6TrEuarikwkKVpM +aVzt+wUm3d065aUnXtJ3BR4N2wMSD7ZcWyXvOwgOMvSbsq46gqsPFguXJTQr5Rgc +eWioLvfOv01ivJ3niWU0EaaPjcH/kPLbjW9tbWAOwdOIdjDLpnUXFSeSFw7lblWS +XnDWaS2Tg1t2x4hhAgMBAAECggGAVHAgQbxYz5m1m5tg+dsLIxqeCwnhQ4tHA9Gu +k1sfN3/+hKrEV9pta0ZLoY3wrRsx9wOXA2U/6GMV0TCI2GYPhNWxi6AQZzFjQDrj +XnKovvsz0ROMMQYJPxIObILHRTzliKaqlLG4YStGRx7qEWKdb9nkzqnbnoIlF+Qf +4GeJRPyPyAidpgKmFdLDv+LInMdMl5NjTMBZHBVsvmbA/y9tBTIF3NbR0LJP6YMV +9FjY9HElIZezk22omk/HGneOCvMi/ywPyyniw+0DkEBpfh32k9jAMBmIfK0+xL7g +PKUDGhvLgAKrh0s1Vu4xpIWlhR+71ImHNJfPLvRv4H39uzDZDkAA5sIvLFfctweF +wTzudDapDQbqvZubjmj8KoGzThuTxQL/dxOnR6I0nFLNTQdVGf5k4tyMeMr1Lb4C +mKpqGrmkkcgl4GLqE9FNcgXGY2nqLhJboQ7J+9ApT391rESbHHTaqCzeEAtcjLwJ +c+OE/n3J35u5PnvrTvFtkY9n01UBAoHBAOiry2q0B1rPWboR6tLu8ejhit9H1kYy +VosWieOyoH4XhyIwkNna2EXABoI5ZN2xGgjAud/ilbtMr5IoSszfBGeQvQVp0aYI +njWWi+xC2+/7wGIj4ButDhl11d8UUhOfZW23cwVAVQAT6nzeaQ/2c7I9rqvIMHNq 
+8s+CauT6Q9P4NT4X2JvoYmjkUqeoKyNfgXATOjez2NReG97KaSg7kD5FLAkwi9iG +8Lh0O4SwQu+SkqLMwMsG0CvZOV4RvW4AUQKBwQCT1kGOa4c1F6E1gOeaYIWgHfP5 +Y8C0+SyysjPIp+g/Ct3dmnmglTvowtNqqInFTbEV/xlbR5wg5Oe0f4Js6++nzIx0 +ScC1031oGukQSI06nsa5Xr2UJ2xNqaF4CDgXpzpFWbfcapVl77TOlnmR1Lla7t4A +HNo0SG7mHXtX1GYptYzMxsug7cltXgYNHt15R6m4pdxd8gYHThb0OLZyvS6saSFO ++e7EepekeOwGPO/T68iriaihdgKEYnPl5IPbkxECgcBTp2jUrXOzf7eoAccnHcgW +TflLPB3bUT/ydCERn+i6TSVF8qYx3dBCi78LVS1h08ojeX+a8nGZTQIXD6uNK2X/ +AngjmR9KjfCsHbujL4q6Jw7Nek6aRMc7+jTyJvE3ouip4PpW2gF1rLNzP9gQaDDC +1hCWLLzW0o4Ic8CRITX/4U7rS1GTBKNVSTo6k2knJ7B3JkKZQm1kNY3ZtXbHg1ri +OKaTi0PMjv+/ofLAS0tVraJRLELo/EMecNSCcOhlw3ECgcEAgmwdz6BrWtpq3UHI +kTggCt8B8v0e8El3NnriFl6TFLLogu0Z6iJeBP2D+G+Rg6AD32z7GL03LwnipClt +jdwjxHw/WQQL4B0hYKuE+1mWRqX2mWjMhRy2flltWvCdU7izPqVlAC6DeLedYjf8 +Ljxl/aQH9D9//Wn9dEOEIjr+mkNRYQLseB/veO3fFsopByrCv2SL4dF3ohAxTemO +g/EEcRQaT4tNKscgavhG+UXfKq2zjinWvALZO6ODrcgIz7iRAoHBAMqWH5QzlffH +mDhvm4gm7FcriNLSsS3OkoQ2u0DLmSIGlIdUUJLy9PVgJCV7RpGQM0pCm79l+Htz +DZzRtGX+qp1xQbM7TuDcD5Ft8a7ZvAZ0Jb/Jz3CKgqPOIkEqZvnCgPY349aopqlA +AKHavKVIyMYws2FpzXU7C59GW0xKq6F3OXbJig5HiRzltwb7sTyTPDYtX4GzHxBM +VRKTMpJhCjx+kUEmENE75PV8xMcNn3wZyAIpoK4l2NNvP2Ryzd1J6w== +-----END RSA PRIVATE KEY----- diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/sealer.jks b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/sealer.jks new file mode 100644 index 0000000000000..598086ecb7eb2 Binary files /dev/null and b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/sealer.jks differ diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/sealer.kver b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/sealer.kver new file mode 100644 index 0000000000000..398a84290327b --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/credentials/sealer.kver @@ -0,0 +1,2 @@ +#Fri Mar 08 13:19:00 UTC 2019 +CurrentVersion=1 diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/metadata/idp-metadata.xml b/x-pack/test/idp-fixture/idp/shibboleth-idp/metadata/idp-metadata.xml new file mode 100644 index 0000000000000..7cec10ee3dfc1 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/metadata/idp-metadata.xml @@ -0,0 +1,121 @@ + + + + + + + + test.elastic.co + + + + + + + +MIIEUjCCArqgAwIBAgIVALjpmEEY4k5+2/JXwk5y+MoDnWiAMA0GCSqGSIb3DQEB +CwUAMCgxJjAkBgNVBAMMHXRlc3Quc2hpYmJvbGV0aC5lbGFzdGljLmxvY2FsMB4X +DTE5MDMwODEzMTg1OVoXDTM5MDMwODEzMTg1OVowKDEmMCQGA1UEAwwddGVzdC5z +aGliYm9sZXRoLmVsYXN0aWMubG9jYWwwggGiMA0GCSqGSIb3DQEBAQUAA4IBjwAw +ggGKAoIBgQC98CVCuq4MBwBGia77i7tdUMFql+KbY5mY/eENspY59aPZbI6/oMSU +v4Uu3iJux8gGlBmkzxoefhuIA93lE+RS3k1iezxtS0fQW8drYCtJRT9j8xuEdzo9 +hHV3wa4lhnC8Mfn6K79jzj0Y6PkDCSS3wn80YYAQpI0sV6baxGuRByL7BMjHITFw +EbFYsph6zQ/3SZu1OWocVdgZkF+n3zraUDuj87M+Tn6RRhWF0Jq/vFAUNCTWr7Iw +GTUib9LWx3v5IqXttu9NBMSd5Q5mIoPWW+/RA3mpcsN51rVRa2od6HABpiEv7I7B +yEtBVNPkYfOPF2TIxefeyIl3UCD5sF/2nDAZfe/ENnJFQwqnPAXwXrlPlLCoWsOl +OOHfyE7tbORkVXNuWYq9TcTNLEIuB081Utxsoa+iiWGyn0/uYDg2NGxJ2V2/PISf +LhUilCJSmNbMUFirVSUJD8mntn8VRidn1/Wx7RCphPGmMmJtAN080/+zCOJVE4/G +6JnRMFpNZO0CAwEAAaNzMHEwHQYDVR0OBBYEFH9wTxThSEj7nswt0sIjbpw86itt +MFAGA1UdEQRJMEeCHXRlc3Quc2hpYmJvbGV0aC5lbGFzdGljLmxvY2FshiZodHRw +czovL3Rlc3Quc2hpYmJvbGV0aC5lbGFzdGljLmxvY2FsLzANBgkqhkiG9w0BAQsF +AAOCAYEApBpPTKvKUwcZ8Tfm3TZPINmSXI05OWU1Xu8gdFB0EoUDFE9/uHTJgfmS +5ZjUT8YGhHH883XKNq9JhmyCb5V5ZSjONzMGplcsTAkk0guU0DqKvabyVsQuyne6 +nCkbAr/srtJFutiFJDGmfAmhTSFTVmpI4stpyC+y5Y0PhcUQa6Ytz8wGTMJvftv5 +2S5zhX9IDaWo4C97WWnSXaFepi5n1i4wlCj5HgDkEhoeEt5byJMxzDdN9whEI5Ju +VPMUcRcJNajnSKA1SVAznUpJbHSu0WAfM/MQ9ucX3o3A5J/bA9xlknsEWtugtvRK 
+KCwuibrOddrv/cQQbpPjDhWk/VF7tOA1kuSHHdYICB+WjWU463bv0rLm6QXqkDU4 +f3XuG2zJTmVQEzBdmv9ATlqQ+sADV216Vyy5I/delW0WnmePGTuxBj+Xm7VrbYNi +WOveqK14343Bpbo8H/V2UvTPGF+oICnmw0fgG7SChtcDY9won6mRVCcy+01TeOua +TSYuiKRN + + + + + + + + + +MIIEUjCCArqgAwIBAgIVAMo/bS4nTf4TlWhmWu7auvJhceTMMA0GCSqGSIb3DQEB +CwUAMCgxJjAkBgNVBAMMHXRlc3Quc2hpYmJvbGV0aC5lbGFzdGljLmxvY2FsMB4X +DTE5MDMwODEzMTg1OVoXDTM5MDMwODEzMTg1OVowKDEmMCQGA1UEAwwddGVzdC5z +aGliYm9sZXRoLmVsYXN0aWMubG9jYWwwggGiMA0GCSqGSIb3DQEBAQUAA4IBjwAw +ggGKAoIBgQCGXWD/YD5VD52S6vmy1iRzYI7ZxAsyiDSOP3qQgRtcHssgNluTae6r +GIoj5m3tfQayHmh7ctI3zLhwReD9GlLZuV2JxaIfn/4e1N2dsLFvgYMmao9g41DE +/RG2Zjl/K9Ux3v0xUx5uoK0p68GR3ykYjf/Ea0AIJCx+SfRt6uOQ0nlAC19/gffJ +G1pzcbWxZMxx5jJ8bULtxbAgC7tW21V9kq6fBKwTpJS7XVC4xwou8Vi5NnoPwjbv +f9Agg1TuLI+Loa0+gK3exh0jGlHfvBZMobX8fZI7yDkt60hFe5g97R2yJuVekHR3 +VUyAG1xYWcS2l6tJ7XVQKRrwlwUumG0bsnBfa918p6V7pOsS5quKTCQpWkxpXO37 +BSbd3TrlpSde0ncFHg3bAxIPtlxbJe87CA4y9JuyrjqCqw8WC5clNCvlGBx5aKgu +986/TWK8neeJZTQRpo+Nwf+Q8tuNb21tYA7B04h2MMumdRcVJ5IXDuVuVZJecNZp +LZODW3bHiGECAwEAAaNzMHEwHQYDVR0OBBYEFGtVRTYHCuBUetx2T2KF8MkvAcwQ +MFAGA1UdEQRJMEeCHXRlc3Quc2hpYmJvbGV0aC5lbGFzdGljLmxvY2FshiZodHRw +czovL3Rlc3Quc2hpYmJvbGV0aC5lbGFzdGljLmxvY2FsLzANBgkqhkiG9w0BAQsF +AAOCAYEAe5fL8bstZsj6p1lZ3fIMjeRcz60BmDyIobQFTQu9PkRa3hcLZdCRtuNL +BfQbNr4Kymo8GP9c/RkmjIxl32cyCrt7kLWLurUptfYO8IgXpvnQ8EfdSO9PBDag +x5WVg8SHVJ+IeL2k+D/EnyQUisQPau0JmJCNQsAuzGrg8FmHa7IBDvWtAmO4BRRd +pZgyC/mfEwF8mbGoRJt7oHs30a+5RVHUhRDLhQD0+L3efgEvq+9lsDKIQHzG2ITg +Z5qqLkYP6nlNlMHU+xS9by4DT4u9IOOsQJ2DS/PNrYFRLNUwDKkdBFrhy3uM0UYl +e8Gvcow0paET4e1i50Urk7Yrnadcr7B9nNqHCrqW6QyuZmurhxyhAIiwYlc+fCVy +BUM9IlZ9yuQNRmTLr9irWbajfKSTtfzJDwid9ZKmKCDzDMci3oobVc8xTOGckoyB +QEyPGpCHu0JACjMUt/qqt/qD0xJTIibRrwPEOBKrxrCFwMfzdKnlmVUGNhI4cA88 +TvpYlz3p + + + + + + + + + +MIIEUTCCArmgAwIBAgIUcpV0WDMWve6/1FPqzjYJnMLDLRIwDQYJKoZIhvcNAQEL +BQAwKDEmMCQGA1UEAwwddGVzdC5zaGliYm9sZXRoLmVsYXN0aWMubG9jYWwwHhcN +MTkwMzA4MTMxODU5WhcNMzkwMzA4MTMxODU5WjAoMSYwJAYDVQQDDB10ZXN0LnNo +aWJib2xldGguZWxhc3RpYy5sb2NhbDCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCC +AYoCggGBAI4haOiDT0q1eBqwKWbOUfxTVQ2d3nC4wpPekB8YixcePBHu1GEKnCmO +E1/EHyhc5EA0p+IV4GRfwEC4bwWOFt5xY2XJREeXg21G1pQ0R2aHIb9cmJWuHHBt +4pIaEiOw1se4zfiTNgcO2WOc6ixmx/cJYjd+FvMPP3Y96JFycVt7f4DTUZ/IGUTO +jOjwRet9vpBHBzIIN+MEnarwd3mPt3FLhsMU09Q/C3Jfw9x+HcSrCQn7d/3ucvZL +iRdYfGxr6YVapwij5eRkVDx6pRi4jTWgNy7BGieWJLElDoPTapmPZxhTIkxnv9/A +HqZmOc2hErU/aPKe/H5doC9Sfj9jEhXURVYeK7Rsmq8IVW6WnVxODjO62/mS58Uu +slkLiNBd0GDx+PRm7YG1k3khJFihtL1xfKIE8PUEqemYIvoHY3/xTujplUDTdHlC +VRaf5/QhT+vgcn7JEXnWtgW1SzSg//lmN1htfOlPb4pXa/Y/Gg2JrsMXXgu098X8 +u++qOorK2QIDAQABo3MwcTAdBgNVHQ4EFgQUUxIigNlOHt4GZvE4XwmwPq0H2xsw +UAYDVR0RBEkwR4IddGVzdC5zaGliYm9sZXRoLmVsYXN0aWMubG9jYWyGJmh0dHBz +Oi8vdGVzdC5zaGliYm9sZXRoLmVsYXN0aWMubG9jYWwvMA0GCSqGSIb3DQEBCwUA +A4IBgQB3V3QLkSEnix65cZVH1x4ayZfkyYUeXqQEpjsnz4wGPmWXQp/vJ313mWsV +dtA6uCEHAhAi1It8QpOw0DkQQ39GYLANkra1skjGqOgqjsqCsEX9haaIVvRrG1YP +GA5C6nq88C4KgAYzhfzmOTkDvCLeM/adSV9q8y4juPsrOod81zBjp5GhLfraVUXq +6gwMjRqE+eEgX1RGiYDNl3oar4WB6+cCiTsbkJwHsmGc2LviHxlkPRSIPkJtTUAW +bTCN38FxQda9Ao6ZavSn2Da/HhQm1yxpOdEfTeBSFS5e6NSqj+X1Mc8y0L5cLUyZ +kqQ+q2xhMap2WyDxUm5db9OR5VkrpejR7+f+CYVD7CTt7bqoHQZgaOrGJLje0ZQX +cAeMgEFja4nK0TY8+eD+ijEZ3y/LRMsIl5Hetxaw1atgoVKociuhoF5wZSK0OsDQ +Qx/AEkhqDaSUfSZvSJlD645k/E20NGiK/a8nsquJ3Tkl8rQDEk8r/IKJVmo+vkuX +NgQpDdo= + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/admin/unlock-keys.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/admin/unlock-keys.vm new file mode 100644 index 0000000000000..3b15f3e034daf --- /dev/null +++ 
b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/admin/unlock-keys.vm @@ -0,0 +1,96 @@ +## +## Velocity Template for Attended Startup Unlock Utility +## +## Velocity context will contain the following properties: +## flowRequestContext - the Spring Web Flow RequestContext +## request - HttpServletRequest +## response - HttpServletResponse +## profileRequestContext +## environment - Spring Environment object for property resolution +## custom - arbitrary object injected by deployer +## +#set ($title = $springMacroRequestContext.getMessage("idp.title", "Web Login Service")) +#set ($titleSuffix = $springMacroRequestContext.getMessage("idp.unlock-keys.title", "Attended Restart Key Unlock")) +#set ($eventId = $profileRequestContext.getSubcontext("org.opensaml.profile.context.EventContext").getEvent()) +#set ($state = $flowRequestContext.getCurrentState().getId()) + + + + + + $title - $titleSuffix + + + + +
+
+
+ #springMessageText( +

$title - $titleSuffix

+
+ +
+ #if ($state == "end") + #springMessageText("idp.unlock-keys.complete", "The system is unlocked and ready for use.") +

Validation Link

+ #else + #if ($eventId == "InvalidMessage") +

+ #springMessageText("idp.unlock-keys.error", "Unlock failed; check log for specific message.") +

+

+ #end + +
+ + + +
+ + +
+ +
+ + +
+ + + +
+ + +
+ +
+ +
+ +
+ +
+ +
+ #end +
+
+ +
+ +
+ +
+ + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/client-storage/client-storage-read.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/client-storage/client-storage-read.vm new file mode 100644 index 0000000000000..1993c14d7126d --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/client-storage/client-storage-read.vm @@ -0,0 +1,53 @@ +## +## Velocity template to read from local storage. +## +## Velocity context will contain the following properties +## flowExecutionUrl - the form action location +## flowRequestContext - the Spring Web Flow RequestContext +## flowExecutionKey - the SWF execution key (this is built into the flowExecutionUrl) +## profileRequestContext - root of context tree +## loadContext - context with details about the storage keys to load +## encoder - HTMLEncoder class +## request - HttpServletRequest +## response - HttpServletResponse +## environment - Spring Environment object for property resolution +#set ($title = $springMacroRequestContext.getMessage("idp.title", "Web Login Service")) +#set ($titleSuffix = $springMacroRequestContext.getMessage("idp.client-storage-read.suffix", "Loading Session Information")) +## + + + + + + $title - $titleSuffix + + + + +
+
+
+

$title - $titleSuffix

+
+
+ $springMacroRequestContext.getMessage("idp.client-storage-read.text", "Loading login session information from the browser...") +
+ + #parse( "client-storage/read.vm" ) +
+
+ +
+
+ + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/client-storage/client-storage-write.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/client-storage/client-storage-write.vm new file mode 100644 index 0000000000000..4b92d6b81ba43 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/client-storage/client-storage-write.vm @@ -0,0 +1,53 @@ +## +## Velocity template to write to local storage. +## +## Velocity context will contain the following properties +## flowExecutionUrl - the form action location +## flowRequestContext - the Spring Web Flow RequestContext +## flowExecutionKey - the SWF execution key (this is built into the flowExecutionUrl) +## profileRequestContext - root of context tree +## saveContext - context with details about the storage data to save +## encoder - HTMLEncoder class +## request - HttpServletRequest +## response - HttpServletResponse +## environment - Spring Environment object for property resolution +#set ($title = $springMacroRequestContext.getMessage("idp.title", "Web Login Service")) +#set ($titleSuffix = $springMacroRequestContext.getMessage("idp.client-storage-write.suffix", "Saving Session Information...")) +## + + + + + + $title - $titleSuffix + + + + +
+
+
+

$title - $titleSuffix

+
+
+ $springMacroRequestContext.getMessage("idp.client-storage-write.text", "Saving login session information to the browser...") +
+ + #parse( "client-storage/write.vm" ) +
+
+ +
+
+ + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/duo.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/duo.vm new file mode 100644 index 0000000000000..cf4f96a80eeaf --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/duo.vm @@ -0,0 +1,83 @@ +## +## Velocity Template for Duo login view-state +## +## Velocity context will contain the following properties +## flowExecutionUrl - the form action location +## flowRequestContext - the Spring Web Flow RequestContext +## flowExecutionKey - the SWF execution key (this is built into the flowExecutionUrl) +## profileRequestContext - root of context tree +## authenticationContext - context with authentication request information +## rpUIContext - the context with SP UI information from the metadata +## canonicalUsername - name of user passed to Duo +## duoHost - API hostname for Duo frame +## duoRequest - signed Duo request message +## duoScriptPath - path to Duo JavaScript source +## encoder - HTMLEncoder class +## request - HttpServletRequest +## response - HttpServletResponse +## environment - Spring Environment object for property resolution +## custom - arbitrary object injected by deployer +## + + + + + + + #springMessageText("idp.title", "Web Login Service") + + + + +
+
+
+ #springMessageText( +
+ +
+
+ +

#springMessageText("idp.login.duoRequired", "Authentication with Duo is required for the requested service.")

+ + + + +
+ + +
+ +

+ #springMessageText("idp.login.duoCancel", "Cancel this Request") +

+
+ +
+
+ +
+ +
+
+ + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/error.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/error.vm new file mode 100644 index 0000000000000..dcb8e2b06b8cc --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/error.vm @@ -0,0 +1,73 @@ +## +## Velocity Template for error end-state +## +## Velocity context will contain the following properties +## flowRequestContext - the Spring Web Flow RequestContext +## profileRequestContext - root of context tree +## encoder - HTMLEncoder class +## request - HttpServletRequest +## response - HttpServletResponse +## environment - Spring Environment object for property resolution +## custom - arbitrary object injected by deployer +## +#set ($title = $springMacroRequestContext.getMessage("idp.title", "Web Login Service")) +#set ($defaultTitleSuffix = $springMacroRequestContext.getMessage("idp.title.suffix", "Error")) +## +#if ($flowRequestContext) + ## This handles flow events, the most common case. + #set ($eventId = $flowRequestContext.getCurrentEvent().getId()) + #set ($eventKey = $springMacroRequestContext.getMessage("$eventId", "error")) + #set ($titleSuffix = $springMacroRequestContext.getMessage("${eventKey}.title", "$defaultTitleSuffix")) + #set ($message = $springMacroRequestContext.getMessage("${eventKey}.message", "$defaultTitleSuffix: $eventId")) + #if ($eventId == "AccessDenied" or $eventId == "ContextCheckDenied") + $response.setStatus(403) + #elseif ($eventId == "AttributeReleaseRejected" || $eventId == "TermsRejected") + $response.setStatus(200) + #elseif ($eventKey == "unexpected" || $eventKey == "runtime-error" || $eventKey == "error") + $response.setStatus(500) + #else + $response.setStatus(400) + #end +#elseif ($exception) + ## This handles exceptions that reach the Spring-MVC exception handler. + #set ($eventId = $exception.getClass().getSimpleName()) + #set ($eventKey = $springMacroRequestContext.getMessage("$eventId", "error")) + #set ($titleSuffix = $springMacroRequestContext.getMessage("${eventKey}.title", "$defaultTitleSuffix")) + #set ($message = $springMacroRequestContext.getMessage("${eventKey}.message", "$defaultTitleSuffix: $eventId")) +#else + ## This is a catch-all that theoretically shouldn't happen? + #set ($titleSuffix = $defaultTitleSuffix) + #set ($message = $springMacroRequestContext.getMessage("idp.message", "An unidentified error occurred.")) +#end +## + + + + + + $title - $titleSuffix + + + + +
+
+
+ #springMessageText( +

$title - $titleSuffix

+
+ +
+ #evaluate($message) +
+
+ +
+ +
+ +
+ + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/intercept/attribute-release.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/intercept/attribute-release.vm new file mode 100644 index 0000000000000..20bde4609e028 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/intercept/attribute-release.vm @@ -0,0 +1,159 @@ +## +## Velocity Template for DisplayAttributeReleasePage view-state +## +## Velocity context will contain the following properties : +## +## attributeReleaseContext - context holding consentable attributes +## attributeReleaseFlowDescriptor - attribute consent flow descriptor +## attributeDisplayNameFunction - function to display attribute name +## attributeDisplayDescriptionFunction - function to display attribute description +## consentContext - context representing the state of a consent flow +## encoder - HTMLEncoder class +## flowExecutionKey - SWF execution key (this is built into the flowExecutionUrl) +## flowExecutionUrl - form action location +## flowRequestContext - Spring Web Flow RequestContext +## profileRequestContext - OpenSAML profile request context +## request - HttpServletRequest +## response - HttpServletResponse +## rpUIContext - context with SP UI information from the metadata +## environment - Spring Environment object for property resolution +#set ($serviceName = $rpUIContext.serviceName) +#set ($serviceDescription = $rpUIContext.serviceDescription) +#set ($informationURL = $rpUIContext.informationURL) +#set ($privacyStatementURL = $rpUIContext.privacyStatementURL) +#set ($rpOrganizationLogo = $rpUIContext.getLogo()) +#set ($rpOrganizationName = $rpUIContext.organizationName) +#set ($replaceDollarWithNewline = true) +## + + + + + + + #springMessageText("idp.attribute-release.title", "Information Release") + + +
+
+
+ + #if ($rpOrganizationLogo) + + #end +
+ #if ($serviceName) +

+ #springMessageText("idp.attribute-release.serviceNameLabel", "You are about to access the service:")
+ $serviceName + #if ($rpOrganizationName) + #springMessageText("idp.attribute-release.of", "of") $encoder.encodeForHTML($rpOrganizationName) + #end +

+ #end + #if ($serviceDescription) +

+ #springMessageText("idp.attribute-release.serviceDescriptionLabel", "Description as provided by this service:")
+ $encoder.encodeForHTML($serviceDescription) +
+

+ #end + #if ($informationURL) +

+ #springMessageText("idp.attribute-release.informationURLLabel", "Additional information about the service") +

+ #end +
+ + + + + + + + #foreach ($attribute in $attributeReleaseContext.getConsentableAttributes().values()) + + + + + + #end + +
+ #springMessageText("idp.attribute-release.attributesHeader", "Information to be Provided to Service") +
$encoder.encodeForHTML($attributeDisplayNameFunction.apply($attribute)) + #foreach ($value in $attribute.values) + #if ($replaceDollarWithNewline) + #set ($encodedValue = $encoder.encodeForHTML($value.getDisplayValue()).replaceAll($encoder.encodeForHTML("$"),"
")) + #else + #set ($encodedValue = $encoder.encodeForHTML($value.getDisplayValue())) + #end + #if ($attributeReleaseFlowDescriptor.perAttributeConsentEnabled) + + #else + $encodedValue + #end +
+ #end +
+ #if ($attributeReleaseFlowDescriptor.perAttributeConsentEnabled) + #set ($inputType = "checkbox") + #else + #set ($inputType = "hidden") + #end + +
+
+ #if ($privacyStatementURL) +

+ #springMessageText("idp.attribute-release.privacyStatementURLLabel", "Data privacy information of the service") +

+ #end +
+

+ #springMessageText("idp.attribute-release.confirmationQuestion", "The information above would be shared with the service if you proceed. Do you agree to release this information to the service every time you access it?") +

+ #if ($attributeReleaseFlowDescriptor.doNotRememberConsentAllowed || $attributeReleaseFlowDescriptor.globalConsentAllowed) +
+ #springMessageText("idp.attribute-release.consentMethod", "Select an information release consent duration:") + #end + #if ($attributeReleaseFlowDescriptor.doNotRememberConsentAllowed) +

+ + +

    +
  • #springMessageText("idp.attribute-release.doNotRememberConsentItem", "I agree to send my information this time.")
  • +
+

+ #end + #if ($attributeReleaseFlowDescriptor.doNotRememberConsentAllowed || $attributeReleaseFlowDescriptor.globalConsentAllowed) +

+ + +

    +
  • #springMessageText("idp.attribute-release.rememberConsentItem", "I agree that the same information will be sent automatically to this service in the future.")
  • +
+

+ #end + #if ($attributeReleaseFlowDescriptor.globalConsentAllowed) +

+ + +

    +
  • #springMessageText("idp.attribute-release.globalConsentItem", "I agree that all of my information will be released to any service.")
  • +
+

+ #end + #if ($attributeReleaseFlowDescriptor.doNotRememberConsentAllowed || $attributeReleaseFlowDescriptor.globalConsentAllowed) + #springMessageText("idp.attribute-release.consentMethodRevoke", "This setting can be revoked at any time with the checkbox on the login page.") +
+ #end +

+ + +

+
+
+
+ + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/intercept/expiring-password.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/intercept/expiring-password.vm new file mode 100644 index 0000000000000..4395844a200c5 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/intercept/expiring-password.vm @@ -0,0 +1,54 @@ +## +## Velocity Template for expiring password view +## +## Velocity context will contain the following properties +## flowExecutionUrl - the form action location +## flowRequestContext - the Spring Web Flow RequestContext +## flowExecutionKey - the SWF execution key (this is built into the flowExecutionUrl) +## profileRequestContext - root of context tree +## authenticationContext - context with authentication request information +## authenticationErrorContext - context with login error state +## authenticationWarningContext - context with login warning state +## ldapResponseContext - context with LDAP state (if using native LDAP) +## encoder - HTMLEncoder class +## request - HttpServletRequest +## response - HttpServletResponse +## environment - Spring Environment object for property resolution +## custom - arbitrary object injected by deployer +## + + + + + + #springMessageText("idp.title", "Web Login Service") + + + + + +
+
+
+ #springMessageText( +

#springMessageText("idp.login.expiringSoon", "Your password will be expiring soon!")

+
+ +
+

#springMessageText("idp.login.changePassword", "To create a new password now, go to") + #.

+

#springMessageText("idp.login.proceedBegin", "Your login will proceed in 20 seconds or you may click") + #springMessageText("idp.login.proceedHere", "here") + #springMessageText("idp.login.proceedEnd", "to continue").

+
+
+ +
+ +
+ +
+ + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/intercept/impersonate.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/intercept/impersonate.vm new file mode 100644 index 0000000000000..2bae957b2494a --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/intercept/impersonate.vm @@ -0,0 +1,90 @@ +## +## Velocity Template for expiring password view +## +## Velocity context will contain the following properties +## flowExecutionUrl - the form action location +## flowRequestContext - the Spring Web Flow RequestContext +## flowExecutionKey - the SWF execution key (this is built into the flowExecutionUrl) +## profileRequestContext - root of context tree +## rpUIContext - the context with SP UI information from the metadata +## encoder - HTMLEncoder class +## request - HttpServletRequest +## response - HttpServletResponse +## environment - Spring Environment object for property resolution +## custom - arbitrary object injected by deployer +## +#set ($rpContext = $profileRequestContext.getSubcontext('net.shibboleth.idp.profile.context.RelyingPartyContext')) + + + + + + #springMessageText("idp.title", "Web Login Service") + + + + +
+
+
+ #springMessageText( +

#springMessageText("idp.impersonate.header", "Account Impersonation")

+
+ +
+ +
+ + #set ($serviceName = $rpUIContext.serviceName) + #if ($serviceName && !$rpContext.getRelyingPartyId().contains($serviceName)) + + $encoder.encodeForHTML($serviceName) + + #end + + + #springMessageText("idp.impersonate.text", "Enter an account name to impersonate to this service or continue normally.") + + +
+ + + + + +
+ +
+ +
+ +
+ +
+ +
+ +
+
+ +
+ +
+ +
+ + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/intercept/terms-of-use.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/intercept/terms-of-use.vm new file mode 100644 index 0000000000000..1bf12c7e7f1b6 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/intercept/terms-of-use.vm @@ -0,0 +1,67 @@ +## +## Velocity Template for DisplayTermsOfUsePage view-state +## +## Velocity context will contain the following properties : +## +## encoder - HTMLEncoder class +## flowExecutionKey - SWF execution key (this is built into the flowExecutionUrl) +## flowExecutionUrl - form action location +## flowRequestContext - Spring Web Flow RequestContext +## request - HttpServletRequest +## response - HttpServletResponse +## rpUIContext - context with SP UI information from the metadata +## termsOfUseId - terms of use ID to lookup message strings +## environment - Spring Environment object for property resolution +#set ($serviceName = $rpUIContext.serviceName) +#set ($rpOrganizationLogo = $rpUIContext.getLogo()) +## + + + + + + + #springMessageText("${termsOfUseId}.title", "Terms of Use") + + +
+
+ + #if ($rpOrganizationLogo) + + #end +
+ #if ($rpOrganizationLogo) +
+

#springMessageText("${termsOfUseId}.title", "Terms of Use")

+
+ #end +
+ #springMessageText("${termsOfUseId}.text", "Terms of Use Text...") +
+
+
+
+ +
+
+
+
+ + + #if ($requireCheckbox) +

#springMessageText("idp.terms-of-use.required", "Please check this box if you want to proceed.")

+ #end + +
+
+
+
+
+ +
+
+ + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/login-error.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/login-error.vm new file mode 100644 index 0000000000000..44676b3219081 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/login-error.vm @@ -0,0 +1,24 @@ +## Velocity Template for login error message production, included by login.vm +## +## authenticationErrorContext - context containing error data, if available +## +#if ($authenticationErrorContext && $authenticationErrorContext.getClassifiedErrors().size() > 0 && $authenticationErrorContext.getClassifiedErrors().iterator().next() != "ReselectFlow") + ## This handles errors that are classified by the message maps in the authentication config. + #set ($eventId = $authenticationErrorContext.getClassifiedErrors().iterator().next()) + #set ($eventKey = $springMacroRequestContext.getMessage("$eventId", "login")) + #set ($message = $springMacroRequestContext.getMessage("${eventKey}.message", "Login Failure: $eventId")) +#elseif ($authenticationErrorContext && $authenticationErrorContext.getExceptions().size() > 0) + ## This handles login exceptions that are left unclassified. + #set ($loginException = $authenticationErrorContext.getExceptions().get(0)) + #if ($loginException.getMessage()) + #set ($message = "Login Failure: $loginException.getMessage()") + #else + #set ($message = $loginException.toString()) + #end +#end + +#if ($message) +
+

$encoder.encodeForHTML($message)

+
+#end diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/login.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/login.vm new file mode 100644 index 0000000000000..4ebf9bfc6a62f --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/login.vm @@ -0,0 +1,144 @@ +## +## Velocity Template for DisplayUsernamePasswordPage view-state +## +## Velocity context will contain the following properties +## flowExecutionUrl - the form action location +## flowRequestContext - the Spring Web Flow RequestContext +## flowExecutionKey - the SWF execution key (this is built into the flowExecutionUrl) +## profileRequestContext - root of context tree +## authenticationContext - context with authentication request information +## authenticationErrorContext - context with login error state +## authenticationWarningContext - context with login warning state +## ldapResponseContext - context with LDAP state (if using native LDAP) +## rpUIContext - the context with SP UI information from the metadata +## extendedAuthenticationFlows - collection of "extended" AuthenticationFlowDescriptor objects +## passwordPrincipals - contents of the shibboleth.authn.Password.PrincipalOverride bean +## encoder - HTMLEncoder class +## request - HttpServletRequest +## response - HttpServletResponse +## environment - Spring Environment object for property resolution +## custom - arbitrary object injected by deployer +## +#set ($rpContext = $profileRequestContext.getSubcontext('net.shibboleth.idp.profile.context.RelyingPartyContext')) +#set ($username = $authenticationContext.getSubcontext('net.shibboleth.idp.authn.context.UsernamePasswordContext', true).getUsername()) +#set ($passwordEnabled = false) +#if (!$passwordPrincipals or $passwordPrincipals.isEmpty() or $authenticationContext.isAcceptable($passwordPrincipals)) + #set ($passwordEnabled = true) +#end +## + + + + + + #springMessageText("idp.title", "Web Login Service") + + + +
+
+
+ #springMessageText( +
+ +
+
+ #parse("login-error.vm") + +
+ + #set ($serviceName = $rpUIContext.serviceName) + #if ($serviceName && !$rpContext.getRelyingPartyId().contains($serviceName)) + + #springMessageText("idp.login.loginTo", "Login to") $encoder.encodeForHTML($serviceName) + + #end + + #if ($passwordEnabled) +
+ + +
+ +
+ + +
+ + ## You may need to modify this to taste, such as changing the flow name it's checking for to authn/MFA. + #if (!$authenticationContext.getActiveResults().containsKey('authn/Password')) +
+ + +
+ #end + + #end + +
+ + +
+ + #if ($passwordEnabled) +
+ +
+ #end + + #foreach ($extFlow in $extendedAuthenticationFlows) + #if ($authenticationContext.isAcceptable($extFlow) and $extFlow.apply(profileRequestContext)) +
+ +
+ #end + #end +
+ + #* + // + // SP Description & Logo (optional) + // These idpui lines will display added information (if available + // in the metadata) about the Service Provider (SP) that requested + // authentication. These idpui lines are "active" in this example + // (not commented out) - this extra SP info will be displayed. + // Remove or comment out these lines to stop the display of the + // added SP information. + // + *# + #set ($logo = $rpUIContext.getLogo()) + #if ($logo) + $encoder.encodeForHTMLAttribute($serviceName) + #end + #set ($desc = $rpUIContext.getServiceDescription()) + #if ($desc) + $encoder.encodeForHTML($desc) + #end + +
+ +
+
+ +
+ +
+
+ + + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/logout-complete.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/logout-complete.vm new file mode 100644 index 0000000000000..d780252c9062f --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/logout-complete.vm @@ -0,0 +1,59 @@ +## +## Velocity Template for logout flow's concluding view-state (no propagation) +## +## Velocity context will contain the following properties +## flowExecutionUrl - the form action location +## flowRequestContext - the Spring Web Flow RequestContext +## flowExecutionKey - the SWF execution key (this is built into the flowExecutionUrl) +## profileRequestContext - root of context tree +## logoutContext - context with SPSession details for logout operation +## multiRPContext - context with RelyingPartyContexts and possibly SP UI information from the metadata +## encoder - HTMLEncoder class +## request - HttpServletRequest +## response - HttpServletResponse +## environment - Spring Environment object for property resolution +## custom - arbitrary object injected by deployer +## + + + + + + #springMessageText("idp.title", "Web Login Service") + + + + +
+
+
+ #springMessageText( +
+ +
+
+

#springMessageText("idp.logout.local", "You elected not to log out of all the applications accessed during your session.")

+
+ +
+
+ + + #if ( $profileRequestContext.getProfileId().contains("saml2/logout") ) + + #end + +
+ +
+
+ + + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/logout-propagate.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/logout-propagate.vm new file mode 100644 index 0000000000000..86b3fa14d6500 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/logout-propagate.vm @@ -0,0 +1,58 @@ +## +## Velocity Template for logout flow's concluding view-state (with propagation) +## +## Velocity context will contain the following properties +## flowExecutionUrl - the form action location +## flowRequestContext - the Spring Web Flow RequestContext +## flowExecutionKey - the SWF execution key (this is built into the flowExecutionUrl) +## profileRequestContext - root of context tree +## logoutContext - context with SPSession details for logout operation +## multiRPContext - context with RelyingPartyContexts and possibly SP UI information from the metadata +## htmlEncoder - HTMLEncoder class +## urlEncoder - urlEncoder class +## codecUtil - CodecUtil class +## request - HttpServletRequest +## response - HttpServletResponse +## environment - Spring Environment object for property resolution +## custom - arbitrary object injected by deployer +## + + + + + + #springMessageText("idp.title", "Web Login Service") + + + + + +
+
+
+ #springMessageText( +
+ +
+
+

#springMessageText("idp.logout.attempt", "Attempting to log out of the following services:")

+ #parse("logout/propagate.vm") +
+ +
+
+ +
+ +
+
+ + + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/logout.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/logout.vm new file mode 100644 index 0000000000000..d31ae0e6a019e --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/logout.vm @@ -0,0 +1,107 @@ +## +## Velocity Template for logout flow's starting view-state +## +## Velocity context will contain the following properties +## flowExecutionUrl - the form action location +## flowRequestContext - the Spring Web Flow RequestContext +## flowExecutionKey - the SWF execution key (this is built into the flowExecutionUrl) +## profileRequestContext - root of context tree +## logoutContext - context with SPSession details for logout operation +## multiRPContext - context with RelyingPartyContexts and possibly SP UI information from the metadata +## encoder - HTMLEncoder class +## request - HttpServletRequest +## response - HttpServletResponse +## environment - Spring Environment object for property resolution +## custom - arbitrary object injected by deployer +## +#set ($rpContext = $profileRequestContext.getSubcontext("net.shibboleth.idp.profile.context.RelyingPartyContext")) +#if ($rpContext) +#set ($rpUIContext = $rpContext.getSubcontext("net.shibboleth.idp.ui.context.RelyingPartyUIContext")) +#end + + + + + + #if ( $logoutContext and !$logoutContext.getSessionMap().isEmpty() ) + + #end + #springMessageText("idp.title", "Web Login Service") + + + + +
+
+
+ #springMessageText( +
+ +
+
+

This page is displayed when a logout operation at the Identity Provider completes. This page is an example + and should be customized. It is not fully internationalized because the presentation will be a highly localized + decision, and we don't have a good suggestion for a default.

+
+ + #if ($rpContext) +

#springMessageText("idp.logout.sp-initiated", "You have been logged out of the following service:")

+
+ #if ($rpUIContext) + $encoder.encodeForHTML($rpUIContext.getServiceName()) + #else + $encoder.encodeForHTML($rpContext.getRelyingPartyId()) + #end +
+
+ #end + + #if ( $logoutContext and !$logoutContext.getSessionMap().isEmpty() ) +

#springMessageText("idp.logout.ask", "Would you like to attempt to log out of all services accessed during your session? Please select Yes or No to ensure the logout operation completes, or wait a few seconds for Yes.")

+
+ +
+ + +
+ +
+

#springMessageText("idp.logout.contactServices", "If you proceed, the system will attempt to contact the following services:")

+
    + #foreach ($sp in $logoutContext.getSessionMap().keySet()) + #set ($rpCtx = $multiRPContext.getRelyingPartyContextById($sp)) + #if ($rpCtx) + #set ($rpUIContext = $rpCtx.getSubcontext("net.shibboleth.idp.ui.context.RelyingPartyUIContext")) + #end + #if ($rpUIContext and $rpUIContext.getServiceName()) +
  1. $encoder.encodeForHTML($rpUIContext.getServiceName())
  2. + #else +
  3. $encoder.encodeForHTML($sp)
  4. + #end + #end +
+ #else +

#springMessageText("idp.logout.complete", "The logout operation is complete, and no other services appear to have been accessed during this session.")

+ + + #end + +
+ +
+
+ +
+ +
+
+ + + \ No newline at end of file diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/spnego-unavailable.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/spnego-unavailable.vm new file mode 100644 index 0000000000000..3673f02a21e0e --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/spnego-unavailable.vm @@ -0,0 +1,49 @@ +## +## Velocity Template for SPNEGO unauthorized page +## +## This is not a Spring Webflow view, but a special view internal to the +## SPNEGO login flow, so it doesn't contain all of the usual SWF variables. +## +## Velocity context will contain the following properties +## encoder - HTMLEncoder class +## request - HttpServletRequest +## response - HttpServletResponse +## profileRequestContext - root of context tree +## errorUrl - URL to call to indicate error and return back to the login flow +## +#set ($eventKey = $springMacroRequestContext.getMessage("SPNEGOUnavailable", "spnego-unavailable")) + + + + + + #springMessageText("idp.title", "Web Login Sevice") - #springMessageText("${eventKey}.title", "Error") + + + + +
+
+
+ #springMessageText( +

#springMessageText("idp.title", "Web Login Service") - #springMessage("idp.title.suffix", "Error")

+
+ +
+ #springMessageText("${eventKey}.message", "Your web browser doesn't support authentication with your desktop login credentials.") + +
+
+ +
+ +
+
+ + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/views/user-prefs.vm b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/user-prefs.vm new file mode 100644 index 0000000000000..8de0503cd77dc --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/views/user-prefs.vm @@ -0,0 +1,60 @@ +## +## Velocity Template for user preferences view +## +## Velocity context will contain the following properties +## request - HttpServletRequest +## response - HttpServletResponse +## environment - Spring Environment object for property resolution +## custom - arbitrary object injected by deployer +## + + + + + + #springMessageText("idp.userprefs.title", "Web Login Service") - #springMessageText("idp.userprefs.title.suffix", "Login Preferences") + + + + +
+
+
+ #springMessageText( +

#springMessageText("idp.title", "Web Login Service") - #springMessageText("idp.userprefs.title.suffix", "Login Preferences")

+

+ #springMessage("idp.userprefs.info") +

+
+ + + + +
+ +
+ +
+
+ + + diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/css/consent.css b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/css/consent.css new file mode 100644 index 0000000000000..5daabeed01816 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/css/consent.css @@ -0,0 +1,150 @@ +.box { + width:600px; + margin-left: auto; + margin-right: auto; + margin-top: 50px; + background-color: white; + -webkit-box-shadow: 1px 1px 15px #999999; + -moz-box-shadow: 1px 1px 15px #999999; + box-shadow: 1px 1px 15px #999999; + -webkit-border-radius: 8px; + -moz-border-radius: 8px; + border-radius: 8px; + overflow: auto; + padding: 1.268em; +} + +body { + font-family:Verdana, Geneva, sans-serif; + font-size: 12px; +} + +h1 { + font-size: 13px; + padding-bottom: 12px; +} + +a { + color: #00247D; + text-decoration: underline; +} + +a:visited { + color: #00247D; + text-decoration: underline; +} + +a:focus, a:hover, a:active { + color: #F39800; + text-decoration: underline; +} + +#tou-content { + font-family:monospace; + width: 95%; + border: solid 1px #666; + margin: 4px; + padding: 10px; + overflow: hidden; +} + +#tou-content li{ + margin-bottom:10px; +} + +#tou-acceptance { + width: 95%; + border: solid 1px #666; + background-color: #F0F0F0; + margin: 4px; + padding: 10px; + text-align: left; + overflow: hidden; +} + +.service_name { + font-weight: bold; +} + +.service_description { + font-style: italic; +} + +.organization_name { +} + +#attributeRelease-consent { + width: 95%; + border: solid 1px #666; + background-color: #F0F0F0; + margin: 4px; + overflow: hidden; +} + +#attributeRelease { + width: 95%; + margin: 4px; + border: solid 1px black; + overflow: auto; +} + +#attributeRelease table { + border-collapse: collapse; + border: none 0px white; + width: 100%; +} + +#attributeRelease td { + padding: 3px 7px; + vertical-align: top; +} + +#attributeRelease th { + text-align: left; + font-size: 18px; + padding: 5px 7px; + background-color:#00247D; + color: white; +} + +#attributeRelease tr:nth-of-type(even) { + background-color: #E4E5E3; +} + +.federation_logo +{ + width: 50%; + float: left; + padding-top: 35px; + border: 0; +} +.organization_logo +{ + width: 50%; + float: right; + border: 0; +} + +.form-error { + padding: 0; + color: #B61601; +} + +/* Device specific styles */ +@media only screen and (max-device-width: 721px){ + .box { + width: auto; + box-shadow: none; + border-radius: 0; + -webkit-box-shadow: none; + -webkit-border-radius: 0; + -moz-box-shadow: none; + -moz-border-radius: 0; + padding: 0; + margin-top:0; + } + #tou-content, #tou-acceptance{ + /*width:87%;*/ + width:auto; + } +} diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/css/logout.css b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/css/logout.css new file mode 100644 index 0000000000000..26f18931c9d91 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/css/logout.css @@ -0,0 +1,12 @@ +/* Success/Failure indicators for logout propagation. 
*/ + +.success { + background: url(../images/success-32x32.png) no-repeat left center; + line-height: 36px; + padding-left: 36px; +} +.failure { + background: url(../images/failure-32x32.png) no-repeat left center; + line-height: 36px; + padding-left: 36px; +} diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/css/main.css b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/css/main.css new file mode 100644 index 0000000000000..116b31e444f91 --- /dev/null +++ b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/css/main.css @@ -0,0 +1,165 @@ +* { + margin: 0; + padding: 0; +} +header, footer, section, nav { + display: block; +} +html, body { + height: 100%; +} +body { + font-family:Verdana, Geneva, sans-serif; + font-size: 12px; + line-height: 1.5; + color: #717171; + background: #717171; +} +a:link, +a:visited { + text-decoration: none; + color: #717171; +} +img { + max-width: 100%; + margin-bottom: 12px; +} + +.wrapper { + background: #ffffff; +} + +.container { + position: relative; + left: 34%; + width: 540px; + margin-left: -270px; +} +.container-footer { + padding-top: 12px; +} +@media only screen and (max-width: 1020px) { + .container { + left: 45%; + } +} +@media only screen and (max-width: 650px) { + .container { + position: static; + margin: 0 auto; + width: 280px; + } +} + +header { + padding: 20px 0; +} + +.logo img { + border: none; +} +@media only screen and (max-width: 650px) { + .logo img { + display: none; + } + .logo { + background: url(../images/dummylogo-mobile.png) no-repeat top center; + display: block; + height: 115px; + width: 100px; + margin: 0 auto; + } +} + +.content { + padding-bottom: 80px; + overflow: hidden; +} + +.column { + float: left; +} +.column.one { + width: 50%; + margin-right: 48px; +} + +form { + width: 240px; + padding-bottom: 21px; +} +form label { /* labels are hidden */ + font-weight: bold; +} +form legend { + font-size:1.2em; + margin-bottom: 12px; +} +.form-element-wrapper { + margin-bottom: 12px; +} +.form-element { + width: 100%; + padding: 13px 12px; + border: none; + font-size: 14px; + border-radius: 4px; + -webkit-border-radius: 4px; + -moz-border-radius: 4px; +} +.form-field { + color: #B7B7B7; + border: 1px solid #B7B7B7; +} +.form-field-focus, +.form-field:focus, +input[type="text"]:focus { + color: #333333; + border-color: #333; +} +.form-button { + background: #B61601; + box-sizing: content-box; + -moz-box-sizing: content-box; + color: #ffffff; + cursor: pointer; +} +.form-button:hover { + background: #FF6400; +} +.form-error { + padding: 0; + color: #B61601; +} + +.list-help { + margin-top: 40px; /* offset padding on first anchor */ + list-style: none; +} +.list-help-item a { + display: block; + padding: 6px 0; +} +.item-marker { + color: #be0000; +} + +footer { + color: #ffffff; + font-size: 11px; + background: #717171; +} +.footer-text { + margin-bottom: 12px; +} +.footer-links a:link, +.footer-links a:visited { + color: #ffffff; + font-weight: bold; +} +.footer-links a:after { + content: "\00a0\00a0\00a0|\00a0\00a0"; +} +.footer-links a.last:after { + content: ""; +} diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/images/dummylogo-mobile.png b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/images/dummylogo-mobile.png new file mode 100644 index 0000000000000..8ba3c95a12a93 Binary files /dev/null and b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/images/dummylogo-mobile.png differ diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/images/dummylogo.png 
b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/images/dummylogo.png new file mode 100644 index 0000000000000..e89ede6e2089b Binary files /dev/null and b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/images/dummylogo.png differ diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/images/failure-32x32.png b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/images/failure-32x32.png new file mode 100644 index 0000000000000..3c48e4669dccf Binary files /dev/null and b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/images/failure-32x32.png differ diff --git a/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/images/success-32x32.png b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/images/success-32x32.png new file mode 100644 index 0000000000000..aa512048d8fe9 Binary files /dev/null and b/x-pack/test/idp-fixture/idp/shibboleth-idp/webapp/images/success-32x32.png differ diff --git a/x-pack/test/idp-fixture/openldap/certs/README b/x-pack/test/idp-fixture/openldap/certs/README new file mode 100644 index 0000000000000..d5c95b625c44b --- /dev/null +++ b/x-pack/test/idp-fixture/openldap/certs/README @@ -0,0 +1,5 @@ +certtool -p --outfile ca_server.key +certtool -s --load-privkey ca_server.key --template templates/ca_server.conf --outfile ca_server.pem +certtool -p --sec-param high --outfile ldap_server.key +certtool -c --load-privkey ldap_server.key --load-ca-certificate ca_server.pem --load-ca-privkey ca_server.key --template templates/ldap_server.conf --outfile ldap_server.pem +keytool -importcert -file ca_server.pem -alias idp-fixture-ca -keystore ca.jks -noprompt -storepass changeit diff --git a/x-pack/test/idp-fixture/openldap/certs/ca.jks b/x-pack/test/idp-fixture/openldap/certs/ca.jks new file mode 100644 index 0000000000000..0b1e25b8c0f8b Binary files /dev/null and b/x-pack/test/idp-fixture/openldap/certs/ca.jks differ diff --git a/x-pack/test/idp-fixture/openldap/certs/ca_server.key b/x-pack/test/idp-fixture/openldap/certs/ca_server.key new file mode 100644 index 0000000000000..170940eb1e72e --- /dev/null +++ b/x-pack/test/idp-fixture/openldap/certs/ca_server.key @@ -0,0 +1,182 @@ +Public Key Info: + Public Key Algorithm: RSA + Key Security Level: High (3072 bits) + +modulus: + 00:bb:4f:a2:af:4f:c2:44:59:c9:5d:99:29:4f:7c:46 + 85:71:11:15:8e:7d:5c:37:5b:94:26:53:e8:c1:0c:3e + ef:cf:4c:51:be:30:f6:5f:3f:80:b4:0a:8b:c9:80:84 + f7:47:7c:c6:76:b1:63:8d:ab:67:b2:17:8d:df:4a:e7 + aa:f0:56:eb:e3:80:17:d5:ea:25:55:d5:5a:39:ab:9c + b1:b1:94:ec:89:1f:99:ae:37:77:17:f0:2f:85:35:d1 + b2:7d:86:2e:8d:ab:4b:64:3a:e0:28:e8:74:9a:b3:84 + fb:cb:89:94:a8:50:25:ec:b1:db:fe:b0:60:99:f0:7e + 16:8e:89:b2:82:ee:07:ce:84:be:cb:e6:fa:c0:47:dc + ca:77:7b:73:4b:61:12:c6:1c:a9:a7:06:72:fb:31:5c + e4:94:d9:b9:f2:c7:c6:78:4f:75:5e:61:bd:a2:f8:35 + 97:06:d1:40:0b:30:bf:6f:35:73:7e:df:8a:ba:2f:9b + ba:4b:60:91:d0:93:90:97:b6:43:f4:d6:8e:a9:61:18 + 7f:e8:dc:0f:7c:c5:94:66:f9:40:e7:c8:91:b7:12:de + 0c:0c:b3:66:86:98:b3:4a:c1:70:f2:00:78:0e:c2:bb + 3a:b7:1e:a5:c3:f3:b0:06:2f:1b:45:68:23:97:e5:42 + 7a:da:30:d4:e5:4d:9e:57:6f:08:2d:ce:94:68:94:31 + 31:e2:a2:8a:3d:f7:18:6a:e2:c4:b8:fe:58:b2:d7:95 + 6c:f7:19:c1:40:4d:c4:5f:98:5a:2f:34:d1:70:cd:d8 + 3f:1a:6d:4a:d0:d4:dc:8a:7c:a0:28:39:fe:f1:00:71 + b3:66:6e:fc:96:fb:a1:5b:12:8b:ef:f2:f4:4c:53:02 + b7:17:58:c7:33:57:42:c4:f8:66:0b:db:b4:a4:f7:9a + 87:3c:47:0d:09:c9:cd:e9:fc:2e:39:0a:91:30:96:a0 + dd:61:c1:31:d9:b3:3f:81:a1:51:fc:a9:14:6c:41:b2 + 6b: + +public exponent: + 01:00:01: + +private exponent: + 4b:e3:5f:ce:17:4a:65:d6:da:b6:89:2f:2b:7c:f6:35 + 
6a:31:b9:c2:19:18:1b:75:86:f8:60:56:94:a0:19:7f + 9f:45:14:79:31:ed:1f:79:e2:f3:0e:b2:c3:d8:c6:06 + 49:78:4c:10:1a:9e:72:ef:56:fb:d8:7f:2a:50:76:bb + d2:d3:e4:8e:2f:7f:78:b3:7c:9c:e1:eb:c4:21:bb:42 + 62:e8:2b:4d:02:f8:88:95:00:33:3a:a4:86:1b:df:f1 + a8:d9:8c:52:e9:6b:8a:18:30:69:62:84:9b:88:d0:00 + 13:de:91:d3:ff:64:de:18:d1:40:c6:c4:dc:99:8a:fc + 28:93:b1:37:4a:a8:ef:0a:e9:22:43:5a:a4:3c:79:3f + ae:58:dd:c5:54:76:fe:bc:ee:1f:35:85:b0:2f:b7:ab + 04:83:91:ca:c3:15:ad:f8:c3:5c:91:cc:fb:12:04:e4 + 43:66:d3:c3:b4:e4:5a:72:15:70:f9:b0:82:ee:76:f0 + b8:d7:2c:fc:07:2b:64:73:a6:64:1d:d2:d9:93:88:f3 + a7:9e:20:41:03:75:f8:14:80:63:1f:df:2f:85:c7:02 + 18:c1:f6:f6:2d:65:0d:10:f8:9a:61:a8:06:e8:20:1c + 8e:42:e7:a2:25:37:85:75:24:b3:86:a8:b4:6f:cd:5b + be:19:bb:95:53:2b:a7:a5:f1:e2:44:41:47:5e:5c:39 + 35:f7:bd:ac:fe:76:07:da:72:ce:22:b9:60:97:97:9b + c8:ee:24:7a:28:97:d8:48:57:8a:40:f0:64:6c:2d:1f + f4:ff:d3:9f:a0:93:16:28:33:e3:13:e3:2d:76:30:70 + a4:de:3d:f4:1b:a0:f3:55:25:7e:98:7b:26:8c:8c:63 + cb:a1:e9:dd:b4:e8:18:31:aa:ab:87:b8:24:6d:de:5b + 4e:14:2e:d1:27:9b:2e:f0:7d:c5:7a:be:68:15:b0:1d + 57:b4:34:7f:88:ff:03:61:28:12:e4:58:50:82:7b:81 + + +prime1: + 00:f3:95:0d:b3:ea:5c:48:1a:d8:2f:24:a4:09:c3:56 + 21:a5:96:44:ff:d9:6b:42:f3:c6:87:e6:a9:c1:6e:8e + aa:cb:c7:f0:d9:28:b5:48:36:dc:a6:e1:47:f4:5f:64 + 25:61:1e:6c:4c:01:45:02:2a:22:29:2f:26:a1:ba:15 + 62:f4:c7:5d:04:04:9c:89:8a:1b:e3:95:e5:17:5f:8d + 33:65:b3:42:e6:36:16:5e:b7:bc:97:03:21:15:11:36 + 3c:e6:b6:dc:c3:37:f4:1a:57:7a:bc:c6:4a:83:3f:ca + 2b:64:9e:9a:14:2a:92:c8:a3:f7:4a:fa:39:e1:b3:07 + a7:19:a6:5d:19:71:09:e1:6b:a4:ec:5f:d2:08:79:c8 + cb:4b:ac:b0:bb:96:ae:95:9a:f6:6d:1e:19:79:33:c7 + 4e:e2:bf:e2:af:16:a8:81:46:33:14:a6:cf:f3:20:cc + e3:4b:3a:93:19:ff:c1:8e:61:e1:bb:f2:03:d9:66:fa + 93: + +prime2: + 00:c4:dc:32:8d:8a:aa:71:17:6b:da:04:61:55:bd:5b + de:37:fe:85:c5:9f:31:6f:d1:68:de:87:b7:86:d5:39 + c8:c4:3c:5f:9d:d0:9b:69:74:3b:ca:85:84:b6:09:cc + bd:ba:e4:f0:af:47:dd:25:10:5e:25:72:2d:d5:4e:40 + f5:8a:09:09:97:40:34:60:1e:d5:88:b7:4d:04:3b:51 + 01:2a:a0:42:bf:12:0b:51:d0:38:df:d3:7f:c7:92:26 + d9:5c:e1:e0:52:12:f0:bc:08:cc:c1:90:70:82:11:02 + 77:a7:00:0c:73:a8:7c:e8:61:64:f4:ee:42:27:82:31 + 4a:56:f9:ce:49:62:96:b6:c2:32:f3:dc:01:a5:cf:04 + 3b:ef:68:01:2e:df:ea:5e:79:b7:39:ac:d5:49:c9:d0 + 82:5a:75:60:c1:fe:8a:00:b3:b2:e3:ab:d5:e2:54:0a + dd:8d:0f:f3:7b:00:40:82:dd:86:6d:7c:69:ec:25:57 + c9: + +coefficient: + 7e:50:e0:a0:d5:37:7e:fb:33:36:85:96:1c:5b:ee:12 + 61:de:88:d8:e1:4d:8e:a8:ce:d5:b3:59:78:4f:53:8c + 82:21:f3:e8:41:07:34:75:89:69:02:72:a0:e2:82:c6 + 06:20:0a:ab:a1:6f:9c:fc:62:b2:80:89:54:29:e2:96 + e2:63:2d:c0:f6:1f:22:6a:7f:43:28:aa:8a:d2:f9:62 + 71:42:b8:01:1c:1a:3d:16:1f:37:e8:b9:c4:1e:5b:e6 + f9:b9:77:d4:aa:82:9c:14:1c:97:ab:a0:84:e8:2a:ed + 73:96:6f:14:ee:f7:a3:42:97:07:b6:01:2a:7b:c6:93 + ca:cd:bf:e1:8b:37:c9:48:b5:ce:95:19:59:ef:e3:f6 + df:85:5e:6e:b5:8c:3e:43:ea:bf:8a:af:53:51:aa:43 + 9f:00:9f:88:04:2c:cc:cf:f1:28:db:00:e6:81:f4:a7 + e1:85:64:b4:65:f8:ce:d9:b1:50:64:12:2a:ab:76:f8 + + +exp1: + 13:38:18:8b:b0:9b:35:69:40:d3:7f:6d:9e:03:96:a1 + 33:02:87:51:34:a3:7c:f5:e5:1d:45:fa:f0:f3:3b:38 + 88:b9:ae:ec:43:85:02:3c:a9:ee:54:ea:2b:82:f7:50 + e7:b7:d1:cd:b1:6f:f3:7a:c5:95:40:4e:29:06:98:b7 + 65:1e:f1:dc:e6:4c:3f:a1:f1:0f:64:fd:5d:79:c1:97 + 63:97:e0:58:6f:be:16:1d:23:39:3b:1a:fa:21:90:88 + ff:9f:40:0d:fb:35:3c:7a:d8:89:5a:8f:f8:89:28:f7 + 81:89:ee:18:57:3f:9b:ab:2c:50:f3:2b:6b:fe:87:45 + 0d:06:0e:64:7a:81:d0:12:81:41:d1:51:ae:2f:85:c3 + 5c:a6:34:8f:a8:94:ff:e2:e9:3c:b4:da:47:b0:e9:31 + 
f5:3c:34:b3:a1:f3:0b:b8:31:41:98:e3:e1:88:1e:02 + 75:26:1b:03:3b:25:d7:89:2b:99:7b:c0:08:00:53:17 + + +exp2: + 00:9d:7a:f5:69:eb:52:db:f3:7b:90:c5:ce:43:b3:9e + a9:4f:2b:ea:41:a4:4e:fc:ff:8d:9f:ee:fb:ee:fd:e6 + 22:e8:70:cf:a0:c4:bf:7e:85:c0:4b:0b:b2:95:99:93 + 52:f8:0e:ab:47:dc:fa:58:eb:16:15:68:98:a7:b9:d9 + 17:53:9c:55:32:cc:e6:d9:39:f1:62:54:18:65:59:3a + 3c:ef:a7:92:9d:9d:46:b6:e5:16:8a:5f:7c:29:df:8d + fa:76:66:2f:78:18:b4:c2:07:5c:1d:40:79:f0:62:c1 + 11:8c:e3:ea:12:1f:e8:04:51:dd:5c:5e:b9:47:37:cc + 4c:86:f5:81:83:8d:3d:b9:e7:f2:b0:c7:ff:fd:6f:75 + 18:a9:35:0f:35:3b:65:16:bc:d1:c9:6e:ac:95:41:bf + ee:77:84:cc:a9:84:01:4b:fc:75:0f:d2:67:e0:8d:90 + bf:99:57:f8:54:1e:68:7d:07:cf:c1:08:35:07:f6:fb + 99: + + +Public Key PIN: + pin-sha256:8n9vHwv9uP15hLS09K0gyVdW8LzD7lVp0qIUFexwEho= +Public Key ID: + sha256:f27f6f1f0bfdb8fd7984b4b4f4ad20c95756f0bcc3ee5569d2a21415ec70121a + sha1:ffa9f3de78471ea8fe2e06ff753aa952ae96c0c9 + +-----BEGIN RSA PRIVATE KEY----- +MIIG4wIBAAKCAYEAu0+ir0/CRFnJXZkpT3xGhXERFY59XDdblCZT6MEMPu/PTFG+ +MPZfP4C0CovJgIT3R3zGdrFjjatnsheN30rnqvBW6+OAF9XqJVXVWjmrnLGxlOyJ +H5muN3cX8C+FNdGyfYYujatLZDrgKOh0mrOE+8uJlKhQJeyx2/6wYJnwfhaOibKC +7gfOhL7L5vrAR9zKd3tzS2ESxhyppwZy+zFc5JTZufLHxnhPdV5hvaL4NZcG0UAL +ML9vNXN+34q6L5u6S2CR0JOQl7ZD9NaOqWEYf+jcD3zFlGb5QOfIkbcS3gwMs2aG +mLNKwXDyAHgOwrs6tx6lw/OwBi8bRWgjl+VCetow1OVNnldvCC3OlGiUMTHiooo9 +9xhq4sS4/liy15Vs9xnBQE3EX5haLzTRcM3YPxptStDU3Ip8oCg5/vEAcbNmbvyW ++6FbEovv8vRMUwK3F1jHM1dCxPhmC9u0pPeahzxHDQnJzen8LjkKkTCWoN1hwTHZ +sz+BoVH8qRRsQbJrAgMBAAECggGAS+NfzhdKZdbatokvK3z2NWoxucIZGBt1hvhg +VpSgGX+fRRR5Me0feeLzDrLD2MYGSXhMEBqecu9W+9h/KlB2u9LT5I4vf3izfJzh +68Qhu0Ji6CtNAviIlQAzOqSGG9/xqNmMUulrihgwaWKEm4jQABPekdP/ZN4Y0UDG +xNyZivwok7E3SqjvCukiQ1qkPHk/rljdxVR2/rzuHzWFsC+3qwSDkcrDFa34w1yR +zPsSBORDZtPDtORachVw+bCC7nbwuNcs/AcrZHOmZB3S2ZOI86eeIEEDdfgUgGMf +3y+FxwIYwfb2LWUNEPiaYagG6CAcjkLnoiU3hXUks4aotG/NW74Zu5VTK6el8eJE +QUdeXDk1972s/nYH2nLOIrlgl5ebyO4keiiX2EhXikDwZGwtH/T/05+gkxYoM+MT +4y12MHCk3j30G6DzVSV+mHsmjIxjy6Hp3bToGDGqq4e4JG3eW04ULtEnmy7wfcV6 +vmgVsB1XtDR/iP8DYSgS5FhQgnuBAoHBAPOVDbPqXEga2C8kpAnDViGllkT/2WtC +88aH5qnBbo6qy8fw2Si1SDbcpuFH9F9kJWEebEwBRQIqIikvJqG6FWL0x10EBJyJ +ihvjleUXX40zZbNC5jYWXre8lwMhFRE2POa23MM39BpXerzGSoM/yitknpoUKpLI +o/dK+jnhswenGaZdGXEJ4Wuk7F/SCHnIy0ussLuWrpWa9m0eGXkzx07iv+KvFqiB +RjMUps/zIMzjSzqTGf/BjmHhu/ID2Wb6kwKBwQDE3DKNiqpxF2vaBGFVvVveN/6F +xZ8xb9Fo3oe3htU5yMQ8X53Qm2l0O8qFhLYJzL265PCvR90lEF4lci3VTkD1igkJ +l0A0YB7ViLdNBDtRASqgQr8SC1HQON/Tf8eSJtlc4eBSEvC8CMzBkHCCEQJ3pwAM +c6h86GFk9O5CJ4IxSlb5zklilrbCMvPcAaXPBDvvaAEu3+peebc5rNVJydCCWnVg +wf6KALOy46vV4lQK3Y0P83sAQILdhm18aewlV8kCgcATOBiLsJs1aUDTf22eA5ah +MwKHUTSjfPXlHUX68PM7OIi5ruxDhQI8qe5U6iuC91Dnt9HNsW/zesWVQE4pBpi3 +ZR7x3OZMP6HxD2T9XXnBl2OX4FhvvhYdIzk7GvohkIj/n0AN+zU8etiJWo/4iSj3 +gYnuGFc/m6ssUPMra/6HRQ0GDmR6gdASgUHRUa4vhcNcpjSPqJT/4uk8tNpHsOkx +9Tw0s6HzC7gxQZjj4YgeAnUmGwM7JdeJK5l7wAgAUxcCgcEAnXr1aetS2/N7kMXO +Q7OeqU8r6kGkTvz/jZ/u++795iLocM+gxL9+hcBLC7KVmZNS+A6rR9z6WOsWFWiY +p7nZF1OcVTLM5tk58WJUGGVZOjzvp5KdnUa25RaKX3wp3436dmYveBi0wgdcHUB5 +8GLBEYzj6hIf6ARR3VxeuUc3zEyG9YGDjT255/Kwx//9b3UYqTUPNTtlFrzRyW6s +lUG/7neEzKmEAUv8dQ/SZ+CNkL+ZV/hUHmh9B8/BCDUH9vuZAoHAflDgoNU3fvsz +NoWWHFvuEmHeiNjhTY6oztWzWXhPU4yCIfPoQQc0dYlpAnKg4oLGBiAKq6FvnPxi +soCJVCniluJjLcD2HyJqf0MoqorS+WJxQrgBHBo9Fh836LnEHlvm+bl31KqCnBQc +l6ughOgq7XOWbxTu96NClwe2ASp7xpPKzb/hizfJSLXOlRlZ7+P234VebrWMPkPq +v4qvU1GqQ58An4gELMzP8SjbAOaB9KfhhWS0ZfjO2bFQZBIqq3b4 +-----END RSA PRIVATE KEY----- diff --git a/x-pack/test/idp-fixture/openldap/certs/ca_server.pem b/x-pack/test/idp-fixture/openldap/certs/ca_server.pem new 
file mode 100644 index 0000000000000..d090cbf6d3abc --- /dev/null +++ b/x-pack/test/idp-fixture/openldap/certs/ca_server.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEAzCCAmugAwIBAgIUGJBYO12hG0Uo/jCDsHqz9KNR25cwDQYJKoZIhvcNAQEL +BQAwGTEXMBUGA1UEAxMOTERBUCBTZXJ2ZXIgQ0EwHhcNMTkwMzA4MTY1MzI2WhcN +MjAwMzA3MTY1MzI2WjAZMRcwFQYDVQQDEw5MREFQIFNlcnZlciBDQTCCAaIwDQYJ +KoZIhvcNAQEBBQADggGPADCCAYoCggGBALtPoq9PwkRZyV2ZKU98RoVxERWOfVw3 +W5QmU+jBDD7vz0xRvjD2Xz+AtAqLyYCE90d8xnaxY42rZ7IXjd9K56rwVuvjgBfV +6iVV1Vo5q5yxsZTsiR+Zrjd3F/AvhTXRsn2GLo2rS2Q64CjodJqzhPvLiZSoUCXs +sdv+sGCZ8H4Wjomygu4HzoS+y+b6wEfcynd7c0thEsYcqacGcvsxXOSU2bnyx8Z4 +T3VeYb2i+DWXBtFACzC/bzVzft+Kui+buktgkdCTkJe2Q/TWjqlhGH/o3A98xZRm ++UDnyJG3Et4MDLNmhpizSsFw8gB4DsK7OrcepcPzsAYvG0VoI5flQnraMNTlTZ5X +bwgtzpRolDEx4qKKPfcYauLEuP5YsteVbPcZwUBNxF+YWi800XDN2D8abUrQ1NyK +fKAoOf7xAHGzZm78lvuhWxKL7/L0TFMCtxdYxzNXQsT4ZgvbtKT3moc8Rw0Jyc3p +/C45CpEwlqDdYcEx2bM/gaFR/KkUbEGyawIDAQABo0MwQTAPBgNVHRMBAf8EBTAD +AQH/MA8GA1UdDwEB/wQFAwMHBAAwHQYDVR0OBBYEFP+p8954Rx6o/i4G/3U6qVKu +lsDJMA0GCSqGSIb3DQEBCwUAA4IBgQCdoxcQxfKd209+TsXjymbrMSWBbfDHLQml +ocKF2EF92hOBJnAm5E8Z2+ZHAZ0hbsl8r1dSIhZuqb7GGE7KfN+mFTP0YlteciPg +0NDM0GsaryftkWe9Lwkt9M72vHP8sCjorskpmYWhHBWgkTfd5Uzso0w3wYNJpeuX +qGOjrjcfpRrDk/fyWUgVPkTBmmEEFWCJHSZF8N+BMHrTiw9UsJXBwbHa983Sm9Lq +pK61y2LTo9OHBHCFl0DmzHiH7AEg9RmJlUqMomn8b/gquIoplUchUtS/h7BVPgqz +w6vS3fE8FgHODthGv55xqhaPV23ZFv6QUlc80yzY3BhYyfu21O/gzo3wmKC6fq9L +pWK2CYgdtDAV0vjTchkYM12iJDLJBdNoLzVZrXqxFMWjVZN2N7GVyc3PGzyOAquf +LuuvSAhErt84mh8MwAbVie09iDTbqPgFjn+D1AK70ocfMVGM2lwOmk0vvCLHtxE7 +fh/9Tf0GS1BHkmgzWwXqTkCK71mJQvs= +-----END CERTIFICATE----- diff --git a/x-pack/test/idp-fixture/openldap/certs/dhparam.pem b/x-pack/test/idp-fixture/openldap/certs/dhparam.pem new file mode 100644 index 0000000000000..dd36741664100 --- /dev/null +++ b/x-pack/test/idp-fixture/openldap/certs/dhparam.pem @@ -0,0 +1,8 @@ +-----BEGIN DH PARAMETERS----- +MIIBCAKCAQEAtZwsen2sON6thsExpNTYxdZK1TyHdTvGbpzHZwHGSVftCjwrSz7D +8VO4hN6DVP4+Lrb0LeXp1k2jOm2a9yftoWysNmtjVq2KhVcGArH4m0krL87Fmwr3 +k3GdkLuyNkVYndoE9bVBphN3uq7jHRJxwEBge2lUf4tDdEFNlKXx4hRG1T4JW61N +ilvtJQ7Ei9xxjywDqn26H5KABSsfbsTXhjdjg3XH0Gy0SlD3x/8ft97ZX7nvHZzf +2TSyejCy7aW6Avpj6yReZGY5lzVnMvStm8rxCkM9cciZuSJK2mOPfl5cgCZWOux6 +0LLkh9qaZdrBi6dqVOp/A8SQ5/6Ee8/1uwIBAg== +-----END DH PARAMETERS----- diff --git a/x-pack/test/idp-fixture/openldap/certs/ldap_server.csr b/x-pack/test/idp-fixture/openldap/certs/ldap_server.csr new file mode 100644 index 0000000000000..db9f2e7d3a7bc --- /dev/null +++ b/x-pack/test/idp-fixture/openldap/certs/ldap_server.csr @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICWTCCAUECAQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA1sVcsoRGCjsZ/POI/Eek4NdBM9edkDbHf00L4Vg6 +qjKzoFvLOyEMalan4/vLvvHpY5oYhTn46m9rDDaIZD2vbNDkkzLDHTTRodSsaEu5 +nk7/knmjr6pIXMY9AKVqGOkBIcJOCnzE2PXMDcQY8RPLVSh5/flHaxoYkjXSxqgy +pp0Oo6D3/52B86br9r6XRAZXZtaPOdLAQODwUnr3ANI9RRkcN2ZVAIj2DYsrU2/j +z+hySr/PVBUs5r7ntfNSuEgju0HV0csApatvpjiXLpKQYuiedpzLG+fsSFSSONay +jDLCTz1fCc9j89j6RsiAKsemv3B6sFA59aqGaKusM62vmQIDAQABoAAwDQYJKoZI +hvcNAQELBQADggEBAMdzUH4W+ukDAIYQqMS/JcgriMqKfDbyVb9cynrZFvP3KUvr +40BB3mMJ3upNf9Ndk+V/JeK3hc7DKY6ZpZfJ1q6OnlxLZdp5FGEOrJbix2LYFNgr +3GMXooYiyOoiTkS6cOUUMTcA9KipZZ35MsMXx6DApJiYZtDzlMiUOca75DGl+UE3 +/mlcEwui+qI09WoOqKzMrdnX6XOGVMHtMBh63LPjGT9UcjDAWV6bRpvjXAswzG9y +bzJ5k3aaHsi859ADhzJNjK727tryN0Kr8cVN7AXLcEoi5nhU7Cc8JFW87Pkl7JL7 +O97Tb50CQHTqJ55AhlnC+jaGBVAcCGrQLFB69VI= +-----END CERTIFICATE REQUEST----- diff --git 
a/x-pack/test/idp-fixture/openldap/certs/ldap_server.key b/x-pack/test/idp-fixture/openldap/certs/ldap_server.key new file mode 100644 index 0000000000000..80c98bae8a6b8 --- /dev/null +++ b/x-pack/test/idp-fixture/openldap/certs/ldap_server.key @@ -0,0 +1,182 @@ +Public Key Info: + Public Key Algorithm: RSA + Key Security Level: High (3072 bits) + +modulus: + 00:cc:3e:53:8a:0e:c9:14:3d:f9:ed:bf:87:5d:11:9c + 4c:e3:04:5a:f7:53:11:36:ab:e2:60:2a:fc:cb:0a:8c + 17:c6:93:d4:a6:95:1f:5f:8c:c3:84:37:b2:7e:cd:da + 0e:7d:6d:d1:a0:90:84:8c:56:5d:ae:32:0b:9f:83:47 + 9f:95:ab:6e:d2:76:24:6b:fa:0d:e4:95:93:26:94:66 + 10:dd:39:ae:f8:4b:86:e2:33:56:3f:c6:13:71:4a:8f + 67:7a:90:8d:7f:98:df:09:87:fe:97:5a:55:92:df:9b + ea:eb:b8:5a:43:09:0c:38:22:98:67:d5:e3:69:5e:8b + cd:31:2c:b8:10:54:45:e9:e4:2c:10:83:e4:f8:de:07 + 3b:a6:9d:9b:6c:74:f8:b7:2b:0b:bb:1d:24:b7:55:86 + 82:23:2b:51:e7:63:21:ae:1b:86:b5:a8:c0:63:38:59 + 03:3b:69:c7:2d:6c:e2:ef:89:91:67:f2:75:0e:d5:9e + 18:ce:e6:c5:db:58:7e:2c:2a:5e:70:15:77:ad:e9:41 + f5:5c:bd:e8:67:17:c8:5c:fa:2d:7a:f3:5b:bf:51:f1 + ee:22:f9:c4:f1:21:40:c6:d9:57:8f:54:d0:be:7f:dd + 81:57:b6:8c:a2:2b:90:1c:58:2c:cf:b5:c8:93:34:5c + 88:be:91:7d:5f:4f:09:50:cb:7e:b7:05:16:86:07:3f + ec:a5:86:4c:45:99:98:77:84:39:1c:24:f9:24:8a:74 + be:dd:26:41:04:7b:23:71:79:85:07:c2:9e:cc:ba:ce + e5:4b:85:80:37:ee:57:50:74:94:f2:59:0c:a7:c3:36 + e2:8f:2c:e7:ee:d3:ee:82:bd:b5:80:bd:2a:a6:fb:0b + eb:f7:b9:c0:e9:20:99:88:f3:05:05:3d:d8:64:6c:d1 + 51:57:ad:3c:f3:8b:51:1d:ba:0b:8b:77:bf:84:ec:25 + 7d:18:d5:17:ad:6d:85:34:11:8b:e0:a7:78:b4:61:4c + f3: + +public exponent: + 01:00:01: + +private exponent: + 17:91:dd:bc:21:e6:ac:92:56:64:4c:65:07:a5:7f:9f + 13:d7:ad:d3:ba:df:52:aa:d5:4d:f9:c8:00:7a:70:1e + 52:49:3f:12:16:45:6f:96:63:fd:bf:1b:b7:2c:74:4c + 95:c6:f5:99:0c:f4:6e:e9:8c:98:0c:a8:70:b9:0a:ab + ec:ee:c2:b4:40:f4:05:0d:ef:4a:80:c4:61:80:60:a2 + f3:94:b3:8f:6d:cf:dc:0c:c6:98:47:78:78:eb:7c:cf + 19:42:d3:41:75:57:cf:d6:c4:d7:9c:92:21:87:f2:8b + 52:b6:8c:9c:0d:d0:3e:2e:27:de:5b:bd:89:ac:62:7f + c4:7b:ff:a5:ca:c3:e2:b0:f5:30:d6:b7:b6:cc:90:93 + 1e:b0:33:7e:79:a5:36:26:1e:8d:72:58:89:5b:b2:51 + 14:53:ec:78:c2:64:e1:9d:c5:96:f3:29:78:89:c9:23 + e9:58:54:a6:f6:58:de:01:c7:28:f2:be:f5:7e:4b:67 + 26:88:66:3e:9f:33:8d:93:9a:aa:6f:26:fa:a3:0f:6b + 0d:f1:4b:43:eb:75:26:17:ea:58:05:78:66:f2:d6:56 + 0c:90:fb:56:80:d1:0e:6d:96:53:5c:f0:f2:0e:76:5b + f7:f0:c5:9f:fb:82:9c:ff:fa:2c:b7:db:66:1e:06:cb + 71:8b:b3:f6:5d:40:6e:b4:6a:ed:fc:f3:7b:f5:b0:82 + 81:ba:99:9e:f8:1b:71:15:72:a7:34:a6:f2:d8:cd:4c + 6d:b2:db:d0:7a:45:7a:8a:ab:ea:34:89:dd:35:16:5f + d1:a7:d5:22:05:d2:fb:23:47:0e:98:ff:34:e5:9a:be + ac:20:4b:03:c1:2c:90:2d:58:2b:4a:7d:f3:70:3c:a7 + a3:ed:54:fc:46:fe:cd:0c:cc:fb:c6:c6:e3:f8:25:e8 + b0:2e:12:5f:9f:74:65:1c:17:6d:ff:d5:e3:8e:12:b8 + ea:a0:84:de:d3:c5:1d:62:48:39:05:28:20:09:a0:49 + + +prime1: + 00:d1:c7:15:99:9c:ca:fb:df:78:58:04:10:7d:1e:15 + af:6f:1a:58:d4:c6:1f:34:12:42:79:97:23:22:8c:b9 + b5:2f:dc:8d:31:37:01:6c:ee:d0:a5:3a:91:82:af:d0 + 2f:20:be:fb:e3:35:35:02:71:73:9e:8e:37:a5:e7:78 + db:08:e3:64:10:01:88:84:b2:c5:70:13:e3:80:36:b4 + f3:92:97:e1:47:f8:54:f9:1f:b3:c7:8b:a3:00:91:3e + b8:9b:58:e3:8d:d4:aa:0a:57:e6:77:50:b8:f2:5c:f4 + 60:79:f6:65:14:e1:ed:7b:e2:f3:53:81:72:1a:95:0d + 08:cd:9b:d2:d1:37:df:78:43:80:9e:f8:97:92:6c:28 + f3:43:4d:1e:4f:9e:e4:3d:0e:65:10:8d:26:49:f4:96 + 3f:d0:e8:fb:92:f3:db:4d:2a:8a:c6:30:5a:f3:bb:3e + 72:1b:f1:7d:07:94:ba:00:a3:6c:3d:12:38:b8:78:78 + c5: + +prime2: + 00:f9:3f:12:f5:b1:78:bd:2a:b3:40:63:67:bd:44:03 + 13:35:d3:b8:8e:32:e3:c4:81:f3:32:eb:e5:54:64:a3 + 
87:13:93:03:fa:66:9f:a4:b3:5c:89:85:b5:07:3b:3c + dd:4a:45:0f:7c:23:cb:7f:16:fc:75:d5:46:75:c3:df + 20:7a:e9:ca:93:20:ff:a1:85:04:e1:ce:2e:f6:be:f5 + 97:35:5a:25:c0:11:5b:8d:cc:35:8f:62:64:f3:11:f3 + 21:8a:18:0d:6e:dc:b3:a9:cf:0b:d5:3e:9d:41:67:95 + 19:02:2d:8f:72:6d:04:b0:d9:73:cb:0c:13:88:0a:ab + 02:4f:c4:7e:8d:be:b5:99:98:15:61:7d:db:14:59:44 + 41:f7:da:3d:be:2a:71:5c:27:71:0e:2f:c6:b1:32:ac + d7:2a:f1:41:7d:3b:d4:92:a2:5f:5b:cb:e1:b1:95:f7 + 5b:c6:09:f8:d6:80:02:81:44:f9:fd:19:cc:36:bf:5a + 57: + +coefficient: + 10:45:fc:ba:29:7c:26:04:e4:93:eb:de:23:5a:f7:aa + 34:5b:ff:3e:6a:3f:68:66:ba:32:3f:16:e1:0c:39:3a + bb:39:70:c4:1e:71:0b:55:67:ab:d7:f6:f2:d8:ec:9a + 49:89:b8:85:0a:fb:69:66:20:58:94:fa:2d:d8:97:47 + 23:46:4c:67:5c:c3:da:e0:75:80:40:b3:0c:b4:29:d1 + be:61:88:0a:cf:9e:a8:94:5c:f1:05:c4:32:ae:0e:c8 + df:05:e2:59:5e:25:3a:12:f6:83:bd:98:1f:86:af:51 + 9d:22:25:cf:60:39:d1:8d:40:ae:4e:c9:85:10:e1:93 + 2f:b5:8a:3f:d0:9e:0c:44:31:d1:66:d1:15:e1:c3:df + 49:30:46:22:86:ca:e3:cf:33:7d:8b:8c:2c:bc:ad:62 + ef:7f:88:80:9d:d7:ba:b2:c4:13:c9:49:99:f6:7e:4f + fa:71:f0:8c:ec:b7:89:79:1d:13:16:ee:e9:40:bf:bc + + +exp1: + 00:b1:3b:ec:89:ee:28:a9:08:b3:1a:59:c4:bc:90:d0 + 6d:7a:06:de:8a:56:21:42:d3:32:8a:9c:20:b9:2e:00 + 30:8e:91:5e:50:f8:52:22:00:db:12:de:1a:d6:39:48 + 40:a8:ff:f2:bc:91:d5:89:8c:e4:cc:4f:07:c1:09:81 + d8:3c:f8:31:30:3c:bb:64:b1:cd:91:d5:9b:de:55:5a + 28:77:f0:0e:0b:ca:49:b0:79:12:61:43:1e:a2:7a:14 + 6d:d0:02:17:af:5a:bf:97:21:90:bb:b9:dd:a8:16:d4 + f9:35:d1:f0:b0:e5:f8:c9:34:83:b4:47:a7:69:c2:08 + 53:29:b3:bf:37:b8:66:fb:aa:bc:5e:93:55:2c:06:69 + ca:ad:4f:d8:8e:18:86:68:f7:62:77:cb:69:50:0e:6f + 6a:d7:4c:20:50:95:ea:f0:95:42:ce:13:48:a4:a7:cc + 28:88:67:4c:e5:3c:c8:b1:85:61:0b:6f:fb:b0:0f:48 + a9: + +exp2: + 00:d9:8d:0f:4c:7a:aa:6b:5a:c0:f8:db:8b:29:ed:1a + 4e:03:36:98:a9:2d:8c:16:f2:62:47:39:15:83:73:10 + 35:0f:85:cb:fc:1c:e5:0d:4a:2c:e4:30:5b:91:43:48 + 5d:d2:bc:b2:c6:65:2b:f2:f3:2a:4a:e3:6c:4b:d0:3a + 8a:4c:1e:92:5d:97:78:e9:0f:84:f2:5f:18:28:f1:06 + c6:03:a0:7a:c0:c9:a5:c2:ff:d2:64:23:ba:ff:5a:d4 + 7f:9a:dc:61:a5:39:68:6c:38:37:18:db:3d:41:60:5b + 08:68:19:42:52:db:0b:34:1d:a9:6f:3a:4c:35:fb:31 + c6:a3:bf:5b:c2:f7:09:e5:55:94:9b:f2:d1:6d:52:79 + 3c:a7:4c:cb:85:97:e0:38:52:88:e0:9c:1c:ee:7f:cf + d3:43:3d:2f:2f:53:30:b9:08:96:f9:35:dc:fa:37:6e + 9f:a1:1d:05:20:9c:bd:12:fe:14:c1:b1:69:c9:03:7e + a3: + + +Public Key PIN: + pin-sha256:wyBZajWAUdEUpePlX91IQ/gQL4qDtQQpucnDP9XsdCY= +Public Key ID: + sha256:c320596a358051d114a5e3e55fdd4843f8102f8a83b50429b9c9c33fd5ec7426 + sha1:21275e0d7571a19e74ef0fd70c7af2423c61d6b8 + +-----BEGIN RSA PRIVATE KEY----- +MIIG5AIBAAKCAYEAzD5Tig7JFD357b+HXRGcTOMEWvdTETar4mAq/MsKjBfGk9Sm +lR9fjMOEN7J+zdoOfW3RoJCEjFZdrjILn4NHn5WrbtJ2JGv6DeSVkyaUZhDdOa74 +S4biM1Y/xhNxSo9nepCNf5jfCYf+l1pVkt+b6uu4WkMJDDgimGfV42lei80xLLgQ +VEXp5CwQg+T43gc7pp2bbHT4tysLux0kt1WGgiMrUedjIa4bhrWowGM4WQM7acct +bOLviZFn8nUO1Z4YzubF21h+LCpecBV3relB9Vy96GcXyFz6LXrzW79R8e4i+cTx +IUDG2VePVNC+f92BV7aMoiuQHFgsz7XIkzRciL6RfV9PCVDLfrcFFoYHP+ylhkxF +mZh3hDkcJPkkinS+3SZBBHsjcXmFB8KezLrO5UuFgDfuV1B0lPJZDKfDNuKPLOfu +0+6CvbWAvSqm+wvr97nA6SCZiPMFBT3YZGzRUVetPPOLUR26C4t3v4TsJX0Y1Ret +bYU0EYvgp3i0YUzzAgMBAAECggGAF5HdvCHmrJJWZExlB6V/nxPXrdO631Kq1U35 +yAB6cB5SST8SFkVvlmP9vxu3LHRMlcb1mQz0bumMmAyocLkKq+zuwrRA9AUN70qA +xGGAYKLzlLOPbc/cDMaYR3h463zPGULTQXVXz9bE15ySIYfyi1K2jJwN0D4uJ95b +vYmsYn/Ee/+lysPisPUw1re2zJCTHrAzfnmlNiYejXJYiVuyURRT7HjCZOGdxZbz +KXiJySPpWFSm9ljeAcco8r71fktnJohmPp8zjZOaqm8m+qMPaw3xS0PrdSYX6lgF +eGby1lYMkPtWgNEObZZTXPDyDnZb9/DFn/uCnP/6LLfbZh4Gy3GLs/ZdQG60au38 
+83v1sIKBupme+BtxFXKnNKby2M1MbbLb0HpFeoqr6jSJ3TUWX9Gn1SIF0vsjRw6Y +/zTlmr6sIEsDwSyQLVgrSn3zcDyno+1U/Eb+zQzM+8bG4/gl6LAuEl+fdGUcF23/ +1eOOErjqoITe08UdYkg5BSggCaBJAoHBANHHFZmcyvvfeFgEEH0eFa9vGljUxh80 +EkJ5lyMijLm1L9yNMTcBbO7QpTqRgq/QLyC+++M1NQJxc56ON6XneNsI42QQAYiE +ssVwE+OANrTzkpfhR/hU+R+zx4ujAJE+uJtY443UqgpX5ndQuPJc9GB59mUU4e17 +4vNTgXIalQ0IzZvS0TffeEOAnviXkmwo80NNHk+e5D0OZRCNJkn0lj/Q6PuS89tN +KorGMFrzuz5yG/F9B5S6AKNsPRI4uHh4xQKBwQD5PxL1sXi9KrNAY2e9RAMTNdO4 +jjLjxIHzMuvlVGSjhxOTA/pmn6SzXImFtQc7PN1KRQ98I8t/Fvx11UZ1w98geunK +kyD/oYUE4c4u9r71lzVaJcARW43MNY9iZPMR8yGKGA1u3LOpzwvVPp1BZ5UZAi2P +cm0EsNlzywwTiAqrAk/Efo2+tZmYFWF92xRZREH32j2+KnFcJ3EOL8axMqzXKvFB +fTvUkqJfW8vhsZX3W8YJ+NaAAoFE+f0ZzDa/WlcCgcEAsTvsie4oqQizGlnEvJDQ +bXoG3opWIULTMoqcILkuADCOkV5Q+FIiANsS3hrWOUhAqP/yvJHViYzkzE8HwQmB +2Dz4MTA8u2SxzZHVm95VWih38A4LykmweRJhQx6iehRt0AIXr1q/lyGQu7ndqBbU ++TXR8LDl+Mk0g7RHp2nCCFMps783uGb7qrxek1UsBmnKrU/YjhiGaPdid8tpUA5v +atdMIFCV6vCVQs4TSKSnzCiIZ0zlPMixhWELb/uwD0ipAoHBANmND0x6qmtawPjb +iyntGk4DNpipLYwW8mJHORWDcxA1D4XL/BzlDUos5DBbkUNIXdK8ssZlK/LzKkrj +bEvQOopMHpJdl3jpD4TyXxgo8QbGA6B6wMmlwv/SZCO6/1rUf5rcYaU5aGw4Nxjb +PUFgWwhoGUJS2ws0HalvOkw1+zHGo79bwvcJ5VWUm/LRbVJ5PKdMy4WX4DhSiOCc +HO5/z9NDPS8vUzC5CJb5Ndz6N26foR0FIJy9Ev4UwbFpyQN+owKBwBBF/LopfCYE +5JPr3iNa96o0W/8+aj9oZroyPxbhDDk6uzlwxB5xC1Vnq9f28tjsmkmJuIUK+2lm +IFiU+i3Yl0cjRkxnXMPa4HWAQLMMtCnRvmGICs+eqJRc8QXEMq4OyN8F4lleJToS +9oO9mB+Gr1GdIiXPYDnRjUCuTsmFEOGTL7WKP9CeDEQx0WbRFeHD30kwRiKGyuPP +M32LjCy8rWLvf4iAnde6ssQTyUmZ9n5P+nHwjOy3iXkdExbu6UC/vA== +-----END RSA PRIVATE KEY----- diff --git a/x-pack/test/idp-fixture/openldap/certs/ldap_server.pem b/x-pack/test/idp-fixture/openldap/certs/ldap_server.pem new file mode 100644 index 0000000000000..91a7bc5e4f0e3 --- /dev/null +++ b/x-pack/test/idp-fixture/openldap/certs/ldap_server.pem @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEZTCCAs2gAwIBAgIUZBVU/AqHa0docwSi4yaYO9Obx3EwDQYJKoZIhvcNAQEL +BQAwGTEXMBUGA1UEAxMOTERBUCBTZXJ2ZXIgQ0EwHhcNMTkwMzA4MTY1NDEzWhcN +MjkwMzA3MTY1NDEzWjAwMRIwEAYDVQQDEwlsb2NhbGhvc3QxGjAYBgNVBAoTEUVs +YXN0aWNzZWFyY2ggQi5WMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEA +zD5Tig7JFD357b+HXRGcTOMEWvdTETar4mAq/MsKjBfGk9SmlR9fjMOEN7J+zdoO +fW3RoJCEjFZdrjILn4NHn5WrbtJ2JGv6DeSVkyaUZhDdOa74S4biM1Y/xhNxSo9n +epCNf5jfCYf+l1pVkt+b6uu4WkMJDDgimGfV42lei80xLLgQVEXp5CwQg+T43gc7 +pp2bbHT4tysLux0kt1WGgiMrUedjIa4bhrWowGM4WQM7acctbOLviZFn8nUO1Z4Y +zubF21h+LCpecBV3relB9Vy96GcXyFz6LXrzW79R8e4i+cTxIUDG2VePVNC+f92B +V7aMoiuQHFgsz7XIkzRciL6RfV9PCVDLfrcFFoYHP+ylhkxFmZh3hDkcJPkkinS+ +3SZBBHsjcXmFB8KezLrO5UuFgDfuV1B0lPJZDKfDNuKPLOfu0+6CvbWAvSqm+wvr +97nA6SCZiPMFBT3YZGzRUVetPPOLUR26C4t3v4TsJX0Y1RetbYU0EYvgp3i0YUzz +AgMBAAGjgY0wgYowDAYDVR0TAQH/BAIwADAUBgNVHREEDTALgglsb2NhbGhvc3Qw +EwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0PAQH/BAUDAwegADAdBgNVHQ4EFgQU +ISdeDXVxoZ507w/XDHryQjxh1rgwHwYDVR0jBBgwFoAU/6nz3nhHHqj+Lgb/dTqp +Uq6WwMkwDQYJKoZIhvcNAQELBQADggGBAArKYQAXly79TzWGvaA8lY82LzbBqRPg +OD8bx15fSA87ODdLgjOsJncN4LPJXbBMTT+0uoOh43/+6eBcvKCeyUEfb3nyR0EN +W5JQu7AOtcUNqlsDkvCNfF4hqdICJqis+Z9GZi9K1k444wL5hjzt5wRY5/Qrlvsn +jZD2d/ZiUfEj1sQyAQGpwmTDALGIb6UkfTWCBJgLSAp6E8BWzj9RAlNvDvONk5NC +yFFzzbz1uohP8z9dZPwv/eVN5IV7JFCW+SQUcsbThFS+DJ8Qkj3Zc1uGXEW1duEz +I6OBNGAVSHloP9S5cQWhjbOAGFeHY0zVh1dYj+EI/NxEkNUHC7AeYfE1tcCiMsro +wIUGONPmsaF8ArVeX3NJY+0WYoU+x4OBtllTsvV64giAWamqn7Fra9SGqRmBPmqQ +I7gVz2BPVINLXuxkcIbnDg7Ck7DB4HPPZKbg+n79c3HOs2g2Fo0R22UBk15GfpQ3 +wcgKu37UYja8tk+faDOuiHtnumAuUx/Nzg== +-----END CERTIFICATE----- diff --git a/x-pack/test/idp-fixture/openldap/certs/templates/ca_server.conf 
b/x-pack/test/idp-fixture/openldap/certs/templates/ca_server.conf new file mode 100644 index 0000000000000..dedc35fa94e52 --- /dev/null +++ b/x-pack/test/idp-fixture/openldap/certs/templates/ca_server.conf @@ -0,0 +1,3 @@ +cn = LDAP Server CA +ca +cert_signing_key diff --git a/x-pack/test/idp-fixture/openldap/certs/templates/ldap_server.conf b/x-pack/test/idp-fixture/openldap/certs/templates/ldap_server.conf new file mode 100644 index 0000000000000..5dd1446afa8a6 --- /dev/null +++ b/x-pack/test/idp-fixture/openldap/certs/templates/ldap_server.conf @@ -0,0 +1,7 @@ +organization = "Elasticsearch B.V" +cn = localhost +dns_name = localhost +tls_www_server +encryption_key +signing_key +expiration_days = 3652 diff --git a/x-pack/test/idp-fixture/openldap/ldif/config.ldif b/x-pack/test/idp-fixture/openldap/ldif/config.ldif new file mode 100644 index 0000000000000..45d2c20659975 --- /dev/null +++ b/x-pack/test/idp-fixture/openldap/ldif/config.ldif @@ -0,0 +1,15 @@ + +dn: olcDatabase={1}mdb,cn=config +changetype: modify +replace: olcAccess +olcAccess: {0}to attrs=userPassword,shadowLastChange + by self write + by anonymous auth + by dn="cn=admin,DC=oldap,DC=test,DC=elasticsearch,DC=com" write + by * none +olcAccess: {1}to dn.base="" by * read +olcAccess: {2}to * + by self write + by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" write + by dn="cn=admin,DC=oldap,DC=test,DC=elasticsearch,DC=com" write + by * read diff --git a/x-pack/test/idp-fixture/openldap/ldif/users.ldif b/x-pack/test/idp-fixture/openldap/ldif/users.ldif new file mode 100644 index 0000000000000..367672d274900 --- /dev/null +++ b/x-pack/test/idp-fixture/openldap/ldif/users.ldif @@ -0,0 +1,234 @@ + +dn: ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +objectClass: organizationalUnit +ou: people + +dn: uid=kraken,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +uid: kraken +uidNumber: 1000 +gidNumber: 1000 +homeDirectory: /home/1000 +cn: Commander Kraken +sn: Commander Kraken +objectClass: top +objectClass: posixAccount +objectClass: inetOrgPerson +userPassword: NickFuryHeartsES +mail: kraken@oldap.test.elasticsearch.com + +dn: uid=hulk,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +uid: hulk +uidNumber: 1001 +gidNumber: 1001 +homeDirectory: /home/1001 +cn: Bruce Banner +sn: Bruce Banner +objectClass: top +objectClass: posixAccount +objectClass: inetOrgPerson +userPassword: NickFuryHeartsES +mail: hulk@oldap.test.elasticsearch.com + +dn: uid=hawkeye,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +uid: hawkeye +uidNumber: 1002 +gidNumber: 1002 +homeDirectory: /home/1001 +cn: Clint Barton +sn: Clint Barton +objectClass: top +objectClass: posixAccount +objectClass: inetOrgPerson +userPassword: NickFuryHeartsES +mail: hawkeye@oldap.test.elasticsearch.com + +dn: uid=jarvis,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +uid: jarvis +uidNumber: 1003 +gidNumber: 1003 +homeDirectory: /home/1003 +cn: Jarvis +sn: Jarvis +objectClass: top +objectClass: posixAccount +objectClass: inetOrgPerson +userPassword: NickFuryHeartsES +mail: jarvis@oldap.test.elasticsearch.com + +dn: uid=blackwidow,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +uid: blackwidow +uidNumber: 1004 +gidNumber: 1004 +homeDirectory: /home/1004 +cn: Natasha Romanova +sn: Natasha Romanova +objectClass: top +objectClass: posixAccount +objectClass: inetOrgPerson +userPassword: NickFuryHeartsES +mail: blackwidow@oldap.test.elasticsearch.com + +dn: uid=fury,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +uid: fury +uidNumber: 1005 
+gidNumber: 1005 +homeDirectory: /home/1005 +cn: Nick Fury +sn: Nick Fury +objectClass: top +objectClass: posixAccount +objectClass: inetOrgPerson +userPassword: NickFuryHeartsES +mail: fury@oldap.test.elasticsearch.com + +dn: uid=phil,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +uid: phil +uidNumber: 1006 +gidNumber: 1006 +homeDirectory: /home/1006 +cn: Phil Colson +sn: Phil Colson +objectClass: top +objectClass: posixAccount +objectClass: inetOrgPerson +userPassword: NickFuryHeartsES +mail: phil@oldap.test.elasticsearch.com + +dn: uid=cap,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +uid: cap +uidNumber: 1007 +gidNumber: 1007 +homeDirectory: /home/1007 +cn: Steve Rogers +sn: Steve Rogers +objectClass: top +objectClass: posixAccount +objectClass: inetOrgPerson +userPassword: NickFuryHeartsES +mail: cap@oldap.test.elasticsearch.com + +dn: uid=thor,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +uid: thor +uidNumber: 1008 +gidNumber: 1008 +homeDirectory: /home/1008 +cn: Thor Odinson +sn: Thor Odinson +objectClass: top +objectClass: posixAccount +objectClass: inetOrgPerson +userPassword: NickFuryHeartsES +mail: thor@oldap.test.elasticsearch.com + +dn: uid=ironman,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +uid: ironman +uidNumber: 1009 +gidNumber: 1009 +homeDirectory: /home/1009 +cn: Tony Stark +sn: Tony Stark +objectClass: top +objectClass: posixAccount +objectClass: inetOrgPerson +userPassword: NickFuryHeartsES +mail: ironman@oldap.test.elasticsearch.com + +dn: uid=odin,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +uid: odin +uidNumber: 1010 +gidNumber: 1010 +homeDirectory: /home/1010 +cn: Gods +sn: Gods +objectClass: top +objectClass: posixAccount +objectClass: inetOrgPerson +userPassword: NickFuryHeartsES +mail: odin@oldap.test.elasticsearch.com + +dn: uid=selvig,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +uid: selvig +uidNumber: 1011 +gidNumber: 1011 +homeDirectory: /home/1011 +cn: Erik Selvig +sn: Erik Selvig +objectClass: top +objectClass: posixAccount +objectClass: inetOrgPerson +userPassword: NickFuryHeartsES +mail: selvig@oldap.test.elasticsearch.com + +dn: cn=Hydra,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +cn: Hydra +objectClass: top +objectClass: posixGroup +gidNumber: 101 +memberUid: kraken + +dn: cn=Geniuses,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +cn: Geniuses +objectClass: top +objectClass: posixGroup +gidNumber: 102 +memberUid: hulk +memberUid: ironman +memberUid: selvig + +dn: cn=SHIELD,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +cn: SHIELD +objectClass: top +objectClass: posixGroup +gidNumber: 103 +memberUid: hulk +memberUid: hawkeye +memberUid: blackwidow +memberUid: fury +memberUid: phil +memberUid: cap +memberUid: thor +memberUid: ironman + +dn: cn=Philanthropists,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +cn: Philanthropists +objectClass: top +objectClass: posixGroup +gidNumber: 104 +memberUid: hulk +memberUid: thor +memberUid: ironman + +dn: cn=Avengers,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +cn: Avengers +objectClass: top +objectClass: posixGroup +gidNumber: 105 +memberUid: hulk +memberUid: hawkeye +memberUid: blackwidow +memberUid: fury +memberUid: cap +memberUid: thor +memberUid: ironman + +dn: cn=Gods,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +cn: Gods +objectClass: top +objectClass: posixGroup +gidNumber: 106 +memberUid: thor +memberUid: odin + +dn: cn=Playboys,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +cn: Playboys +objectClass: top +objectClass: posixGroup
+gidNumber: 107 +memberUid: ironman + +dn: cn=Billionaries,ou=people,DC=oldap,DC=test,DC=elasticsearch,DC=com +cn: Billionaries +objectClass: top +objectClass: posixGroup +gidNumber: 108 +memberUid: ironman \ No newline at end of file diff --git a/x-pack/test/idp-fixture/src/main/resources/certs/README.txt b/x-pack/test/idp-fixture/src/main/resources/certs/README.txt deleted file mode 100644 index 0cf738156a7f0..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/certs/README.txt +++ /dev/null @@ -1,15 +0,0 @@ -File in this directory are: - -idp-ca.crt -idp-ca.key - Description: A CA for the IdP - Generated Date: 2018-02-07 - Command: bin/elasticsearch-certutil ca --ca-dn 'CN=idp-fixture,OU=elasticsearch,DC=elastic,DC=co' --days 5000 -keysize 1024 --out idp-ca.zip --pem - X-Pack Version: 6.2.0 - -idptrust.jks - Description: Java Keystore Format of CA cert - Generated Date: 2018-02-07 - Command: keytool -importcert -file ca.crt -alias idp-fixture-ca -keystore idptrust.jks -noprompt -storepass changeit - Java Version: Java(TM) SE Runtime Environment (build 9.0.1+11) - diff --git a/x-pack/test/idp-fixture/src/main/resources/certs/ca.crt b/x-pack/test/idp-fixture/src/main/resources/certs/ca.crt deleted file mode 100644 index 1ab8e866c1785..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/certs/ca.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDEDCCAnmgAwIBAgIVAOLlDV8Lvg17LwKqchYKcsog1SyKMA0GCSqGSIb3DQEB -CwUAMFsxEjAQBgoJkiaJk/IsZAEZFgJjbzEXMBUGCgmSJomT8ixkARkWB2VsYXN0 -aWMxFjAUBgNVBAsTDWVsYXN0aWNzZWFyY2gxFDASBgNVBAMTC2lkcC1maXh0dXJl -MB4XDTE4MDIwNzAzMjAwNloXDTMxMTAxNzAzMjAwNlowWzESMBAGCgmSJomT8ixk -ARkWAmNvMRcwFQYKCZImiZPyLGQBGRYHZWxhc3RpYzEWMBQGA1UECxMNZWxhc3Rp -Y3NlYXJjaDEUMBIGA1UEAxMLaWRwLWZpeHR1cmUwgZ8wDQYJKoZIhvcNAQEBBQAD -gY0AMIGJAoGBALWf8R7uGnrrmuQ26khwQ/81f+x57RgE1cHQGp0sBkwsijzZPpuU -8ZkqYMNXG/LU2hNfAv4LeCsighgo4Le+TkBKncbucQcNM+dLINvhAfgYp9QAdGjk -89hxWEQ6p/Tr98TG0Qd7jZa6bu8azMf7+bmjKpHaffIMpxDnkPZsaxodAgMBAAGj -gc8wgcwwHQYDVR0OBBYEFDsd63fpzLH1G+aduhypBPctWuNNMIGZBgNVHSMEgZEw -gY6AFDsd63fpzLH1G+aduhypBPctWuNNoV+kXTBbMRQwEgYDVQQDEwtpZHAtZml4 -dHVyZTEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEXMBUGCgmSJomT8ixkARkWB2Vs -YXN0aWMxEjAQBgoJkiaJk/IsZAEZFgJjb4IVAOLlDV8Lvg17LwKqchYKcsog1SyK -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADgYEAi1bfK31u7deMDLXv -Axrg1nJjEzMjkb6F/tqA2hJCokvWz2sdKPLHfrfOu2edHm4qQABAdnmRtE/1xsYm -xVuZA+O7khEkXv5ql65HIgCHL0hEvFWfKzMDCjgm+1rvNTMbgsRj2RGzEQeu/Aqg -Nv2mnc0Vjk3kaAQ0JtmCI8k6fM0= ------END CERTIFICATE----- diff --git a/x-pack/test/idp-fixture/src/main/resources/certs/ca.key b/x-pack/test/idp-fixture/src/main/resources/certs/ca.key deleted file mode 100644 index 9f93ff3b7ceab..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/certs/ca.key +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIICXQIBAAKBgQC1n/Ee7hp665rkNupIcEP/NX/see0YBNXB0BqdLAZMLIo82T6b -lPGZKmDDVxvy1NoTXwL+C3grIoIYKOC3vk5ASp3G7nEHDTPnSyDb4QH4GKfUAHRo -5PPYcVhEOqf06/fExtEHe42Wum7vGszH+/m5oyqR2n3yDKcQ55D2bGsaHQIDAQAB -AoGACfOsm5xCWS/ludGEftmf8DD3RHbd1e4V5FvJyYjrA2uBW5ovwwijQFhBGxL/ -1gtxs5QGLvNj70Ehzb8XqRnFYcrSUxkABCcO9vJf8wuamtPeaQzlSVSVM9myjkBu -2EhegkFXSgFiVX6A/sxm8e8bqxxouz46Upa2/YLKhcb5oiECQQDb3HhP0hIx0oDj -h1FXLACtbTlYUg8gGylD17RsWSPB765tOTt65/KztyH8BmdlTAKxIC5BHEQLYiug -u3KwPEk5AkEA03qFxj/quoH6l0y7i8kah97KCtiM0kg4oXYDuSDIzt4NqdNw/UWx -p3DGiIPpY5errR1ytyPiiuM2j+c5oUcMBQJAfC4SZkMos6tJ0Tlk3++iklHWyePP -VzsAG6mB5pCSeb9+rYJd7hWEJ62QLGERlU1RV+ntNilY5XUVXzuAk7n5QQJBANLg -31q0S9WVXRPYUT/v1kPcVi6Ah9P8bnQa4VWOqo8WABvzmz0DbUahf2eL2oQULv3e 
-WpDi+Lk0HylaEi6PUR0CQQDHTzjyjuTLmnPw5AvZw7oQgilZxTUhOapw3Ihcq/KA -T8oFnLwmnMs+kZOO6e2QcagXaFXufH1w/MvxhSjHj8SO ------END RSA PRIVATE KEY----- diff --git a/x-pack/test/idp-fixture/src/main/resources/certs/idptrust.jks b/x-pack/test/idp-fixture/src/main/resources/certs/idptrust.jks deleted file mode 100644 index fbd3135095f9b..0000000000000 Binary files a/x-pack/test/idp-fixture/src/main/resources/certs/idptrust.jks and /dev/null differ diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/playbook.yml b/x-pack/test/idp-fixture/src/main/resources/provision/playbook.yml deleted file mode 100644 index 24474a832d208..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/playbook.yml +++ /dev/null @@ -1,11 +0,0 @@ -- hosts: all - - vars_files: - - vars/default.yml - - roles: - - { role: java-8-openjdk, become: yes } - - { role: certs, become: yes } - - { role: openldap, become: yes } - - { role: tomcat-8, become: yes } - - { role: shibboleth, become: yes } diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/README b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/README deleted file mode 100644 index 748d912bd2c5f..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/README +++ /dev/null @@ -1,2 +0,0 @@ -Generates CA and OpenLDAP Service Key/Certificate -See: https://www.digitalocean.com/community/tutorials/how-to-encrypt-openldap-connections-using-starttls diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/files/ca_server.conf b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/files/ca_server.conf deleted file mode 100644 index 3a412ac90760d..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/files/ca_server.conf +++ /dev/null @@ -1,3 +0,0 @@ -cn = Vagrant Server CA -ca -cert_signing_key diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/tasks/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/tasks/main.yml deleted file mode 100644 index 553b9eff5d703..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/tasks/main.yml +++ /dev/null @@ -1,106 +0,0 @@ ---- - -- name: Install GNUtls - apt: - name: "{{ item }}" - state: installed - update_cache: yes - with_items: - - gnutls-bin - - ssl-cert - -- name: Create GNUtls templates dir - file: - path: "{{ ssl_dir_templates }}" - state: directory - group: ssl-cert - mode: 0777 - -- name: Copy server cert template - template: - src: cert_server.conf.j2 - dest: "{{ ssl_dir_templates }}/cert_server.conf" - mode: 0666 - -- name: Copy server keystore template - template: - src: keystore_server.conf.j2 - dest: "{{ ssl_dir_templates }}/keystore_server.conf" - mode: 0666 - -- name: Copy CA Cert - copy: - src: "../certs/ca.crt" - dest: "{{ ssl_dir_certs }}/ca_server.pem" - mode: 0666 - register: copy_ca - -- name: Copy CA Key - copy: - src: "../certs/ca.key" - dest: "{{ ssl_dir_private }}/ca_server.key" - mode: 0600 - -- name: Create Key for LDAP Service - command: "certtool -p --sec-param high --outfile {{ ssl_dir_private }}/{{ openldap_key_name }}" - args: - creates: "{{ ssl_dir_private }}/{{ openldap_key_name }}" - -- name: Set group for LDAP Key - file: - path: "{{ ssl_dir_private }}/{{ openldap_key_name }}" - group: ssl-cert - mode: 0640 - -- name: Delete old LDAP cert - file: - path: "{{ ssl_dir_certs }}/{{ openldap_cert_name}}" - state: absent - when: copy_ca.changed - -- name: Create Cert for 
LDAP - command: "certtool -c --load-privkey {{ ssl_dir_private }}/{{ openldap_key_name }} --load-ca-certificate {{ ssl_dir_certs }}/ca_server.pem --load-ca-privkey {{ ssl_dir_private }}/ca_server.key --template {{ ssl_dir_templates }}/cert_server.conf --outfile {{ ssl_dir_certs }}/{{ openldap_cert_name}}" - args: - creates: "{{ ssl_dir_certs }}/{{ openldap_cert_name}}" - -- name: Create Key for Tomcat Service - command: "certtool -p --sec-param high --outfile {{ ssl_dir_private }}/{{ tomcat_key_name }}" - args: - creates: "{{ ssl_dir_private }}/{{ tomcat_key_name }}" - -- name: Set group for Tomcat Key - file: - path: "{{ ssl_dir_private }}/{{ tomcat_key_name }}" - group: ssl-cert - mode: 0640 - -- name: Delete old Tomcat cert - file: - path: "{{ ssl_dir_certs }}/{{ tomcat_cert_name }}" - state: absent - when: copy_ca.changed - -- name: Create Cert for Tomcat - command: "certtool -c --load-privkey {{ ssl_dir_private }}/{{ tomcat_key_name }} --load-ca-certificate {{ ssl_dir_certs }}/ca_server.pem --load-ca-privkey {{ ssl_dir_private }}/ca_server.key --template {{ ssl_dir_templates }}/cert_server.conf --outfile {{ ssl_dir_certs }}/{{ tomcat_cert_name}}" - args: - creates: "{{ ssl_dir_certs }}/{{ tomcat_cert_name}}" - register: tomcat_cert - -- name: Delete old Tomcat Keystore - file: - path: "{{ ssl_dir_private }}/{{ tomcat_keystore_name }}" - state: absent - when: tomcat_cert.changed - -- name: Create Keystore for Tomcat - command: "certtool --load-ca-certificate {{ ssl_dir_certs }}/ca_server.pem --load-privkey {{ ssl_dir_private }}/{{ tomcat_key_name }} --load-certificate {{ ssl_dir_certs }}/{{ tomcat_cert_name }} --template {{ ssl_dir_templates }}/keystore_server.conf --outder --to-p12 --outfile {{ ssl_dir_private }}/{{ tomcat_keystore_name }}" - args: - creates: "{{ ssl_dir_private }}/{{ tomcat_keystore_name }}" - notify: Restart Tomcat Service - -- name: Set group for Tomcat Keystore - file: - path: "{{ ssl_dir_private }}/{{ tomcat_keystore_name }}" - group: ssl-cert - mode: 0640 -... 
diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/templates/cert_server.conf.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/templates/cert_server.conf.j2 deleted file mode 100644 index 448264d368198..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/templates/cert_server.conf.j2 +++ /dev/null @@ -1,7 +0,0 @@ -organization = Elastic.co -cn = {{ ansible_fqdn }} -dns_name = {{ ansible_fqdn }} -tls_www_server -encryption_key -signing_key -expiration_days = 3652 diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/templates/keystore_server.conf.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/templates/keystore_server.conf.j2 deleted file mode 100644 index ee7eeaf52398e..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/templates/keystore_server.conf.j2 +++ /dev/null @@ -1,2 +0,0 @@ -pkcs12_key_name = {{ tomcat_key_alias }} -password = {{ tomcat_keystore_pass }} diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/vars/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/vars/main.yml deleted file mode 100644 index a364fc641d2ad..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/vars/main.yml +++ /dev/null @@ -1 +0,0 @@ -ssl_dir_templates: "{{ ssl_dir }}/templates" diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/java-8-openjdk/tasks/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/java-8-openjdk/tasks/main.yml deleted file mode 100644 index 1951a6f31a086..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/java-8-openjdk/tasks/main.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- - -- name: Install openjdk-8 - apt: - name: openjdk-8-jdk-headless - state: latest - update_cache: true - -- name: Capture JAVA_HOME - shell: readlink -e /etc/alternatives/java | sed 's|/jre/bin/java||' - register: java_home - -- set_fact: - java_home: "{{ java_home.stdout }}" - -... 
diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/handlers/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/handlers/main.yml deleted file mode 100644 index cad57b49f6a26..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/handlers/main.yml +++ /dev/null @@ -1,3 +0,0 @@ -- name: restart slapd - become: yes - service: name=slapd state=restarted diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/configure.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/configure.yml deleted file mode 100644 index 00a06b319b12f..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/configure.yml +++ /dev/null @@ -1,29 +0,0 @@ -- name: Hash Password - shell: "slappasswd -s {{ ldap_password }}" - register: passwd - -- name: Setting default db - ldap_attr: - dn: "{{ default_db_dn }}" - name: "{{ item.key }}" - values: "{{ item.val }}" - state: exact - with_items: - - { key: olcSuffix, val: "{{ ldap_domain_dn }}" } - - { key: olcRootDN, val: "cn=admin,{{ ldap_domain_dn }}" } - - { key: olcRootPW, val: "{{ passwd.stdout }}" } - - { key: olcAccess, val: "{0}to attrs=userPassword,shadowLastChange\n - by self write\n - by anonymous auth\n - by dn=\"cn=admin,{{ ldap_domain_dn }}\" write\n - by * none" } - - { key: olcAccess, val: "{1}to dn.base=\"\" by * read" } - - { key: olcAccess, val: "{2}to * \n - by self write\n - by dn.base=\"gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth\" write\n - by dn=\"cn=admin,{{ ldap_domain_dn }}\" write\n - by * read" } - # - { key: olcAccess, val: "{0}to *\n - # by dn.base=\"gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth\" write\n - # by dn.base=\"cn=admin,{{ ldap_domain_dn }}\" write\n - # by * none"} diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/install.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/install.yml deleted file mode 100644 index ccf1d5c7794f4..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/install.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- - -- name: Install openldap - apt: name={{ item }} state=installed update_cache=yes - with_items: - - slapd - - ldap-utils - -- name: Create service user for openldap - user: - name: "{{ openldap_server_user }}" - group: "{{ openldap_server_user }}" - createhome: no - system: yes - -- name: Add openldap service user to the ssl-cert group - user: - name: "{{ openldap_server_user }}" - append: yes - groups: ssl-cert - -- name: Copy slapd defaults - template: - src: slapd.j2 - dest: /etc/default/slapd - owner: "{{ openldap_server_user }}" - mode: 0644 - -- name: Restart slapd - service: name=slapd state=restarted enabled=yes - -# bug: https://github.com/ansible/ansible/issues/25665 -# When solved the commented section should replace the copying -# and execution of the subsequent 2 ldif files -#- name: Configure openldap server tls/ssl - 1 -# become: yes -# ignore_errors: yes -# ldap_attr: -# dn: cn=config -# name: "{{ item.0 }}" -# values: "{{ item.1 }}" -# state: exact -# with_together: -# - [ "olcTLSCertificateFile", "olcTLSCertificateKeyFile" ] -# - [ "{{ openldap_ssl_dir }}/cert.pem", "{{ openldap_ssl_dir }}/key.pem" ] - -- name: Copy TLS ldif add config - template: - src: tls_add.ldif.j2 - dest: /tmp/tls_add.ldif - -- name: Run TLS ldif add config - shell: ldapmodify -Y EXTERNAL -H ldapi:/// -f 
/tmp/tls_add.ldif - notify: - - restart slapd - ignore_errors: yes - register: addTLS - -- name: Copy TLS ldif replace config - template: - src: tls_replace.ldif.j2 - dest: /tmp/tls_replace.ldif - when: addTLS|failed - -- name: Run TLS ldif replace config - shell: ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/tls_replace.ldif - notify: - - restart slapd - when: addTLS|failed - -- name: Restart slapd - service: name=slapd state=restarted enabled=yes - -- name: Configure local clients to trust slapd cert - template: - src: ldap.conf.j2 - dest: /etc/ldap/ldap.conf - backup: yes -... diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/main.yml deleted file mode 100644 index a836c535bf6ae..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- include: provision_deps.yml -- include: install.yml -- include: configure.yml -- include: populate.yml -... diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/populate.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/populate.yml deleted file mode 100644 index 0c1200042e391..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/populate.yml +++ /dev/null @@ -1,77 +0,0 @@ ---- - -- name: Hash Password - shell: "slappasswd -s {{ ldap_password }}" - register: passwd - -- name: Load top level entry - ldap_entry: - dn: "{{ ldap_domain_dn }}" - objectClass: domain - attributes: - dc: "{{ ldap_domain.split('.')[0] }}" - -- name: Add people root entry - ldap_entry: - dn: "ou=people,{{ ldap_domain_dn }}" - objectClass: organizationalUnit - attributes: - ou: people - -- name: Add people entries - ldap_entry: - dn: "uid={{ item.uid }},ou=people,{{ ldap_domain_dn }}" - objectClass: - - top - - posixAccount - - inetOrgPerson - attributes: - userPassword: "{{ passwd.stdout }}" - uid: "{{ item.uid }}" - uidNumber: "{{ item.uidNumber }}" - gidNumber: "{{ item.uidNumber }}" - homeDirectory: "/home/{{ item.uidNumber }}" - mail: "{{ item.name }}@{{ ldap_domain }}" - cn: "{{ item.name }}" - sn: "{{ item.name }}" - with_items: - - { uid: kraken, uidNumber: 1000, name: "Commander Kraken" } - - { uid: hulk, uidNumber: 1001, name: "Bruce Banner" } - - { uid: hawkeye, uidNumber: 1002, name: "Clint Barton" } - - { uid: jarvis, uidNumber: 1003, name: "Jarvis" } - - { uid: blackwidow, uidNumber: 1004, name: "Natasha Romanova" } - - { uid: fury, uidNumber: 1005, name: "Nick Fury" } - - { uid: phil, uidNumber: 1006, name: "Phil Colson" } - - { uid: cap, uidNumber: 1007, name: "Steve Rogers" } - - { uid: thor, uidNumber: 1008, name: "Thor Odinson" } - - { uid: ironman, uidNumber: 1009, name: "Tony Stark" } - - { uid: odin, uidNumber: 1010, name: "Gods" } - - { uid: selvig, uidNumber: 1011, name: "Erik Selvig" } - -- name: Add group entries - ldap_entry: - dn: "cn={{ item.name }},ou=people,{{ ldap_domain_dn }}" - objectClass: - - top - - posixGroup - attributes: - cn: "{{ item.name }}" - gidNumber: "{{ item.gid }}" - with_items: - - "{{ group_members }}" - -- name: Populate group entries - ldap_attr: - dn: "cn={{ item.0.name }},ou=people,{{ ldap_domain_dn }}" - name: memberUid - values: "{{ item.1 }}" - with_subelements: - - "{{ group_members }}" - - memberUid - -# print users: -# sudo ldapsearch -H ldapi:// -Y EXTERNAL -LLL -b "{{ ldap_domain_dn }}" '(objectClass=person)' cn mail 
uid -# print groups: -# sudo ldapsearch -H ldapi:// -Y EXTERNAL -LLL -b "{{ ldap_domain_dn }}" '(objectClass=posixGroup)' - -... diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/provision_deps.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/provision_deps.yml deleted file mode 100644 index 2c1874c488aa6..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/provision_deps.yml +++ /dev/null @@ -1,10 +0,0 @@ -- name: Install python-ldap dependecies - apt: name={{ item }} state=installed update_cache=yes - with_items: - - libsasl2-dev - - python-dev - - libldap2-dev - - libssl-dev - -- name: Install python-ldap - pip: name=python-ldap diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/ldap.conf.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/ldap.conf.j2 deleted file mode 100644 index e3b49df61eeca..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/ldap.conf.j2 +++ /dev/null @@ -1,17 +0,0 @@ -# -# LDAP Defaults -# - -# See ldap.conf(5) for details -# This file should be world readable but not world writable. - -#BASE dc=example,dc=com -#URI ldap://ldap.example.com ldap://ldap-master.example.com:666 - -#SIZELIMIT 12 -#TIMELIMIT 15 -#DEREF never - -# TLS certificates (needed for GnuTLS) -#TLS_CACERT /etc/ssl/certs/ca-certificates.crt -TLS_CACERT {{ ssl_dir_certs }}/ca_server.pem diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/slapd.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/slapd.j2 deleted file mode 100644 index 5a202fa9fe6a4..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/slapd.j2 +++ /dev/null @@ -1,46 +0,0 @@ -# Default location of the slapd.conf file or slapd.d cn=config directory. If -# empty, use the compiled-in default (/etc/ldap/slapd.d with a fallback to -# /etc/ldap/slapd.conf). -SLAPD_CONF= - -# System account to run the slapd server under. If empty the server -# will run as root. - -SLAPD_USER="{{ openldap_server_user }}" - -# System group to run the slapd server under. If empty the server will -# run in the primary group of its user. -SLAPD_GROUP="{{ openldap_server_user }}" - -# Path to the pid file of the slapd server. If not set the init.d script -# will try to figure it out from $SLAPD_CONF (/etc/ldap/slapd.d by -# default) -SLAPD_PIDFILE= - -# slapd normally serves ldap only on all TCP-ports 389. slapd can also -# service requests on TCP-port 636 (ldaps) and requests via unix -# sockets. -# Example usage: -# SLAPD_SERVICES="ldap://127.0.0.1:389/ ldaps:/// ldapi:///" -SLAPD_SERVICES="ldap:/// ldapi:/// ldaps:///" - -# If SLAPD_NO_START is set, the init script will not start or restart -# slapd (but stop will still work). Uncomment this if you are -# starting slapd via some other means or if you don't want slapd normally -# started at boot. -#SLAPD_NO_START=1 - -# If SLAPD_SENTINEL_FILE is set to path to a file and that file exists, -# the init script will not start or restart slapd (but stop will still -# work). Use this for temporarily disabling startup of slapd (when doing -# maintenance, for example, or through a configuration management system) -# when you don't want to edit a configuration file. 
-SLAPD_SENTINEL_FILE=/etc/ldap/noslapd - -# For Kerberos authentication (via SASL), slapd by default uses the system -# keytab file (/etc/krb5.keytab). To use a different keytab file, -# uncomment this line and change the path. -#export KRB5_KTNAME=/etc/krb5.keytab - -# Additional options to pass to slapd -SLAPD_OPTIONS="" diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/tls_add.ldif.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/tls_add.ldif.j2 deleted file mode 100644 index f2bbefd3aee0c..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/tls_add.ldif.j2 +++ /dev/null @@ -1,10 +0,0 @@ -dn: cn=config -changetype: modify -add: olcTLSCACertificateFile -olcTLSCACertificateFile: {{ ssl_dir_certs }}/ca_server.pem -- -add: olcTLSCertificateFile -olcTLSCertificateFile: {{ ssl_dir_certs }}/{{ openldap_cert_name }} -- -add: olcTLSCertificateKeyFile -olcTLSCertificateKeyFile: {{ ssl_dir_private }}/{{ openldap_key_name }} diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/tls_replace.ldif.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/tls_replace.ldif.j2 deleted file mode 100644 index 40af5a77bec51..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/tls_replace.ldif.j2 +++ /dev/null @@ -1,10 +0,0 @@ -dn: cn=config -changetype: modify -replace: olcTLSCACertificateFile -olcTLSCACertificateFile: {{ ssl_dir_certs }}/ca_server.pem -- -replace: olcTLSCertificateFile -olcTLSCertificateFile: {{ ssl_dir_certs }}/{{ openldap_cert_name }} -- -replace: olcTLSCertificateKeyFile -olcTLSCertificateKeyFile: {{ ssl_dir_private }}/{{ openldap_key_name }} diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/vars/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/vars/main.yml deleted file mode 100644 index dd61e48ca2bd1..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/vars/main.yml +++ /dev/null @@ -1,10 +0,0 @@ -default_db_dn: "olcDatabase={1}mdb,cn=config" -group_members: - - {name: Hydra, gid: 101, memberUid: [kraken] } - - {name: Geniuses, gid: 102, memberUid: [hulk, ironman, selvig] } - - {name: SHIELD, gid: 103, memberUid: [hulk, hawkeye, blackwidow, fury, phil, cap, thor, ironman] } - - {name: Philanthropists, gid: 104, memberUid: [hulk, thor, ironman] } - - {name: Avengers, gid: 105, memberUid: [hulk, hawkeye, blackwidow, fury, cap, thor, ironman] } - - {name: Gods, gid: 106, memberUid: [thor, odin] } - - {name: Playboys, gid: 107, memberUid: [ironman] } - - {name: Billionaries, gid: 108, memberUid: [ironman] } diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/attribute-resolver.xml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/attribute-resolver.xml deleted file mode 100644 index 9ee1bd3f17818..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/attribute-resolver.xml +++ /dev/null @@ -1,158 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - uid mail cn - - - - - - - - cn - - - - - - diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/handlers/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/handlers/main.yml deleted file mode 100644 index 
f3d67396f503a..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/handlers/main.yml +++ /dev/null @@ -1,3 +0,0 @@ -- name: Restart Tomcat Service - become: yes - service: name=tomcat8 state=restarted enabled=yes diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/tasks/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/tasks/main.yml deleted file mode 100644 index 275a7ca9d9feb..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/tasks/main.yml +++ /dev/null @@ -1,88 +0,0 @@ ---- -- name: Download Shibboleth IdP - get_url: - url: "http://shibboleth.net/downloads/identity-provider/{{ shib_idp_version }}/shibboleth-identity-provider-{{ shib_idp_version }}.tar.gz" - timeout: 30 - dest: "/tmp/shibboleth-identity-provider-{{ shib_idp_version }}.tar.gz" - sha256sum: 8bd852dcdc7e6729ee645c0374a3c476b152fa24506fb86ffec33dfd190e607c - -- name: Create base directory for shibboleth idp - file: - path: "{{ shib_installdir }}" - state: directory - owner: "{{ ansible_ssh_user }}" - -- name: Extract shibboleth - unarchive: - src: "/tmp/shibboleth-identity-provider-{{ shib_idp_version }}.tar.gz" - dest: "{{ shib_installdir }}" - remote_src: yes - -- name: Copying shibboleth idp install files - template: - src: idp.properties.j2 - dest: "{{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/idp.properties" - -- name: Copying shibboleth ldap properties - template: - src: ldap.properties.j2 - dest: "{{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/ldap.properties" - -- name: Copy silent installation file - template: - src: idp.silent.params.j2 - dest: "{{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/idp.silent.params" - -- name: Install Shibboleth IdP - shell: "{{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/install.sh -propertyfile {{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/idp.silent.params" - environment: - JAVA_HOME: "{{ java_home }}" - args: - creates: "{{ shib_home }}" - -- name: Turn on log debugging level - ignore_errors: yes - replace: - path: "{{ shib_home }}/conf/logback.xml" - regexp: '' - replace: "" - -# HACK! -- name: Fix metadata error because of port after hostname - replace: - path: "{{ shib_home }}/metadata/idp-metadata.xml" - regexp: "({{ shib_dns_name }}):8443" - replace: '\1' - -- name: Fetch idp metadata - fetch: - src: "{{ shib_home }}/metadata/idp-metadata.xml" - dest: "generated/" - flat: yes - -- name: Setting attr resolve/release policy and enable unverified RPs - copy: - src: "{{ item }}" - dest: "{{ shib_home }}/conf/{{ item }}" - mode: 0600 - with_items: - - attribute-resolver.xml - - attribute-filter.xml - - relying-party.xml - -- name: Change owner and group for Shibboleth IdP - file: - path: "{{ shib_home }}" - recurse: yes - owner: tomcat8 - group: tomcat8 - -- name: Copy shib war descriptor to Catalina localhost - template: - src: idp.xml.j2 - dest: /etc/tomcat8/Catalina/localhost/idp.xml - owner: tomcat8 - group: tomcat8 - notify: Restart Tomcat Service - -... 
diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.properties.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.properties.j2 deleted file mode 100644 index 0b7f0f76f2a45..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.properties.j2 +++ /dev/null @@ -1,4 +0,0 @@ -idp.entityID=https://test.shibboleth.elastic.local/ -idp.scope=test.elastic.co -idp.sealer.storePassword={{ idp_sealer_password }} -idp.sealer.keyPassword={{ idp_sealer_password }} diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.silent.params.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.silent.params.j2 deleted file mode 100644 index dd919887ad951..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.silent.params.j2 +++ /dev/null @@ -1,8 +0,0 @@ -idp.noprompt="true" -idp.host.name={{ shib_dns_name }} -idp.src.dir={{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/ -idp.target.dir={{ shib_installdir }}/shibboleth-idp/ -idp.sealer.password={{ idp_sealer_password }} -idp.keystore.password={{ idp_keystore_password }} -idp.merge.properties={{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/idp.properties -ldap.merge.properties={{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/ldap.properties diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.xml.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.xml.j2 deleted file mode 100644 index db75a533f8dbe..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.xml.j2 +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/ldap.properties.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/ldap.properties.j2 deleted file mode 100644 index 939effe589f2f..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/ldap.properties.j2 +++ /dev/null @@ -1,11 +0,0 @@ -idp.authn.LDAP.authenticator = bindSearchAuthenticator -idp.authn.LDAP.ldapURL = ldap://{{ ansible_fqdn }}:389 -idp.authn.LDAP.useStartTLS = true -idp.authn.LDAP.sslConfig = certificateTrust -idp.authn.LDAP.trustCertificates= {{ ssl_dir_certs }}/ca_server.pem -idp.authn.LDAP.baseDN = ou=people,{{ ldap_domain_dn }} -idp.authn.LDAP.subtreeSearch = true -idp.authn.LDAP.userFilter = (uid={user}) -idp.authn.LDAP.bindDN = cn=admin,{{ ldap_domain_dn }} -idp.authn.LDAP.bindDNCredential = {{ ldap_password }} -idp.authn.LDAP.dnFormat = uid=%s,ou=people,{{ ldap_domain_dn }} diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/handlers/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/handlers/main.yml deleted file mode 100644 index f3d67396f503a..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/handlers/main.yml +++ /dev/null @@ -1,3 +0,0 @@ -- name: Restart Tomcat Service - become: yes - service: name=tomcat8 state=restarted enabled=yes diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/tasks/main.yml 
b/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/tasks/main.yml deleted file mode 100644 index e018e58f9ba7a..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/tasks/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- - -- name: Install Tomcat 8 - apt: - name: tomcat8 - update_cache: yes - state: latest - -- name: Add tomcat8 service user to the ssl-cert group - user: - name: tomcat8 - append: yes - groups: ssl-cert - -- name: Enable HTTPS connector - template: - src: server.xml.j2 - dest: /etc/tomcat8/server.xml - group: tomcat8 - mode: 0640 - notify: Restart Tomcat Service -... diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/templates/server.xml.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/templates/server.xml.j2 deleted file mode 100644 index 62ba3233223b1..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/templates/server.xml.j2 +++ /dev/null @@ -1,147 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/vars/default.yml b/x-pack/test/idp-fixture/src/main/resources/provision/vars/default.yml deleted file mode 100644 index de2c85f4ea31d..0000000000000 --- a/x-pack/test/idp-fixture/src/main/resources/provision/vars/default.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -ssl_dir: /etc/ssl -ssl_dir_private: "{{ ssl_dir }}/private" -ssl_dir_certs: "{{ ssl_dir }}/certs" - -openldap_server_user: openldap -openldap_key_name: ldap_server.key -openldap_cert_name: ldap_server.pem - -ldap_password: NickFuryHeartsES -ldap_domain: oldap.test.elasticsearch.com -ldap_domain_dn: "dc={{ ldap_domain.split('.')[0] }},dc={{ ldap_domain.split('.')[1] }},dc={{ ldap_domain.split('.')[2] }},dc={{ ldap_domain.split('.')[3] }}" - -tomcat_key_name: tomcat_server.key -tomcat_cert_name: tomcat_server.pem -tomcat_keystore_name: tomcat_server.p12 -tomcat_keystore_pass: secret -tomcat_key_alias: tomcat - -shib_idp_version: 3.3.1 -shib_installdir: /opt -shib_home: "{{ shib_installdir }}/shibboleth-idp" -shib_dns_name: "{{ ansible_fqdn }}:60443" -idp_sealer_password: secret -idp_keystore_password: secret1 -... diff --git a/x-pack/test/smb-fixture/build.gradle b/x-pack/test/smb-fixture/build.gradle index 846c38829870a..5b2161de2907b 100644 --- a/x-pack/test/smb-fixture/build.gradle +++ b/x-pack/test/smb-fixture/build.gradle @@ -1,4 +1,4 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.test.fixtures' -unitTest.enabled = false +test.enabled = false